From bde613cb63f0171ba75f8d8101317a41121a5976 Mon Sep 17 00:00:00 2001 From: Yan Gao Date: Tue, 12 Mar 2024 10:36:11 +0000 Subject: [PATCH 01/57] Update required package bitsandbytes==0.41.3 (#3114) --- examples/llm-flowertune/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/llm-flowertune/requirements.txt b/examples/llm-flowertune/requirements.txt index e557dbfc2ff8..c7ff57b403f7 100644 --- a/examples/llm-flowertune/requirements.txt +++ b/examples/llm-flowertune/requirements.txt @@ -2,7 +2,7 @@ flwr-nightly[rest,simulation] flwr_datasets==0.0.2 hydra-core==1.3.2 trl==0.7.2 -bitsandbytes==0.40.2 +bitsandbytes==0.41.3 scipy==1.11.2 peft==0.4.0 fschat[model_worker,webui]==0.2.35 From 0fe721b15ec1877c65eb0abdc85514598cb12ad8 Mon Sep 17 00:00:00 2001 From: tabdar-khan <71217662+tabdar-khan@users.noreply.github.com> Date: Tue, 12 Mar 2024 12:28:16 +0100 Subject: [PATCH 02/57] Add validation function for project name to comply with PEP 621 and PEP 503 (#3111) --- src/py/flwr/common/pyproject.py | 41 ++++++++++ src/py/flwr/common/pyproject_test.py | 108 +++++++++++++++++++++++++++ 2 files changed, 149 insertions(+) create mode 100644 src/py/flwr/common/pyproject.py create mode 100644 src/py/flwr/common/pyproject_test.py diff --git a/src/py/flwr/common/pyproject.py b/src/py/flwr/common/pyproject.py new file mode 100644 index 000000000000..66585e422397 --- /dev/null +++ b/src/py/flwr/common/pyproject.py @@ -0,0 +1,41 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Validates the project's name property.""" + +import re + + +def validate_project_name(name: str) -> bool: + """Validate the project name against PEP 621 and PEP 503 specifications. + + Conventions at a glance: + - Must be lowercase + - Must not contain special characters + - Must use hyphens(recommended) or underscores. No spaces. + - Recommended to be no more than 40 characters long (But it can be) + + Parameters + ---------- + name : str + The project name to validate. + + Returns + ------- + bool + True if the name is valid, False otherwise. + """ + if not name or len(name) > 40 or not re.match(r"^[a-z0-9-_]+$", name): + return False + return True diff --git a/src/py/flwr/common/pyproject_test.py b/src/py/flwr/common/pyproject_test.py new file mode 100644 index 000000000000..88a945054b83 --- /dev/null +++ b/src/py/flwr/common/pyproject_test.py @@ -0,0 +1,108 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for the function that validates name property.""" + +from .pyproject import validate_project_name + + +# Happy Flow +def test_valid_name_with_lower_case() -> None: + """Test a valid single-word project name with all lower case.""" + # Prepare + name = "myproject" + expected = True + # Execute + actual = validate_project_name(name) + # Assert + assert actual == expected, f"Expected {name} to be valid" + + +def test_valid_name_with_dashes() -> None: + """Test a valid project name with hyphens inbetween.""" + # Prepare + name = "valid-project-name" + expected = True + # Execute + actual = validate_project_name(name) + # Assert + assert actual == expected, f"Expected {name} to be valid" + + +def test_valid_name_with_underscores() -> None: + """Test a valid project name with underscores inbetween.""" + # Prepare + name = "valid_project_name" + expected = True + # Execute + actual = validate_project_name(name) + # Assert + assert actual == expected, f"Expected {name} to be valid" + + +def test_invalid_name_with_upper_letters() -> None: + """Tests a project name with Spaces and Uppercase letter.""" + # Prepare + name = "Invalid Project Name" + expected = False + # Execute + actual = validate_project_name(name) + # Assert + assert actual == expected, "Upper Case and Spaces are not allowed" + + +def test_name_with_spaces() -> None: + """Tests a project name with spaces inbetween.""" + # Prepare + name = "name with spaces" + expected = False + # Execute + actual = validate_project_name(name) + # Assert + assert actual == expected, "Spaces are not allowed" + + +def test_empty_name() -> None: + """Tests use-case for an empty project name.""" + # Prepare + name = "" + expected = False + # Execute + actual = validate_project_name(name) + # Assert + assert actual == expected, "Empty name is not valid" + + +def test_long_name() -> None: + """Tests for long project names.""" + # Prepare + name = "a" * 41 + expected = False + # Execute + actual = validate_project_name(name) + # Assert + # It can be more than 40 but generally + # it is recommended not to be more than 40 + assert actual == expected, "Name longer than 40 characters is not recommended" + + +def test_name_with_special_characters() -> None: + """Tests for project names with special characters.""" + # Prepare + name = "name!@#" + expected = False + # Execute + actual = validate_project_name(name) + # Assert + assert actual == expected, "Special characters are not allowed" From ad627835d5182265f46e4b2c8e48b51c344703d0 Mon Sep 17 00:00:00 2001 From: Heng Pan Date: Tue, 12 Mar 2024 11:58:36 +0000 Subject: [PATCH 03/57] Set correct `group_id` in `DefaultWorkflow` (#3115) --- src/py/flwr/server/workflow/default_workflows.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/py/flwr/server/workflow/default_workflows.py b/src/py/flwr/server/workflow/default_workflows.py index fad85d8eecf8..a5c726b0b191 100644 --- a/src/py/flwr/server/workflow/default_workflows.py +++ b/src/py/flwr/server/workflow/default_workflows.py @@ -127,7 
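As a quick illustration of the naming rules added in #3111 above, the sketch below exercises the new validator directly; the import path mirrors the file added in that patch (`src/py/flwr/common/pyproject.py`):

```python
from flwr.common.pyproject import validate_project_name

# Valid: lowercase letters, digits, hyphens, and underscores, at most 40 chars
assert validate_project_name("my-flower-app")
assert validate_project_name("my_flower_app")

# Invalid: uppercase letters, spaces, special characters, empty, or too long
assert not validate_project_name("My Flower App")
assert not validate_project_name("name!@#")
assert not validate_project_name("")
assert not validate_project_name("a" * 41)
```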
+127,7 @@ def default_init_params_workflow(driver: Driver, context: Context) -> None: content=content, message_type=MessageTypeLegacy.GET_PARAMETERS, dst_node_id=random_client.node_id, - group_id="", + group_id="0", ttl="", ) ] @@ -226,7 +226,7 @@ def default_fit_workflow( # pylint: disable=R0914 content=compat.fitins_to_recordset(fitins, True), message_type=MessageType.TRAIN, dst_node_id=proxy.node_id, - group_id="", + group_id=str(current_round), ttl="", ) for proxy, fitins in client_instructions @@ -306,7 +306,7 @@ def default_evaluate_workflow(driver: Driver, context: Context) -> None: content=compat.evaluateins_to_recordset(evalins, True), message_type=MessageType.EVALUATE, dst_node_id=proxy.node_id, - group_id="", + group_id=str(current_round), ttl="", ) for proxy, evalins in client_instructions From 25b797dd97e30722202bba849779014771542404 Mon Sep 17 00:00:00 2001 From: Heng Pan Date: Tue, 12 Mar 2024 17:38:16 +0000 Subject: [PATCH 04/57] Change the log-level to `DEBUG` for logs in `secaggplus_mod` (#3119) --- .../mod/secure_aggregation/secaggplus_mod.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py index ed0f8f4fd7b5..3e33438c9ddc 100644 --- a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py +++ b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py @@ -17,7 +17,7 @@ import os from dataclasses import dataclass, field -from logging import INFO, WARNING +from logging import DEBUG, WARNING from typing import Any, Callable, Dict, List, Tuple, cast from flwr.client.typing import ClientAppCallable @@ -322,7 +322,7 @@ def _setup( # Assigning parameter values to object fields sec_agg_param_dict = configs state.sample_num = cast(int, sec_agg_param_dict[Key.SAMPLE_NUMBER]) - log(INFO, "Node %d: starting stage 0...", state.nid) + log(DEBUG, "Node %d: starting stage 0...", state.nid) state.share_num = cast(int, sec_agg_param_dict[Key.SHARE_NUMBER]) state.threshold = cast(int, sec_agg_param_dict[Key.THRESHOLD]) @@ -347,7 +347,7 @@ def _setup( state.sk1, state.pk1 = private_key_to_bytes(sk1), public_key_to_bytes(pk1) state.sk2, state.pk2 = private_key_to_bytes(sk2), public_key_to_bytes(pk2) - log(INFO, "Node %d: stage 0 completes. uploading public keys...", state.nid) + log(DEBUG, "Node %d: stage 0 completes. uploading public keys...", state.nid) return {Key.PUBLIC_KEY_1: state.pk1, Key.PUBLIC_KEY_2: state.pk2} @@ -357,7 +357,7 @@ def _share_keys( ) -> Dict[str, ConfigsRecordValues]: named_bytes_tuples = cast(Dict[str, Tuple[bytes, bytes]], configs) key_dict = {int(sid): (pk1, pk2) for sid, (pk1, pk2) in named_bytes_tuples.items()} - log(INFO, "Node %d: starting stage 1...", state.nid) + log(DEBUG, "Node %d: starting stage 1...", state.nid) state.public_keys_dict = key_dict # Check if the size is larger than threshold @@ -409,7 +409,7 @@ def _share_keys( dsts.append(nid) ciphertexts.append(ciphertext) - log(INFO, "Node %d: stage 1 completes. uploading key shares...", state.nid) + log(DEBUG, "Node %d: stage 1 completes. 
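The `group_id` fix in #3115 above tags every message sent during a round with that round's number, so replies can later be grouped per round. A minimal sketch of the pattern, assuming the `Driver.create_message` helper that these workflows build their messages with:

```python
from flwr.common import MessageType, RecordSet

def send_train_messages(driver, node_ids, current_round: int):
    """Send one TRAIN message per node, tagged with the current round."""
    messages = [
        driver.create_message(
            content=RecordSet(),
            message_type=MessageType.TRAIN,
            dst_node_id=node_id,
            group_id=str(current_round),  # was "" before this fix
            ttl="",
        )
        for node_id in node_ids
    ]
    return list(driver.send_and_receive(messages))
```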
uploading key shares...", state.nid) return {Key.DESTINATION_LIST: dsts, Key.CIPHERTEXT_LIST: ciphertexts} @@ -419,7 +419,7 @@ def _collect_masked_vectors( configs: ConfigsRecord, fit: Callable[[], FitRes], ) -> Dict[str, ConfigsRecordValues]: - log(INFO, "Node %d: starting stage 2...", state.nid) + log(DEBUG, "Node %d: starting stage 2...", state.nid) available_clients: List[int] = [] ciphertexts = cast(List[bytes], configs[Key.CIPHERTEXT_LIST]) srcs = cast(List[int], configs[Key.SOURCE_LIST]) @@ -500,7 +500,7 @@ def _collect_masked_vectors( # Take mod of final weight update vector and return to server quantized_parameters = parameters_mod(quantized_parameters, state.mod_range) - log(INFO, "Node %d: stage 2 completed, uploading masked parameters...", state.nid) + log(DEBUG, "Node %d: stage 2 completed, uploading masked parameters...", state.nid) return { Key.MASKED_PARAMETERS: [ndarray_to_bytes(arr) for arr in quantized_parameters] } @@ -509,7 +509,7 @@ def _collect_masked_vectors( def _unmask( state: SecAggPlusState, configs: ConfigsRecord ) -> Dict[str, ConfigsRecordValues]: - log(INFO, "Node %d: starting stage 3...", state.nid) + log(DEBUG, "Node %d: starting stage 3...", state.nid) active_nids = cast(List[int], configs[Key.ACTIVE_NODE_ID_LIST]) dead_nids = cast(List[int], configs[Key.DEAD_NODE_ID_LIST]) @@ -523,5 +523,5 @@ def _unmask( shares += [state.rd_seed_share_dict[nid] for nid in active_nids] shares += [state.sk1_share_dict[nid] for nid in dead_nids] - log(INFO, "Node %d: stage 3 completes. uploading key shares...", state.nid) + log(DEBUG, "Node %d: stage 3 completes. uploading key shares...", state.nid) return {Key.NODE_ID_LIST: all_nids, Key.SHARE_LIST: shares} From 5866311f8d20ce8c7e113b5486cf476dd9be09e3 Mon Sep 17 00:00:00 2001 From: Heng Pan Date: Tue, 12 Mar 2024 21:00:00 +0000 Subject: [PATCH 05/57] Fix the module doc string of `secaggplus_mod` (#3106) --- src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py | 2 +- .../flwr/client/mod/secure_aggregation/secaggplus_mod_test.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py index 3e33438c9ddc..7d965cb031cb 100644 --- a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py +++ b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Message handler for the SecAgg+ protocol.""" +"""Modifier for the SecAgg+ protocol.""" import os diff --git a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py index d72d8b414f65..db5ed67c02a4 100644 --- a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py +++ b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
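Since #3119 above moves the per-stage SecAgg+ messages from INFO to DEBUG, they no longer appear with default settings; a small sketch of how to surface them again, assuming Flower's logger is registered under the name "flwr":

```python
import logging

# Lower the threshold of Flower's logger (and its handlers) so the
# "Node %d: starting stage ..." messages become visible again.
flwr_logger = logging.getLogger("flwr")
flwr_logger.setLevel(logging.DEBUG)
for handler in flwr_logger.handlers:
    handler.setLevel(logging.DEBUG)
```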
# ============================================================================== -"""The SecAgg+ protocol handler tests.""" +"""The SecAgg+ protocol modifier tests.""" import unittest from itertools import product From d6f274bf14697b07e45ac6cc254e4635f33e8b6d Mon Sep 17 00:00:00 2001 From: Heng Pan Date: Tue, 12 Mar 2024 21:11:42 +0000 Subject: [PATCH 06/57] Fix `SecAggPlusWorkflow` and `secaggplus_mod` (#3120) --- .../mod/secure_aggregation/secaggplus_mod.py | 51 ++++++++----------- .../secure_aggregation/secaggplus_workflow.py | 43 ++++++++-------- 2 files changed, 41 insertions(+), 53 deletions(-) diff --git a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py index 7d965cb031cb..989d5f6e1361 100644 --- a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py +++ b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py @@ -18,13 +18,14 @@ import os from dataclasses import dataclass, field from logging import DEBUG, WARNING -from typing import Any, Callable, Dict, List, Tuple, cast +from typing import Any, Dict, List, Tuple, cast from flwr.client.typing import ClientAppCallable from flwr.common import ( ConfigsRecord, Context, Message, + Parameters, RecordSet, ndarray_to_bytes, parameters_to_ndarrays, @@ -62,7 +63,7 @@ share_keys_plaintext_concat, share_keys_plaintext_separate, ) -from flwr.common.typing import ConfigsRecordValues, FitRes +from flwr.common.typing import ConfigsRecordValues @dataclass @@ -132,18 +133,6 @@ def to_dict(self) -> Dict[str, ConfigsRecordValues]: return ret -def _get_fit_fn( - msg: Message, ctxt: Context, call_next: ClientAppCallable -) -> Callable[[], FitRes]: - """Get the fit function.""" - - def fit() -> FitRes: - out_msg = call_next(msg, ctxt) - return compat.recordset_to_fitres(out_msg.content, keep_input=False) - - return fit - - def secaggplus_mod( msg: Message, ctxt: Context, @@ -173,25 +162,32 @@ def secaggplus_mod( check_configs(state.current_stage, configs) # Execute + out_content = RecordSet() if state.current_stage == Stage.SETUP: state.nid = msg.metadata.dst_node_id res = _setup(state, configs) elif state.current_stage == Stage.SHARE_KEYS: res = _share_keys(state, configs) elif state.current_stage == Stage.COLLECT_MASKED_VECTORS: - fit = _get_fit_fn(msg, ctxt, call_next) - res = _collect_masked_vectors(state, configs, fit) + out_msg = call_next(msg, ctxt) + out_content = out_msg.content + fitres = compat.recordset_to_fitres(out_content, keep_input=True) + res = _collect_masked_vectors( + state, configs, fitres.num_examples, fitres.parameters + ) + for p_record in out_content.parameters_records.values(): + p_record.clear() elif state.current_stage == Stage.UNMASK: res = _unmask(state, configs) else: - raise ValueError(f"Unknown secagg stage: {state.current_stage}") + raise ValueError(f"Unknown SecAgg/SecAgg+ stage: {state.current_stage}") # Save state ctxt.state.configs_records[RECORD_KEY_STATE] = ConfigsRecord(state.to_dict()) # Return message - content = RecordSet(configs_records={RECORD_KEY_CONFIGS: ConfigsRecord(res, False)}) - return msg.create_reply(content, ttl="") + out_content.configs_records[RECORD_KEY_CONFIGS] = ConfigsRecord(res, False) + return msg.create_reply(out_content, ttl="") def check_stage(current_stage: str, configs: ConfigsRecord) -> None: @@ -417,7 +413,8 @@ def _share_keys( def _collect_masked_vectors( state: SecAggPlusState, configs: ConfigsRecord, - fit: Callable[[], FitRes], + num_examples: int, + updated_parameters: Parameters, ) -> 
Dict[str, ConfigsRecordValues]: log(DEBUG, "Node %d: starting stage 2...", state.nid) available_clients: List[int] = [] @@ -447,26 +444,20 @@ def _collect_masked_vectors( state.rd_seed_share_dict[src] = rd_seed_share state.sk1_share_dict[src] = sk1_share - # Fit client - fit_res = fit() - if len(fit_res.metrics) > 0: - log( - WARNING, - "The metrics in FitRes will not be preserved or sent to the server.", - ) - ratio = fit_res.num_examples / state.max_weight + # Fit + ratio = num_examples / state.max_weight if ratio > 1: log( WARNING, "Potential overflow warning: the provided weight (%s) exceeds the specified" " max_weight (%s). This may lead to overflow issues.", - fit_res.num_examples, + num_examples, state.max_weight, ) q_ratio = round(ratio * state.target_range) dq_ratio = q_ratio / state.target_range - parameters = parameters_to_ndarrays(fit_res.parameters) + parameters = parameters_to_ndarrays(updated_parameters) parameters = parameters_multiply(parameters, dq_ratio) # Quantize parameter update (vector) diff --git a/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py b/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py index 559dc1cf8739..42ee9c15f1cd 100644 --- a/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py +++ b/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py @@ -18,11 +18,10 @@ import random from dataclasses import dataclass, field from logging import DEBUG, ERROR, INFO, WARN -from typing import Dict, List, Optional, Set, Union, cast +from typing import Dict, List, Optional, Set, Tuple, Union, cast import flwr.common.recordset_compat as compat from flwr.common import ( - Code, ConfigsRecord, Context, FitRes, @@ -30,7 +29,6 @@ MessageType, NDArrays, RecordSet, - Status, bytes_to_ndarray, log, ndarrays_to_parameters, @@ -55,7 +53,7 @@ Stage, ) from flwr.common.secure_aggregation.secaggplus_utils import pseudo_rand_gen -from flwr.server.compat.driver_client_proxy import DriverClientProxy +from flwr.server.client_proxy import ClientProxy from flwr.server.compat.legacy_context import LegacyContext from flwr.server.driver import Driver @@ -67,6 +65,7 @@ class WorkflowState: # pylint: disable=R0902 """The state of the SecAgg+ protocol.""" + nid_to_proxies: Dict[int, ClientProxy] = field(default_factory=dict) nid_to_fitins: Dict[int, RecordSet] = field(default_factory=dict) sampled_node_ids: Set[int] = field(default_factory=set) active_node_ids: Set[int] = field(default_factory=set) @@ -81,6 +80,7 @@ class WorkflowState: # pylint: disable=R0902 forward_srcs: Dict[int, List[int]] = field(default_factory=dict) forward_ciphertexts: Dict[int, List[bytes]] = field(default_factory=dict) aggregate_ndarrays: NDArrays = field(default_factory=list) + legacy_results: List[Tuple[ClientProxy, FitRes]] = field(default_factory=list) class SecAggPlusWorkflow: @@ -301,9 +301,10 @@ def setup_stage( # pylint: disable=R0912, R0914, R0915 ) state.nid_to_fitins = { - proxy.node_id: compat.fitins_to_recordset(fitins, False) + proxy.node_id: compat.fitins_to_recordset(fitins, True) for proxy, fitins in proxy_fitins_lst } + state.nid_to_proxies = {proxy.node_id: proxy for proxy, _ in proxy_fitins_lst} # Protocol config sampled_node_ids = list(state.nid_to_fitins.keys()) @@ -528,6 +529,12 @@ def make(nid: int) -> Message: masked_vector = parameters_mod(masked_vector, state.mod_range) state.aggregate_ndarrays = masked_vector + # Backward compatibility with Strategy + for msg in msgs: + fitres = 
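#3120 above changes how `secaggplus_mod` and `SecAggPlusWorkflow` hand masked vectors back and forth. For context, a sketch of how the two sides are typically wired together; the class and argument names follow Flower 1.8's public API and `client_fn` stands in for an existing client factory, so treat the details as assumptions:

```python
from flwr.client import ClientApp
from flwr.client.mod import secaggplus_mod
from flwr.server import Driver, LegacyContext, ServerApp, ServerConfig
from flwr.server.strategy import FedAvg
from flwr.server.workflow import DefaultWorkflow, SecAggPlusWorkflow

# Client side: the mod masks model updates before they leave the node.
client_app = ClientApp(client_fn=client_fn, mods=[secaggplus_mod])

# Server side: the workflow drives the SecAgg+ stages (setup, share keys,
# collect masked vectors, unmask) around the regular fit round.
server_app = ServerApp()

@server_app.main()
def main(driver: Driver, context) -> None:
    legacy_context = LegacyContext(
        state=context.state,
        config=ServerConfig(num_rounds=3),
        strategy=FedAvg(),
    )
    workflow = DefaultWorkflow(
        fit_workflow=SecAggPlusWorkflow(num_shares=3, reconstruction_threshold=2)
    )
    workflow(driver, legacy_context)
```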
compat.recordset_to_fitres(msg.content, True) + proxy = state.nid_to_proxies[msg.metadata.src_node_id] + state.legacy_results.append((proxy, fitres)) + return self._check_threshold(state) def unmask_stage( # pylint: disable=R0912, R0914, R0915 @@ -637,31 +644,21 @@ def make(nid: int) -> Message: for vec in aggregated_vector: vec += offset vec *= inv_dq_total_ratio - state.aggregate_ndarrays = aggregated_vector + + # Backward compatibility with Strategy + results = state.legacy_results + parameters = ndarrays_to_parameters(aggregated_vector) + for _, fitres in results: + fitres.parameters = parameters # No exception/failure handling currently log( INFO, "aggregate_fit: received %s results and %s failures", - 1, - 0, - ) - - final_fitres = FitRes( - status=Status(code=Code.OK, message=""), - parameters=ndarrays_to_parameters(aggregated_vector), - num_examples=round(state.max_weight / inv_dq_total_ratio), - metrics={}, - ) - empty_proxy = DriverClientProxy( + len(results), 0, - driver.grpc_driver, # type: ignore - False, - driver.run_id, # type: ignore - ) - aggregated_result = context.strategy.aggregate_fit( - current_round, [(empty_proxy, final_fitres)], [] ) + aggregated_result = context.strategy.aggregate_fit(current_round, results, []) parameters_aggregated, metrics_aggregated = aggregated_result # Update the parameters and write history From 1057001fc05ace6dcb87b373ff251bc870f7fc72 Mon Sep 17 00:00:00 2001 From: Adam Narozniak <51029327+adam-narozniak@users.noreply.github.com> Date: Tue, 12 Mar 2024 22:36:40 +0100 Subject: [PATCH 07/57] Fds add num_partitions property to partitioners (#3095) * Add num_partition property * Trigger the partitioning in the num_partitions --------- Co-authored-by: Daniel J. Beutel --- .../partitioner/dirichlet_partitioner.py | 7 +++++++ datasets/flwr_datasets/partitioner/iid_partitioner.py | 5 +++++ .../partitioner/inner_dirichlet_partitioner.py | 11 +++++++++++ .../partitioner/natural_id_partitioner.py | 7 +++++++ datasets/flwr_datasets/partitioner/partitioner.py | 5 +++++ .../flwr_datasets/partitioner/shard_partitioner.py | 9 +++++++++ .../flwr_datasets/partitioner/size_partitioner.py | 6 ++++++ 7 files changed, 50 insertions(+) diff --git a/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py b/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py index 5f1df71991bb..5271aad74a1e 100644 --- a/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py +++ b/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py @@ -132,6 +132,13 @@ def load_partition(self, node_id: int) -> datasets.Dataset: self._determine_node_id_to_indices_if_needed() return self.dataset.select(self._node_id_to_indices[node_id]) + @property + def num_partitions(self) -> int: + """Total number of partitions.""" + self._check_num_partitions_correctness_if_needed() + self._determine_node_id_to_indices_if_needed() + return self._num_partitions + def _initialize_alpha( self, alpha: Union[int, float, List[float], NDArrayFloat] ) -> NDArrayFloat: diff --git a/datasets/flwr_datasets/partitioner/iid_partitioner.py b/datasets/flwr_datasets/partitioner/iid_partitioner.py index c72b34f081f2..faa1dfa10615 100644 --- a/datasets/flwr_datasets/partitioner/iid_partitioner.py +++ b/datasets/flwr_datasets/partitioner/iid_partitioner.py @@ -50,3 +50,8 @@ def load_partition(self, node_id: int) -> datasets.Dataset: return self.dataset.shard( num_shards=self._num_partitions, index=node_id, contiguous=True ) + + @property + def num_partitions(self) -> int: + """Total number of partitions.""" 
+ return self._num_partitions diff --git a/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner.py b/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner.py index c25a9b059d18..bf07ab3591f5 100644 --- a/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner.py +++ b/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner.py @@ -119,6 +119,17 @@ def load_partition(self, node_id: int) -> datasets.Dataset: self._determine_node_id_to_indices_if_needed() return self.dataset.select(self._node_id_to_indices[node_id]) + @property + def num_partitions(self) -> int: + """Total number of partitions.""" + self._check_num_partitions_correctness_if_needed() + self._check_partition_sizes_correctness_if_needed() + self._check_the_sum_of_partition_sizes() + self._determine_num_unique_classes_if_needed() + self._alpha = self._initialize_alpha_if_needed(self._initial_alpha) + self._determine_node_id_to_indices_if_needed() + return self._num_partitions + def _initialize_alpha_if_needed( self, alpha: Union[int, float, List[float], NDArrayFloat] ) -> NDArrayFloat: diff --git a/datasets/flwr_datasets/partitioner/natural_id_partitioner.py b/datasets/flwr_datasets/partitioner/natural_id_partitioner.py index b8f28696f3b7..947501965cc6 100644 --- a/datasets/flwr_datasets/partitioner/natural_id_partitioner.py +++ b/datasets/flwr_datasets/partitioner/natural_id_partitioner.py @@ -65,6 +65,13 @@ def load_partition(self, node_id: int) -> datasets.Dataset: lambda row: row[self._partition_by] == self._node_id_to_natural_id[node_id] ) + @property + def num_partitions(self) -> int: + """Total number of partitions.""" + if len(self._node_id_to_natural_id) == 0: + self._create_int_node_id_to_natural_id() + return len(self._node_id_to_natural_id) + @property def node_id_to_natural_id(self) -> Dict[int, str]: """Node id to corresponding natural id present. diff --git a/datasets/flwr_datasets/partitioner/partitioner.py b/datasets/flwr_datasets/partitioner/partitioner.py index 92405152efc6..73eb6f4a17b3 100644 --- a/datasets/flwr_datasets/partitioner/partitioner.py +++ b/datasets/flwr_datasets/partitioner/partitioner.py @@ -79,3 +79,8 @@ def is_dataset_assigned(self) -> bool: True if a dataset is assigned, otherwise False. """ return self._dataset is not None + + @property + @abstractmethod + def num_partitions(self) -> int: + """Total number of partitions.""" diff --git a/datasets/flwr_datasets/partitioner/shard_partitioner.py b/datasets/flwr_datasets/partitioner/shard_partitioner.py index 7c86570fe487..05444f537c8c 100644 --- a/datasets/flwr_datasets/partitioner/shard_partitioner.py +++ b/datasets/flwr_datasets/partitioner/shard_partitioner.py @@ -179,6 +179,15 @@ def load_partition(self, node_id: int) -> datasets.Dataset: self._determine_node_id_to_indices_if_needed() return self.dataset.select(self._node_id_to_indices[node_id]) + @property + def num_partitions(self) -> int: + """Total number of partitions.""" + self._check_num_partitions_correctness_if_needed() + self._check_possibility_of_partitions_creation() + self._sort_dataset_if_needed() + self._determine_node_id_to_indices_if_needed() + return self._num_partitions + def _determine_node_id_to_indices_if_needed(self) -> None: # pylint: disable=R0914 """Assign sample indices to each node id. 
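The `num_partitions` property added across the partitioners in #3095 makes the partition count queryable without loading a partition first; a minimal sketch, assuming the `dataset` setter exposed by the `Partitioner` base class:

```python
from datasets import Dataset
from flwr_datasets.partitioner import IidPartitioner

partitioner = IidPartitioner(num_partitions=10)
partitioner.dataset = Dataset.from_dict({"feature": list(range(100))})

print(partitioner.num_partitions)          # 10
print(len(partitioner.load_partition(0)))  # 10 rows in partition 0
```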
diff --git a/datasets/flwr_datasets/partitioner/size_partitioner.py b/datasets/flwr_datasets/partitioner/size_partitioner.py index 35ca750949ee..29fc2e5b1add 100644 --- a/datasets/flwr_datasets/partitioner/size_partitioner.py +++ b/datasets/flwr_datasets/partitioner/size_partitioner.py @@ -84,6 +84,12 @@ def load_partition(self, node_id: int) -> datasets.Dataset: self._determine_node_id_to_indices_if_needed() return self.dataset.select(self._node_id_to_indices[node_id]) + @property + def num_partitions(self) -> int: + """Total number of partitions.""" + self._determine_node_id_to_indices_if_needed() + return self._num_partitions + @property def node_id_to_size(self) -> Dict[int, int]: """Node id to the number of samples.""" From 9e7e4a8035911fb2307045aa19fce280f74906b7 Mon Sep 17 00:00:00 2001 From: Charles Beauville Date: Wed, 13 Mar 2024 12:21:46 +0000 Subject: [PATCH 08/57] Improve formatting for changelog generating script (#3122) --- src/py/flwr_tool/update_changelog.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/src/py/flwr_tool/update_changelog.py b/src/py/flwr_tool/update_changelog.py index a158cca21765..e3cffff7e36c 100644 --- a/src/py/flwr_tool/update_changelog.py +++ b/src/py/flwr_tool/update_changelog.py @@ -50,7 +50,7 @@ def _get_pull_requests_since_tag(gh_api, tag): def _format_pr_reference(title, number, url): """Format a pull request reference as a markdown list item.""" - return f"- **{title}** ([#{number}]({url}))" + return f"- **{title.replace('*', '')}** ([#{number}]({url}))" def _extract_changelog_entry(pr_info): @@ -193,11 +193,24 @@ def _insert_new_entry(content, pr_info, pr_reference, pr_entry_text, unreleased_ content = content[:pr_ref_end] + updated_entry + content[existing_entry_start:] else: insert_index = content.find("\n", unreleased_index) + 1 + + # Split the pr_entry_text into paragraphs + paragraphs = pr_entry_text.split("\n") + + # Indent each paragraph + indented_paragraphs = [ + " " + paragraph if paragraph else paragraph for paragraph in paragraphs + ] + + # Join the paragraphs back together, ensuring each is separated by a newline + indented_pr_entry_text = "\n".join(indented_paragraphs) + content = ( content[:insert_index] + + "\n" + pr_reference - + "\n " - + pr_entry_text + + "\n\n" + + indented_pr_entry_text + "\n" + content[insert_index:] ) From 930cdafe2405cd0f5064d33664aad9ab6869a23c Mon Sep 17 00:00:00 2001 From: Adam Narozniak <51029327+adam-narozniak@users.noreply.github.com> Date: Wed, 13 Mar 2024 16:00:45 +0100 Subject: [PATCH 09/57] Change the `self_balancing` to `False` (#3123) --- datasets/flwr_datasets/partitioner/dirichlet_partitioner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py b/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py index 5271aad74a1e..cb23acea01b6 100644 --- a/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py +++ b/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py @@ -89,7 +89,7 @@ def __init__( # pylint: disable=R0913 partition_by: str, alpha: Union[int, float, List[float], NDArrayFloat], min_partition_size: int = 10, - self_balancing: bool = True, + self_balancing: bool = False, shuffle: bool = True, seed: Optional[int] = 42, ) -> None: From 6b599b7acf4b8760ff0d064e0913e1b326c4c193 Mon Sep 17 00:00:00 2001 From: Taner Topal Date: Wed, 13 Mar 2024 17:43:51 +0000 Subject: [PATCH 10/57] Improve the CLI command logs (#3130) --- src/py/flwr/cli/new/new.py | 24 
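With #3123 above, `DirichletPartitioner` no longer self-balances by default, so callers who relied on that behaviour now have to opt in explicitly. A sketch, assuming `FederatedDataset` accepts `Partitioner` instances as partitioner values:

```python
from flwr_datasets import FederatedDataset
from flwr_datasets.partitioner import DirichletPartitioner

partitioner = DirichletPartitioner(
    num_partitions=10,
    partition_by="label",
    alpha=0.5,
    self_balancing=True,  # no longer the default after this change
)
fds = FederatedDataset(dataset="mnist", partitioners={"train": partitioner})
partition = fds.load_partition(0, "train")
```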
++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/src/py/flwr/cli/new/new.py b/src/py/flwr/cli/new/new.py index 8d644391ca5b..468d434acd8e 100644 --- a/src/py/flwr/cli/new/new.py +++ b/src/py/flwr/cli/new/new.py @@ -81,7 +81,13 @@ def new( ] = None, ) -> None: """Create new Flower project.""" - print(f"Creating Flower project {project_name}...") + print( + typer.style( + f"🔨 Creating Flower project {project_name}...", + fg=typer.colors.GREEN, + bold=True, + ) + ) if project_name is None: project_name = prompt_text("Please provide project name") @@ -131,4 +137,18 @@ def new( context=context, ) - print("Project creation successful.") + print( + typer.style( + "🎊 Project creation successful.\n\n" + "Use the following command to run your project:\n", + fg=typer.colors.GREEN, + bold=True, + ) + ) + print( + typer.style( + f" cd {project_name}\n" + " pip install .\n flwr run\n", + fg=typer.colors.BRIGHT_CYAN, + bold=True, + ) + ) From e259976318e62dd17b9176b31d221c36ac4bb286 Mon Sep 17 00:00:00 2001 From: Charles Beauville Date: Wed, 13 Mar 2024 18:10:29 +0000 Subject: [PATCH 11/57] Remove legacy check that leads to unregistered clients (#3128) --- .../server/superlink/fleet/message_handler/message_handler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py index 5fe815180823..c99a7854d53a 100644 --- a/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py +++ b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py @@ -47,7 +47,7 @@ def create_node( def delete_node(request: DeleteNodeRequest, state: State) -> DeleteNodeResponse: """.""" # Validate node_id - if request.node.anonymous or request.node.node_id <= 0: + if request.node.anonymous or request.node.node_id == 0: return DeleteNodeResponse() # Update state From 0abd7e44690033523557189d1325c60d651e1fc1 Mon Sep 17 00:00:00 2001 From: Heng Pan Date: Wed, 13 Mar 2024 18:50:20 +0000 Subject: [PATCH 12/57] Fix a bug in `Driver` class (#3125) --------- Co-authored-by: Daniel J. 
Beutel Co-authored-by: Charles Beauville --- src/py/flwr/server/compat/app.py | 5 +---- src/py/flwr/server/driver/driver.py | 4 ++-- src/py/flwr/server/driver/driver_test.py | 6 ++---- src/py/flwr/server/run_serverapp.py | 2 +- src/py/flwr/server/workflow/default_workflows.py | 1 - src/py/flwr/simulation/run_simulation.py | 2 +- 6 files changed, 7 insertions(+), 13 deletions(-) diff --git a/src/py/flwr/server/compat/app.py b/src/py/flwr/server/compat/app.py index c13a713b5f2c..ff1d99b5366e 100644 --- a/src/py/flwr/server/compat/app.py +++ b/src/py/flwr/server/compat/app.py @@ -143,11 +143,8 @@ def start_driver( # pylint: disable=too-many-arguments, too-many-locals config=initialized_config, ) + # Terminate the thread f_stop.set() - - # Stop the Driver API server and the thread - del driver - thread.join() event(EventType.START_SERVER_LEAVE) diff --git a/src/py/flwr/server/driver/driver.py b/src/py/flwr/server/driver/driver.py index bcaac1f61b85..0098e0ce97c2 100644 --- a/src/py/flwr/server/driver/driver.py +++ b/src/py/flwr/server/driver/driver.py @@ -247,8 +247,8 @@ def send_and_receive( time.sleep(3) return ret - def __del__(self) -> None: - """Disconnect GrpcDriver if connected.""" + def close(self) -> None: + """Disconnect from the SuperLink if connected.""" # Check if GrpcDriver is initialized if self.grpc_driver is None: return diff --git a/src/py/flwr/server/driver/driver_test.py b/src/py/flwr/server/driver/driver_test.py index 2bf253222f94..5136f4f90210 100644 --- a/src/py/flwr/server/driver/driver_test.py +++ b/src/py/flwr/server/driver/driver_test.py @@ -205,8 +205,7 @@ def test_del_with_initialized_driver(self) -> None: self.driver._get_grpc_driver_and_run_id() # Execute - # pylint: disable-next=unnecessary-dunder-call - self.driver.__del__() + self.driver.close() # Assert self.mock_grpc_driver.disconnect.assert_called_once() @@ -214,8 +213,7 @@ def test_del_with_initialized_driver(self) -> None: def test_del_with_uninitialized_driver(self) -> None: """Test cleanup behavior when Driver is not initialized.""" # Execute - # pylint: disable-next=unnecessary-dunder-call - self.driver.__del__() + self.driver.close() # Assert self.mock_grpc_driver.disconnect.assert_not_called() diff --git a/src/py/flwr/server/run_serverapp.py b/src/py/flwr/server/run_serverapp.py index 5b00d356886a..2f0f1185847e 100644 --- a/src/py/flwr/server/run_serverapp.py +++ b/src/py/flwr/server/run_serverapp.py @@ -138,7 +138,7 @@ def run_server_app() -> None: run(driver=driver, server_app_dir=server_app_dir, server_app_attr=server_app_attr) # Clean up - driver.__del__() # pylint: disable=unnecessary-dunder-call + driver.close() event(EventType.RUN_SERVER_APP_LEAVE) diff --git a/src/py/flwr/server/workflow/default_workflows.py b/src/py/flwr/server/workflow/default_workflows.py index a5c726b0b191..876ae56dcadc 100644 --- a/src/py/flwr/server/workflow/default_workflows.py +++ b/src/py/flwr/server/workflow/default_workflows.py @@ -98,7 +98,6 @@ def __call__(self, driver: Driver, context: Context) -> None: # Terminate the thread f_stop.set() - del driver thread.join() diff --git a/src/py/flwr/simulation/run_simulation.py b/src/py/flwr/simulation/run_simulation.py index 31884f2edc68..56fce363726a 100644 --- a/src/py/flwr/simulation/run_simulation.py +++ b/src/py/flwr/simulation/run_simulation.py @@ -240,7 +240,7 @@ def _main_loop( finally: # Stop Driver driver_server.stop(grace=0) - del driver + driver.close() # Trigger stop event f_stop.set() From 8252cdf16fa2cc09f6254fdb64d3a74a4af7becf Mon Sep 17 00:00:00 2001 
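#3125 above replaces the implicit `__del__`-based disconnect with an explicit `Driver.close()`, so scripts that construct a `Driver` themselves should release the connection deliberately. A sketch of the intended pattern (constructing `Driver` with default connection settings is an assumption here):

```python
from flwr.server.driver import Driver

driver = Driver()
try:
    # ... create messages, push them, and pull the replies ...
    pass
finally:
    # Explicitly disconnect from the SuperLink instead of relying on __del__
    driver.close()
```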
From: Charles Beauville Date: Wed, 13 Mar 2024 21:14:49 +0000 Subject: [PATCH 13/57] Create pyproject for templates (#3131) --------- Co-authored-by: Taner Topal Co-authored-by: Daniel J. Beutel Co-authored-by: Taner Topal --- src/py/flwr/cli/new/new.py | 2 +- .../flwr/cli/new/templates/app/README.md.tpl | 6 +++++- .../templates/app/pyproject.numpy.toml.tpl | 19 +++++++++++++++++ ...ct.toml.tpl => pyproject.pytorch.toml.tpl} | 2 +- .../app/pyproject.tensorflow.toml.tpl | 21 +++++++++++++++++++ 5 files changed, 47 insertions(+), 3 deletions(-) create mode 100644 src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl rename src/py/flwr/cli/new/templates/app/{pyproject.toml.tpl => pyproject.pytorch.toml.tpl} (85%) create mode 100644 src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl diff --git a/src/py/flwr/cli/new/new.py b/src/py/flwr/cli/new/new.py index 468d434acd8e..4fa6c48c7ed5 100644 --- a/src/py/flwr/cli/new/new.py +++ b/src/py/flwr/cli/new/new.py @@ -118,7 +118,7 @@ def new( "README.md": {"template": "app/README.md.tpl"}, "requirements.txt": {"template": f"app/requirements.{framework_str}.txt.tpl"}, "flower.toml": {"template": "app/flower.toml.tpl"}, - "pyproject.toml": {"template": "app/pyproject.toml.tpl"}, + "pyproject.toml": {"template": f"app/pyproject.{framework_str}.toml.tpl"}, f"{pnl}/__init__.py": {"template": "app/code/__init__.py.tpl"}, f"{pnl}/server.py": {"template": f"app/code/server.{framework_str}.py.tpl"}, f"{pnl}/client.py": {"template": f"app/code/client.{framework_str}.py.tpl"}, diff --git a/src/py/flwr/cli/new/templates/app/README.md.tpl b/src/py/flwr/cli/new/templates/app/README.md.tpl index 6edb99a7f5ed..516bed0f40c2 100644 --- a/src/py/flwr/cli/new/templates/app/README.md.tpl +++ b/src/py/flwr/cli/new/templates/app/README.md.tpl @@ -3,7 +3,11 @@ ## Install dependencies ```bash -pip install -r requirements.txt +# Using pip +pip install . 
+ +# Or using Poetry +poetry install ``` ## Run (Simulation Engine) diff --git a/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl new file mode 100644 index 000000000000..15d8211a1a25 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl @@ -0,0 +1,19 @@ +[build-system] +requires = ["poetry-core>=1.4.0"] +build-backend = "poetry.core.masonry.api" + +[tool.poetry] +name = "$project_name" +version = "1.0.0" +description = "" +license = "Apache-2.0" +authors = [ + "The Flower Authors ", +] +readme = "README.md" + +[tool.poetry.dependencies] +python = "^3.9" +# Mandatory dependencies +numpy = "^1.21.0" +flwr = { version = "^1.8.0", extras = ["simulation"] } diff --git a/src/py/flwr/cli/new/templates/app/pyproject.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl similarity index 85% rename from src/py/flwr/cli/new/templates/app/pyproject.toml.tpl rename to src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl index ca3f625e2437..8c67ec43ac07 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl @@ -15,7 +15,7 @@ readme = "README.md" [tool.poetry.dependencies] python = "^3.9" # Mandatory dependencies -flwr-nightly = { version = "1.8.0.dev20240309", extras = ["simulation"] } +flwr = { version = "^1.8.0", extras = ["simulation"] } flwr-datasets = { version = "^0.0.2", extras = ["vision"] } torch = "2.2.1" torchvision = "0.17.1" diff --git a/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl new file mode 100644 index 000000000000..f7383a78b7d5 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl @@ -0,0 +1,21 @@ +[build-system] +requires = ["poetry-core>=1.4.0"] +build-backend = "poetry.core.masonry.api" + +[tool.poetry] +name = "$project_name" +version = "1.0.0" +description = "" +license = "Apache-2.0" +authors = [ + "The Flower Authors ", +] +readme = "README.md" + +[tool.poetry.dependencies] +python = ">=3.9,<3.11" +# Mandatory dependencies +flwr = { version = "^1.8.0", extras = ["simulation"] } +flwr-datasets = { version = "^0.0.2", extras = ["vision"] } +tensorflow-cpu = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == \"x86_64\"" } +tensorflow-macos = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "sys_platform == \"darwin\" and platform_machine == \"arm64\"" } From 8654bfb99c8e2b0b68a3921b0e52ee63f64040a1 Mon Sep 17 00:00:00 2001 From: Adam Narozniak <51029327+adam-narozniak@users.noreply.github.com> Date: Wed, 13 Mar 2024 21:53:33 +0000 Subject: [PATCH 14/57] Rename `load_full` to `load_split` (#3097) Co-authored-by: Javier --- datasets/README.md | 4 ++-- datasets/doc/source/how-to-use-with-numpy.rst | 2 +- datasets/doc/source/how-to-use-with-pytorch.rst | 2 +- .../doc/source/how-to-use-with-tensorflow.rst | 2 +- datasets/doc/source/tutorial-quickstart.rst | 2 +- datasets/flwr_datasets/federated_dataset.py | 12 ++++++------ datasets/flwr_datasets/federated_dataset_test.py | 16 ++++++++-------- doc/source/tutorial-quickstart-xgboost.rst | 2 +- ...-series-get-started-with-flower-pytorch.ipynb | 2 +- examples/advanced-pytorch/utils.py | 2 +- examples/advanced-tensorflow/server.py | 2 +- examples/custom-metrics/client.py | 4 ++-- examples/embedded-devices/client_pytorch.py | 2 +- examples/embedded-devices/client_tf.py | 2 +- 
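The new `pyproject.*.toml.tpl` files above use `$project_name` placeholders; purely as an illustration of how such a placeholder gets filled in, here is a hypothetical render step based on Python's `string.Template` (the real `flwr new` command has its own rendering helper):

```python
from string import Template

# Hypothetical stand-in for the contents of a *.toml.tpl file
template_text = Template(
    '[tool.poetry]\nname = "$project_name"\nversion = "1.0.0"\n'
)
print(template_text.substitute(project_name="my-flower-app"))
```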
examples/quickstart-sklearn-tabular/client.py | 2 +- examples/simulation-pytorch/sim.ipynb | 2 +- examples/simulation-pytorch/sim.py | 2 +- examples/simulation-tensorflow/sim.ipynb | 2 +- examples/simulation-tensorflow/sim.py | 2 +- examples/sklearn-logreg-mnist/server.py | 2 +- examples/vit-finetune/dataset.py | 2 +- examples/xgboost-comprehensive/client.py | 2 +- examples/xgboost-comprehensive/server.py | 2 +- examples/xgboost-comprehensive/sim.py | 2 +- 24 files changed, 38 insertions(+), 38 deletions(-) diff --git a/datasets/README.md b/datasets/README.md index 61292fe988bf..fb274a9f4bc1 100644 --- a/datasets/README.md +++ b/datasets/README.md @@ -59,7 +59,7 @@ If you plan to change the type of the dataset to run the code with your ML frame # Usage -Flower Datasets exposes the `FederatedDataset` abstraction to represent the dataset needed for federated learning/evaluation/analytics. It has two powerful methods that let you handle the dataset preprocessing: `load_partition(node_id, split)` and `load_full(split)`. +Flower Datasets exposes the `FederatedDataset` abstraction to represent the dataset needed for federated learning/evaluation/analytics. It has two powerful methods that let you handle the dataset preprocessing: `load_partition(node_id, split)` and `load_split(split)`. Here's a basic quickstart example of how to partition the MNIST dataset: @@ -71,7 +71,7 @@ mnist_fds = FederatedDataset("mnist", partitioners={"train": 100} mnist_partition_0 = mnist_fds.load_partition(0, "train") -centralized_data = mnist_fds.load_full("test") +centralized_data = mnist_fds.load_split("test") ``` For more details, please refer to the specific how-to guides or tutorial. They showcase customization and more advanced features. diff --git a/datasets/doc/source/how-to-use-with-numpy.rst b/datasets/doc/source/how-to-use-with-numpy.rst index db80b712a13e..a131ef940a01 100644 --- a/datasets/doc/source/how-to-use-with-numpy.rst +++ b/datasets/doc/source/how-to-use-with-numpy.rst @@ -9,7 +9,7 @@ Create a ``FederatedDataset``:: fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) partition = fds.load_partition(0, "train") - centralized_dataset = fds.load_full("test") + centralized_dataset = fds.load_split("test") Inspect the names of the features:: diff --git a/datasets/doc/source/how-to-use-with-pytorch.rst b/datasets/doc/source/how-to-use-with-pytorch.rst index 85e7833b0869..613f00a9a059 100644 --- a/datasets/doc/source/how-to-use-with-pytorch.rst +++ b/datasets/doc/source/how-to-use-with-pytorch.rst @@ -8,7 +8,7 @@ Standard setup - download the dataset, choose the partitioning:: fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) partition = fds.load_partition(0, "train") - centralized_dataset = fds.load_full("test") + centralized_dataset = fds.load_split("test") Determine the names of the features (you can alternatively do that directly on the Hugging Face website). The name can vary e.g. 
"img" or "image", "label" or "labels":: diff --git a/datasets/doc/source/how-to-use-with-tensorflow.rst b/datasets/doc/source/how-to-use-with-tensorflow.rst index 3dc8474b726a..dc215819a9e8 100644 --- a/datasets/doc/source/how-to-use-with-tensorflow.rst +++ b/datasets/doc/source/how-to-use-with-tensorflow.rst @@ -11,7 +11,7 @@ Create a ``FederatedDataset``:: fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) partition = fds.load_partition(0, "train") - centralized_dataset = fds.load_full("test") + centralized_dataset = fds.load_split("test") Inspect the names of the features:: diff --git a/datasets/doc/source/tutorial-quickstart.rst b/datasets/doc/source/tutorial-quickstart.rst index bd4f336d618d..e820e116fc61 100644 --- a/datasets/doc/source/tutorial-quickstart.rst +++ b/datasets/doc/source/tutorial-quickstart.rst @@ -38,7 +38,7 @@ To iid partition your dataset, choose the split you want to partition and the nu fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) partition = fds.load_partition(0, "train") - centralized_dataset = fds.load_full("test") + centralized_dataset = fds.load_split("test") Now you're ready to go. You have ten partitions created from the train split of the CIFAR10 dataset and the test split for the centralized evaluation. We will convert the type of the dataset from Hugging Face's `Dataset` type to the one diff --git a/datasets/flwr_datasets/federated_dataset.py b/datasets/flwr_datasets/federated_dataset.py index 588d1ab40aec..fc94be3c39a1 100644 --- a/datasets/flwr_datasets/federated_dataset.py +++ b/datasets/flwr_datasets/federated_dataset.py @@ -83,7 +83,7 @@ class FederatedDataset: >>> # Load partition for client with ID 10. >>> partition = mnist_fds.load_partition(10, "train") >>> # Use test split for centralized evaluation. - >>> centralized = mnist_fds.load_full("test") + >>> centralized = mnist_fds.load_split("test") Automatically divde the data returned from `load_partition` >>> mnist_fds = FederatedDataset( @@ -131,9 +131,9 @@ def __init__( self._shuffle = shuffle self._seed = seed # _dataset is prepared lazily on the first call to `load_partition` - # or `load_full`. See _prepare_datasets for more details + # or `load_split`. See _prepare_datasets for more details self._dataset: Optional[DatasetDict] = None - # Indicate if the dataset is prepared for `load_partition` or `load_full` + # Indicate if the dataset is prepared for `load_partition` or `load_split` self._dataset_prepared: bool = False def load_partition( @@ -144,7 +144,7 @@ def load_partition( """Load the partition specified by the idx in the selected split. The dataset is downloaded only when the first call to `load_partition` or - `load_full` is made. + `load_split` is made. Parameters ---------- @@ -190,11 +190,11 @@ def load_partition( ) return divided_partition - def load_full(self, split: str) -> Dataset: + def load_split(self, split: str) -> Dataset: """Load the full split of the dataset. The dataset is downloaded only when the first call to `load_partition` or - `load_full` is made. + `load_split` is made. 
Parameters ---------- diff --git a/datasets/flwr_datasets/federated_dataset_test.py b/datasets/flwr_datasets/federated_dataset_test.py index e01f56342954..fb9958a32008 100644 --- a/datasets/flwr_datasets/federated_dataset_test.py +++ b/datasets/flwr_datasets/federated_dataset_test.py @@ -109,12 +109,12 @@ def test_divide_partition_integration_size( else: self.assertEqual(len(partition), expected_length) - def test_load_full(self) -> None: - """Test if the load_full works with the correct split name.""" + def test_load_split(self) -> None: + """Test if the load_split works with the correct split name.""" dataset_fds = FederatedDataset( dataset=self.dataset_name, partitioners={"train": 100} ) - dataset_fds_test = dataset_fds.load_full(self.test_split) + dataset_fds_test = dataset_fds.load_split(self.test_split) dataset_test = datasets.load_dataset(self.dataset_name)[self.test_split] self.assertEqual(len(dataset_fds_test), len(dataset_test)) @@ -158,7 +158,7 @@ def test_resplit_dataset_into_one(self) -> None: partitioners={"train": 100}, resplitter={"full": ("train", self.test_split)}, ) - full = fds.load_full("full") + full = fds.load_split("full") self.assertEqual(dataset_length, len(full)) # pylint: disable=protected-access @@ -193,7 +193,7 @@ def resplit(dataset: DatasetDict) -> DatasetDict: fds = FederatedDataset( dataset=self.dataset_name, partitioners={"train": 100}, resplitter=resplit ) - full = fds.load_full("full") + full = fds.load_split("full") dataset = datasets.load_dataset(self.dataset_name) dataset_length = sum([len(ds) for ds in dataset.values()]) self.assertEqual(len(full), dataset_length) @@ -227,7 +227,7 @@ def test_shuffling_applied(self, mock_func: Mock) -> None: fds = FederatedDataset( dataset="does-not-matter", partitioners={"train": 10}, shuffle=True, seed=42 ) - train = fds.load_full("train") + train = fds.load_split("train") # This should be shuffled result = train["features"] @@ -245,7 +245,7 @@ def test_shuffling_not_applied(self, mock_func: Mock) -> None: partitioners={"train": 10}, shuffle=False, ) - train = fds.load_full("train") + train = fds.load_split("train") # This should not be shuffled result = train["features"] @@ -278,7 +278,7 @@ def resplit(dataset: DatasetDict) -> DatasetDict: resplitter=resplit, shuffle=True, ) - train = fds.load_full("train") + train = fds.load_split("train") # This should not be shuffled result = train["features"] diff --git a/doc/source/tutorial-quickstart-xgboost.rst b/doc/source/tutorial-quickstart-xgboost.rst index 751024db14e4..7ac055138814 100644 --- a/doc/source/tutorial-quickstart-xgboost.rst +++ b/doc/source/tutorial-quickstart-xgboost.rst @@ -884,7 +884,7 @@ After importing all required packages, we define a :code:`main()` function to pe # Load centralised test set if args.centralised_eval or args.centralised_eval_client: log(INFO, "Loading centralised test set...") - test_data = fds.load_full("test") + test_data = fds.load_split("test") test_data.set_format("numpy") num_test = test_data.shape[0] test_dmatrix = transform_dataset_to_dmatrix(test_data) diff --git a/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb b/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb index fab3dafba5e5..2b8dd382bb79 100644 --- a/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb +++ b/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb @@ -148,7 +148,7 @@ " partition = partition.train_test_split(train_size=0.8)\n", " trainloaders.append(DataLoader(partition[\"train\"], 
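#3097 renames `load_full` to `load_split` across the docs and examples above; every updated call site follows the same pattern:

```python
from flwr_datasets import FederatedDataset

fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10})

# Per-client partition from the partitioned "train" split
partition = fds.load_partition(0, "train")

# Whole "test" split for centralized evaluation (formerly `load_full`)
centralized_dataset = fds.load_split("test")
```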
batch_size=BATCH_SIZE))\n", " valloaders.append(DataLoader(partition[\"test\"], batch_size=BATCH_SIZE))\n", - " testset = fds.load_full(\"test\").with_transform(apply_transforms)\n", + " testset = fds.load_split(\"test\").with_transform(apply_transforms)\n", " testloader = DataLoader(testset, batch_size=BATCH_SIZE)\n", " return trainloaders, valloaders, testloader\n", "\n", diff --git a/examples/advanced-pytorch/utils.py b/examples/advanced-pytorch/utils.py index 4a0f6918cdd6..fd9dab19a70d 100644 --- a/examples/advanced-pytorch/utils.py +++ b/examples/advanced-pytorch/utils.py @@ -21,7 +21,7 @@ def load_partition(partition_id, toy: bool = False): def load_centralized_data(): fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) - centralized_data = fds.load_full("test") + centralized_data = fds.load_split("test") centralized_data = centralized_data.with_transform(apply_transforms) return centralized_data diff --git a/examples/advanced-tensorflow/server.py b/examples/advanced-tensorflow/server.py index 26dde312bee5..e159a096dc83 100644 --- a/examples/advanced-tensorflow/server.py +++ b/examples/advanced-tensorflow/server.py @@ -47,7 +47,7 @@ def get_evaluate_fn(model): # Load data here to avoid the overhead of doing it in `evaluate` itself fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) - test = fds.load_full("test") + test = fds.load_split("test") test.set_format("numpy") x_test, y_test = test["img"] / 255.0, test["label"] diff --git a/examples/custom-metrics/client.py b/examples/custom-metrics/client.py index d0230e455477..6a194e92cdce 100644 --- a/examples/custom-metrics/client.py +++ b/examples/custom-metrics/client.py @@ -17,8 +17,8 @@ # Load data with Flower Datasets (CIFAR-10) fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) -train = fds.load_full("train") -test = fds.load_full("test") +train = fds.load_split("train") +test = fds.load_split("test") # Using Numpy format train_np = train.with_format("numpy") diff --git a/examples/embedded-devices/client_pytorch.py b/examples/embedded-devices/client_pytorch.py index 3f1e6c7d51b7..6bd69c16567e 100644 --- a/examples/embedded-devices/client_pytorch.py +++ b/examples/embedded-devices/client_pytorch.py @@ -112,7 +112,7 @@ def apply_transforms(batch): partition = partition.with_transform(apply_transforms) trainsets.append(partition["train"]) validsets.append(partition["test"]) - testset = fds.load_full("test") + testset = fds.load_split("test") testset = testset.with_transform(apply_transforms) return trainsets, validsets, testset diff --git a/examples/embedded-devices/client_tf.py b/examples/embedded-devices/client_tf.py index d59b31ab1569..49c63ce5d9dc 100644 --- a/examples/embedded-devices/client_tf.py +++ b/examples/embedded-devices/client_tf.py @@ -51,7 +51,7 @@ def prepare_dataset(use_mnist: bool): ) x_test, y_test = partition["test"][img_key] / 255.0, partition["test"]["label"] partitions.append(((x_train, y_train), (x_test, y_test))) - data_centralized = fds.load_full("test") + data_centralized = fds.load_split("test") data_centralized.set_format("numpy") x_centralized = data_centralized[img_key] / 255.0 y_centralized = data_centralized["label"] diff --git a/examples/quickstart-sklearn-tabular/client.py b/examples/quickstart-sklearn-tabular/client.py index fcab8f5d5612..b7e3046c822d 100644 --- a/examples/quickstart-sklearn-tabular/client.py +++ b/examples/quickstart-sklearn-tabular/client.py @@ -28,7 +28,7 @@ dataset = fds.load_partition(partition_id, 
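Several of the updated examples combine `load_split` with a NumPy view of the data for server-side evaluation; a condensed sketch of that pattern:

```python
from flwr_datasets import FederatedDataset

fds = FederatedDataset(dataset="mnist", partitioners={"train": 10})

# Centralized test set as NumPy arrays, e.g. for a server-side evaluate_fn
test = fds.load_split("test").with_format("numpy")
x_test, y_test = test["image"] / 255.0, test["label"]
```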
"train").with_format("pandas")[:] X = dataset[["petal_length", "petal_width", "sepal_length", "sepal_width"]] y = dataset["species"] - unique_labels = fds.load_full("train").unique("species") + unique_labels = fds.load_split("train").unique("species") # Split the on edge data: 80% train, 20% test X_train, X_test = X[: int(0.8 * len(X))], X[int(0.8 * len(X)) :] y_train, y_test = y[: int(0.8 * len(y))], y[int(0.8 * len(y)) :] diff --git a/examples/simulation-pytorch/sim.ipynb b/examples/simulation-pytorch/sim.ipynb index e27721a7fa5f..6dda1ef9319d 100644 --- a/examples/simulation-pytorch/sim.ipynb +++ b/examples/simulation-pytorch/sim.ipynb @@ -197,7 +197,7 @@ "# Download MNIST dataset and partition the \"train\" partition (so one can be assigned to each client)\n", "mnist_fds = FederatedDataset(dataset=\"mnist\", partitioners={\"train\": NUM_CLIENTS})\n", "# Let's keep the test set as is, and use it to evaluate the global model on the server\n", - "centralized_testset = mnist_fds.load_full(\"test\")" + "centralized_testset = mnist_fds.load_split(\"test\")" ] }, { diff --git a/examples/simulation-pytorch/sim.py b/examples/simulation-pytorch/sim.py index ca9e6f0e8366..6fb750f2e59c 100644 --- a/examples/simulation-pytorch/sim.py +++ b/examples/simulation-pytorch/sim.py @@ -169,7 +169,7 @@ def evaluate( # Download MNIST dataset and partition it mnist_fds = FederatedDataset(dataset="mnist", partitioners={"train": NUM_CLIENTS}) -centralized_testset = mnist_fds.load_full("test") +centralized_testset = mnist_fds.load_split("test") # Configure the strategy strategy = fl.server.strategy.FedAvg( diff --git a/examples/simulation-tensorflow/sim.ipynb b/examples/simulation-tensorflow/sim.ipynb index 9acfba99237c..797e2dcc603e 100644 --- a/examples/simulation-tensorflow/sim.ipynb +++ b/examples/simulation-tensorflow/sim.ipynb @@ -247,7 +247,7 @@ "# Download MNIST dataset and partition it\n", "mnist_fds = FederatedDataset(dataset=\"mnist\", partitioners={\"train\": NUM_CLIENTS})\n", "# Get the whole test set for centralised evaluation\n", - "centralized_testset = mnist_fds.load_full(\"test\").to_tf_dataset(\n", + "centralized_testset = mnist_fds.load_split(\"test\").to_tf_dataset(\n", " columns=\"image\", label_cols=\"label\", batch_size=64\n", ")\n", "\n", diff --git a/examples/simulation-tensorflow/sim.py b/examples/simulation-tensorflow/sim.py index 2a19e131fe79..e94e5ec96850 100644 --- a/examples/simulation-tensorflow/sim.py +++ b/examples/simulation-tensorflow/sim.py @@ -131,7 +131,7 @@ def evaluate( # Download MNIST dataset and partition it mnist_fds = FederatedDataset(dataset="mnist", partitioners={"train": NUM_CLIENTS}) # Get the whole test set for centralised evaluation -centralized_testset = mnist_fds.load_full("test").to_tf_dataset( +centralized_testset = mnist_fds.load_split("test").to_tf_dataset( columns="image", label_cols="label", batch_size=64 ) diff --git a/examples/sklearn-logreg-mnist/server.py b/examples/sklearn-logreg-mnist/server.py index 8541100c3a26..e0af91fabcee 100644 --- a/examples/sklearn-logreg-mnist/server.py +++ b/examples/sklearn-logreg-mnist/server.py @@ -17,7 +17,7 @@ def get_evaluate_fn(model: LogisticRegression): # Load test data here to avoid the overhead of doing it in `evaluate` itself fds = FederatedDataset(dataset="mnist", partitioners={"train": 10}) - dataset = fds.load_full("test").with_format("numpy") + dataset = fds.load_split("test").with_format("numpy") X_test, y_test = dataset["image"].reshape((len(dataset), -1)), dataset["label"] # The `evaluate` function 
will be called after every round
diff --git a/examples/vit-finetune/dataset.py b/examples/vit-finetune/dataset.py
index c11eb7c19712..42e0af560a17 100644
--- a/examples/vit-finetune/dataset.py
+++ b/examples/vit-finetune/dataset.py
@@ -21,7 +21,7 @@ def get_dataset_with_partitions(num_partitions: int):
         dataset="nelorth/oxford-flowers", partitioners={"train": num_partitions}
     )

-    centralized_testset = ox_flowers_fds.load_full("test")
+    centralized_testset = ox_flowers_fds.load_split("test")

     return ox_flowers_fds, centralized_testset

diff --git a/examples/xgboost-comprehensive/client.py b/examples/xgboost-comprehensive/client.py
index 66daed449fd5..2d54c3fd63c7 100644
--- a/examples/xgboost-comprehensive/client.py
+++ b/examples/xgboost-comprehensive/client.py
@@ -43,7 +43,7 @@
 if args.centralised_eval:
     # Use centralised test set for evaluation
     train_data = partition
-    valid_data = fds.load_full("test")
+    valid_data = fds.load_split("test")
     valid_data.set_format("numpy")
     num_train = train_data.shape[0]
     num_val = valid_data.shape[0]
diff --git a/examples/xgboost-comprehensive/server.py b/examples/xgboost-comprehensive/server.py
index 2fecbcc65853..939819641438 100644
--- a/examples/xgboost-comprehensive/server.py
+++ b/examples/xgboost-comprehensive/server.py
@@ -35,7 +35,7 @@
         dataset="jxie/higgs", partitioners={"train": 20}, resplitter=resplit
     )
     log(INFO, "Loading centralised test set...")
-    test_set = fds.load_full("test")
+    test_set = fds.load_split("test")
     test_set.set_format("numpy")
     test_dmatrix = transform_dataset_to_dmatrix(test_set)

diff --git a/examples/xgboost-comprehensive/sim.py b/examples/xgboost-comprehensive/sim.py
index b72b23931929..c9481f1cdd5d 100644
--- a/examples/xgboost-comprehensive/sim.py
+++ b/examples/xgboost-comprehensive/sim.py
@@ -86,7 +86,7 @@ def main():
     # Load centralised test set
     if args.centralised_eval or args.centralised_eval_client:
         log(INFO, "Loading centralised test set...")
-        test_data = fds.load_full("test")
+        test_data = fds.load_split("test")
         test_data.set_format("numpy")
         num_test = test_data.shape[0]
         test_dmatrix = transform_dataset_to_dmatrix(test_data)
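Taken together, the hunks in this patch are a mechanical rename at the call sites: every `load_full(...)` call becomes `load_split(...)` and each call keeps its original argument. A minimal usage sketch of the renamed method, assuming a flwr-datasets version in which `load_split` has replaced `load_full` (as the diffs above do), and reusing the `cifar10` setup from those examples; the variable names are illustrative only:

    from flwr_datasets import FederatedDataset

    # Partition "train" across 10 clients; keep "test" whole for centralised evaluation.
    fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10})
    client_partition = fds.load_partition(0, "train")  # one client's shard of "train"
    centralized_test = fds.load_split("test")          # the full, unpartitioned "test" split

The same substitution applies to the notebook and script examples touched above; only the method name changes at these call sites.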
From 4c94ff97f6081060e4b4905905a95c564cb73a51 Mon Sep 17 00:00:00 2001
From: Charles Beauville
Date: Wed, 13 Mar 2024 22:13:33 +0000
Subject: [PATCH 15/57] Store latest commit in separate folder (#3135)

---
 .github/workflows/e2e.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml
index db9f65a4f4f3..62f3c0a78ce4 100644
--- a/.github/workflows/e2e.yml
+++ b/.github/workflows/e2e.yml
@@ -45,6 +45,7 @@ jobs:
           [ -z "${{ github.head_ref }}" ] && dir="${{ github.ref_name }}" || dir="pr/${{ github.head_ref }}"
           echo "DIR=$dir" >> "$GITHUB_OUTPUT"
           aws s3 cp --content-disposition "attachment" --cache-control "no-cache" ./ s3://${{ env.ARTIFACT_BUCKET }}/py/$dir/$sha_short --recursive
+          aws s3 cp --content-disposition "attachment" --cache-control "no-cache" ./ s3://${{ env.ARTIFACT_BUCKET }}/py/$dir/latest --recursive
     outputs:
       whl_path: ${{ steps.upload.outputs.WHL_PATH }}
       short_sha: ${{ steps.upload.outputs.SHORT_SHA }}
From f316049c56fafa9af48e75a1d53a5e0c4fa1c85f Mon Sep 17 00:00:00 2001
From: mohammadnaseri
Date: Wed, 13 Mar 2024 22:25:57 +0000
Subject: [PATCH 16/57] Add logging for DP (#3132)

---
 src/py/flwr/client/mod/centraldp_mods.py        | 10 ++++++++
 src/py/flwr/client/mod/localdp_mod.py           | 14 +++++++++++
 .../server/strategy/dp_adaptive_clipping.py     | 23 ++++++++++++++++++-
 .../flwr/server/strategy/dp_fixed_clipping.py   | 23 ++++++++++++++++++-
 4 files changed, 68 insertions(+), 2 deletions(-)

diff --git a/src/py/flwr/client/mod/centraldp_mods.py b/src/py/flwr/client/mod/centraldp_mods.py
index 0c0134e0f876..4f4a595e8d9c 100644
--- a/src/py/flwr/client/mod/centraldp_mods.py
+++ b/src/py/flwr/client/mod/centraldp_mods.py
@@ -15,6 +15,8 @@
 """Clipping modifiers for central DP with client-side clipping."""

+from logging import INFO
+
 from flwr.client.typing import ClientAppCallable
 from flwr.common import ndarrays_to_parameters, parameters_to_ndarrays
 from flwr.common import recordset_compat as compat
@@ -25,6 +27,7 @@
     compute_clip_model_update,
 )
 from flwr.common.differential_privacy_constants import KEY_CLIPPING_NORM, KEY_NORM_BIT
+from flwr.common.logger import log
 from flwr.common.message import Message


@@ -79,6 +82,8 @@ def fixedclipping_mod(
         clipping_norm,
     )
+    log(INFO, "fixedclipping_mod: parameters are clipped by value: %s.", clipping_norm)
+
     fit_res.parameters = ndarrays_to_parameters(client_to_server_params)
     out_msg.content = compat.fitres_to_recordset(fit_res, keep_input=True)
     return out_msg

@@ -139,6 +144,11 @@ def adaptiveclipping_mod(
         server_to_client_params,
         clipping_norm,
     )
+    log(
+        INFO,
+        "adaptiveclipping_mod: parameters are clipped by value: %s.",
+        clipping_norm,
+    )

     fit_res.parameters = ndarrays_to_parameters(client_to_server_params)

diff --git a/src/py/flwr/client/mod/localdp_mod.py b/src/py/flwr/client/mod/localdp_mod.py
index 5f62c9e44800..3b0311a612b9 100644
--- a/src/py/flwr/client/mod/localdp_mod.py
+++ b/src/py/flwr/client/mod/localdp_mod.py
@@ -15,6 +15,10 @@
 """Local DP modifier."""

+from logging import INFO
+
+import numpy as np
+
 from flwr.client.typing import ClientAppCallable
 from flwr.common import ndarrays_to_parameters, parameters_to_ndarrays
 from flwr.common import recordset_compat as compat
@@ -24,6 +28,7 @@
     add_localdp_gaussian_noise_to_params,
     compute_clip_model_update,
 )
+from flwr.common.logger import log
 from flwr.common.message import Message


@@ -122,6 +127,9 @@ def __call__(
             server_to_client_params,
             self.clipping_norm,
         )
+        log(
+            INFO, "LocalDpMod: parameters are clipped by value: %s.", self.clipping_norm
+        )

         fit_res.parameters = ndarrays_to_parameters(client_to_server_params)

@@ -129,6 +137,12 @@ def __call__(
         add_localdp_gaussian_noise_to_params(
             fit_res.parameters, self.sensitivity, self.epsilon, self.delta
         )
+        log(
+            INFO,
+            "LocalDpMod: local DP noise with "
+            "standard deviation: %s added to parameters.",
+            self.sensitivity * np.sqrt(2 * np.log(1.25 / self.delta)) / self.epsilon,
+        )

         out_msg.content = compat.fitres_to_recordset(fit_res, keep_input=True)
         return out_msg
diff --git a/src/py/flwr/server/strategy/dp_adaptive_clipping.py b/src/py/flwr/server/strategy/dp_adaptive_clipping.py
index d9422c791167..c3c3761f9dc7 100644
--- a/src/py/flwr/server/strategy/dp_adaptive_clipping.py
+++ b/src/py/flwr/server/strategy/dp_adaptive_clipping.py
@@ -19,7 +19,7 @@


 import math
-from logging import WARNING
+from logging import INFO, WARNING
 from typing import Dict, List, Optional, Tuple, Union

 import numpy as np
@@ -39,6 +39,7 @@
     adaptive_clip_inputs_inplace,
     add_gaussian_noise_to_params,
     compute_adaptive_noise_params,
+    compute_stdv,
 )
 from flwr.common.differential_privacy_constants import (
     CLIENTS_DISCREPANCY_WARNING,
@@ -197,6 +198,12 @@ def aggregate_fit(

             norm_bit = adaptive_clip_inputs_inplace(model_update, self.clipping_norm)
             norm_bit_set_count += norm_bit
+            log(
+                INFO,
+                "aggregate_fit: parameters are clipped by value: %s.",
+                self.clipping_norm,
+            )
+
             for i, _ in
enumerate(self.current_round_params): param[i] = self.current_round_params[i] + model_update[i] # Convert back to parameters @@ -225,6 +232,13 @@ def aggregate_fit( self.clipping_norm, self.num_sampled_clients, ) + log( + INFO, + "aggregate_fit: central DP noise with standard deviation: %s added to parameters.", + compute_stdv( + self.noise_multiplier, self.clipping_norm, self.num_sampled_clients + ), + ) return aggregated_params, metrics @@ -408,6 +422,13 @@ def aggregate_fit( self.clipping_norm, self.num_sampled_clients, ) + log( + INFO, + "aggregate_fit: central DP noise with standard deviation: %s added to parameters.", + compute_stdv( + self.noise_multiplier, self.clipping_norm, self.num_sampled_clients + ), + ) return aggregated_params, metrics diff --git a/src/py/flwr/server/strategy/dp_fixed_clipping.py b/src/py/flwr/server/strategy/dp_fixed_clipping.py index 69930ce49c0b..c670c26e4977 100644 --- a/src/py/flwr/server/strategy/dp_fixed_clipping.py +++ b/src/py/flwr/server/strategy/dp_fixed_clipping.py @@ -18,7 +18,7 @@ """ -from logging import WARNING +from logging import INFO, WARNING from typing import Dict, List, Optional, Tuple, Union from flwr.common import ( @@ -35,6 +35,7 @@ from flwr.common.differential_privacy import ( add_gaussian_noise_to_params, compute_clip_model_update, + compute_stdv, ) from flwr.common.differential_privacy_constants import ( CLIENTS_DISCREPANCY_WARNING, @@ -155,6 +156,11 @@ def aggregate_fit( compute_clip_model_update( param, self.current_round_params, self.clipping_norm ) + log( + INFO, + "aggregate_fit: parameters are clipped by value: %s.", + self.clipping_norm, + ) # Convert back to parameters res.parameters = ndarrays_to_parameters(param) @@ -172,6 +178,14 @@ def aggregate_fit( self.num_sampled_clients, ) + log( + INFO, + "aggregate_fit: central DP noise with standard deviation: %s added to parameters.", + compute_stdv( + self.noise_multiplier, self.clipping_norm, self.num_sampled_clients + ), + ) + return aggregated_params, metrics def aggregate_evaluate( @@ -321,6 +335,13 @@ def aggregate_fit( self.clipping_norm, self.num_sampled_clients, ) + log( + INFO, + "aggregate_fit: central DP noise with standard deviation: %s added to parameters.", + compute_stdv( + self.noise_multiplier, self.clipping_norm, self.num_sampled_clients + ), + ) return aggregated_params, metrics def aggregate_evaluate( From d510b35e5e2d020ae72ee9b6e1307bf1f3f8a0d0 Mon Sep 17 00:00:00 2001 From: Taner Topal Date: Wed, 13 Mar 2024 22:42:29 +0000 Subject: [PATCH 17/57] Improve the `flower.toml` loading module (#3136) --- src/py/flwr/cli/flower_toml.py | 51 +++++++++++++++---- src/py/flwr/cli/flower_toml_test.py | 24 ++++----- src/py/flwr/cli/run/run.py | 78 ++++++++--------------------- 3 files changed, 74 insertions(+), 79 deletions(-) diff --git a/src/py/flwr/cli/flower_toml.py b/src/py/flwr/cli/flower_toml.py index 75d4b9f7e2cd..103f83532054 100644 --- a/src/py/flwr/cli/flower_toml.py +++ b/src/py/flwr/cli/flower_toml.py @@ -19,10 +19,45 @@ import tomli -from flwr.common.object_ref import validate +from flwr.common import object_ref -def load_flower_toml(path: Optional[str] = None) -> Optional[Dict[str, Any]]: +def load_and_validate_with_defaults( + path: Optional[str] = None, +) -> Tuple[Optional[Dict[str, Any]], List[str], List[str]]: + """Load and validate flower.toml as dict. + + Returns + ------- + Tuple[Optional[config], List[str], List[str]] + A tuple with the optional config in case it exists and is valid + and associated errors and warnings. 
+ """ + config = load(path) + + if config is None: + errors = [ + "Project configuration could not be loaded. flower.toml does not exist." + ] + return (None, errors, []) + + is_valid, errors, warnings = validate(config) + + if not is_valid: + return (None, errors, warnings) + + # Apply defaults + defaults = { + "flower": { + "engine": {"name": "simulation", "simulation": {"supernode": {"num": 2}}} + } + } + config = apply_defaults(config, defaults) + + return (config, errors, warnings) + + +def load(path: Optional[str] = None) -> Optional[Dict[str, Any]]: """Load flower.toml and return as dict.""" if path is None: cur_dir = os.getcwd() @@ -38,9 +73,7 @@ def load_flower_toml(path: Optional[str] = None) -> Optional[Dict[str, Any]]: return data -def validate_flower_toml_fields( - config: Dict[str, Any] -) -> Tuple[bool, List[str], List[str]]: +def validate_fields(config: Dict[str, Any]) -> Tuple[bool, List[str], List[str]]: """Validate flower.toml fields.""" errors = [] warnings = [] @@ -72,20 +105,20 @@ def validate_flower_toml_fields( return len(errors) == 0, errors, warnings -def validate_flower_toml(config: Dict[str, Any]) -> Tuple[bool, List[str], List[str]]: +def validate(config: Dict[str, Any]) -> Tuple[bool, List[str], List[str]]: """Validate flower.toml.""" - is_valid, errors, warnings = validate_flower_toml_fields(config) + is_valid, errors, warnings = validate_fields(config) if not is_valid: return False, errors, warnings # Validate serverapp - is_valid, reason = validate(config["flower"]["components"]["serverapp"]) + is_valid, reason = object_ref.validate(config["flower"]["components"]["serverapp"]) if not is_valid and isinstance(reason, str): return False, [reason], [] # Validate clientapp - is_valid, reason = validate(config["flower"]["components"]["clientapp"]) + is_valid, reason = object_ref.validate(config["flower"]["components"]["clientapp"]) if not is_valid and isinstance(reason, str): return False, [reason], [] diff --git a/src/py/flwr/cli/flower_toml_test.py b/src/py/flwr/cli/flower_toml_test.py index 67ccab97e59d..72a52e4e8b9b 100644 --- a/src/py/flwr/cli/flower_toml_test.py +++ b/src/py/flwr/cli/flower_toml_test.py @@ -18,11 +18,7 @@ import textwrap from typing import Any, Dict -from .flower_toml import ( - load_flower_toml, - validate_flower_toml, - validate_flower_toml_fields, -) +from .flower_toml import load, validate, validate_fields def test_load_flower_toml_load_from_cwd(tmp_path: str) -> None: @@ -68,7 +64,7 @@ def test_load_flower_toml_load_from_cwd(tmp_path: str) -> None: f.write(textwrap.dedent(flower_toml_content)) # Execute - config = load_flower_toml() + config = load() # Assert assert config == expected_config @@ -119,7 +115,7 @@ def test_load_flower_toml_from_path(tmp_path: str) -> None: f.write(textwrap.dedent(flower_toml_content)) # Execute - config = load_flower_toml(path=os.path.join(tmp_path, "flower.toml")) + config = load(path=os.path.join(tmp_path, "flower.toml")) # Assert assert config == expected_config @@ -133,7 +129,7 @@ def test_validate_flower_toml_fields_empty() -> None: config: Dict[str, Any] = {} # Execute - is_valid, errors, warnings = validate_flower_toml_fields(config) + is_valid, errors, warnings = validate_fields(config) # Assert assert not is_valid @@ -155,7 +151,7 @@ def test_validate_flower_toml_fields_no_flower() -> None: } # Execute - is_valid, errors, warnings = validate_flower_toml_fields(config) + is_valid, errors, warnings = validate_fields(config) # Assert assert not is_valid @@ -178,7 +174,7 @@ def 
test_validate_flower_toml_fields_no_flower_components() -> None: } # Execute - is_valid, errors, warnings = validate_flower_toml_fields(config) + is_valid, errors, warnings = validate_fields(config) # Assert assert not is_valid @@ -201,7 +197,7 @@ def test_validate_flower_toml_fields_no_server_and_client_app() -> None: } # Execute - is_valid, errors, warnings = validate_flower_toml_fields(config) + is_valid, errors, warnings = validate_fields(config) # Assert assert not is_valid @@ -224,7 +220,7 @@ def test_validate_flower_toml_fields() -> None: } # Execute - is_valid, errors, warnings = validate_flower_toml_fields(config) + is_valid, errors, warnings = validate_fields(config) # Assert assert is_valid @@ -252,7 +248,7 @@ def test_validate_flower_toml() -> None: } # Execute - is_valid, errors, warnings = validate_flower_toml(config) + is_valid, errors, warnings = validate(config) # Assert assert is_valid @@ -280,7 +276,7 @@ def test_validate_flower_toml_fail() -> None: } # Execute - is_valid, errors, warnings = validate_flower_toml(config) + is_valid, errors, warnings = validate(config) # Assert assert not is_valid diff --git a/src/py/flwr/cli/run/run.py b/src/py/flwr/cli/run/run.py index d0838d18d7e4..98b5da1843a6 100644 --- a/src/py/flwr/cli/run/run.py +++ b/src/py/flwr/cli/run/run.py @@ -18,64 +18,34 @@ import typer -from flwr.cli.flower_toml import apply_defaults, load_flower_toml, validate_flower_toml +from flwr.cli import flower_toml from flwr.simulation.run_simulation import _run_simulation def run() -> None: """Run Flower project.""" - print( - typer.style("Loading project configuration... ", fg=typer.colors.BLUE), - end="", - ) - config = load_flower_toml() - if not config: - print( - typer.style( - "Project configuration could not be loaded. " - "flower.toml does not exist.", - fg=typer.colors.RED, - bold=True, - ) + typer.secho("Loading project configuration... ", fg=typer.colors.BLUE) + + config, errors, warnings = flower_toml.load_and_validate_with_defaults() + + if config is None: + typer.secho( + "Project configuration could not be loaded.\nflower.toml is invalid:\n" + + "\n".join([f"- {line}" for line in errors]), + fg=typer.colors.RED, + bold=True, ) sys.exit() - print(typer.style("Success", fg=typer.colors.GREEN)) - print( - typer.style("Validating project configuration... 
", fg=typer.colors.BLUE), - end="", - ) - is_valid, errors, warnings = validate_flower_toml(config) if warnings: - print( - typer.style( - "Project configuration is missing the following " - "recommended properties:\n" - + "\n".join([f"- {line}" for line in warnings]), - fg=typer.colors.RED, - bold=True, - ) - ) - - if not is_valid: - print( - typer.style( - "Project configuration could not be loaded.\nflower.toml is invalid:\n" - + "\n".join([f"- {line}" for line in errors]), - fg=typer.colors.RED, - bold=True, - ) + typer.secho( + "Project configuration is missing the following " + "recommended properties:\n" + "\n".join([f"- {line}" for line in warnings]), + fg=typer.colors.RED, + bold=True, ) - sys.exit() - print(typer.style("Success", fg=typer.colors.GREEN)) - # Apply defaults - defaults = { - "flower": { - "engine": {"name": "simulation", "simulation": {"supernode": {"num": 2}}} - } - } - config = apply_defaults(config, defaults) + typer.secho("Success", fg=typer.colors.GREEN) server_app_ref = config["flower"]["components"]["serverapp"] client_app_ref = config["flower"]["components"]["clientapp"] @@ -84,19 +54,15 @@ def run() -> None: if engine == "simulation": num_supernodes = config["flower"]["engine"]["simulation"]["supernode"]["num"] - print( - typer.style("Starting run... ", fg=typer.colors.BLUE), - ) + typer.secho("Starting run... ", fg=typer.colors.BLUE) _run_simulation( server_app_attr=server_app_ref, client_app_attr=client_app_ref, num_supernodes=num_supernodes, ) else: - print( - typer.style( - f"Engine '{engine}' is not yet supported in `flwr run`", - fg=typer.colors.RED, - bold=True, - ) + typer.secho( + f"Engine '{engine}' is not yet supported in `flwr run`", + fg=typer.colors.RED, + bold=True, ) From 808ef75affa51536cc352f3a05861a7b1865721a Mon Sep 17 00:00:00 2001 From: Adam Narozniak <51029327+adam-narozniak@users.noreply.github.com> Date: Wed, 13 Mar 2024 22:54:39 +0000 Subject: [PATCH 18/57] Rename node_id to partition_id in Flower Datasets (#3129) Co-authored-by: jafermarq --- datasets/README.md | 2 +- datasets/flwr_datasets/federated_dataset.py | 6 +- .../partitioner/dirichlet_partitioner.py | 78 +++++----- .../partitioner/dirichlet_partitioner_test.py | 12 +- .../partitioner/exponential_partitioner.py | 6 +- .../partitioner/iid_partitioner.py | 6 +- .../inner_dirichlet_partitioner.py | 67 +++++---- .../inner_dirichlet_partitioner_test.py | 4 +- .../partitioner/linear_partitioner.py | 6 +- .../partitioner/natural_id_partitioner.py | 35 ++--- .../natural_id_partitioner_test.py | 18 ++- .../flwr_datasets/partitioner/partitioner.py | 4 +- .../partitioner/shard_partitioner.py | 138 ++++++++++-------- .../partitioner/shard_partitioner_test.py | 72 ++++----- .../partitioner/size_partitioner.py | 78 +++++----- .../partitioner/size_partitioner_test.py | 6 +- .../partitioner/square_partitioner.py | 6 +- 17 files changed, 289 insertions(+), 255 deletions(-) diff --git a/datasets/README.md b/datasets/README.md index fb274a9f4bc1..cf5caac3e1cd 100644 --- a/datasets/README.md +++ b/datasets/README.md @@ -59,7 +59,7 @@ If you plan to change the type of the dataset to run the code with your ML frame # Usage -Flower Datasets exposes the `FederatedDataset` abstraction to represent the dataset needed for federated learning/evaluation/analytics. It has two powerful methods that let you handle the dataset preprocessing: `load_partition(node_id, split)` and `load_split(split)`. 
+Flower Datasets exposes the `FederatedDataset` abstraction to represent the dataset needed for federated learning/evaluation/analytics. It has two powerful methods that let you handle the dataset preprocessing: `load_partition(partition_id, split)` and `load_split(split)`. Here's a basic quickstart example of how to partition the MNIST dataset: diff --git a/datasets/flwr_datasets/federated_dataset.py b/datasets/flwr_datasets/federated_dataset.py index fc94be3c39a1..37f1e084d4c6 100644 --- a/datasets/flwr_datasets/federated_dataset.py +++ b/datasets/flwr_datasets/federated_dataset.py @@ -138,7 +138,7 @@ def __init__( def load_partition( self, - node_id: int, + partition_id: int, split: Optional[str] = None, ) -> Union[Dataset, List[Dataset], DatasetDict]: """Load the partition specified by the idx in the selected split. @@ -148,7 +148,7 @@ def load_partition( Parameters ---------- - node_id : int + partition_id : int Partition index for the selected split, idx in {0, ..., num_partitions - 1}. split : Optional[str] Name of the (partitioned) split (e.g. "train", "test"). You can skip this @@ -179,7 +179,7 @@ def load_partition( self._check_if_split_possible_to_federate(split) partitioner: Partitioner = self._partitioners[split] self._assign_dataset_to_partitioner(split) - partition = partitioner.load_partition(node_id) + partition = partitioner.load_partition(partition_id) if self._partition_division is None: return partition partition_division = self._partition_division.get(split) diff --git a/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py b/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py index cb23acea01b6..f3feb2174bde 100644 --- a/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py +++ b/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py @@ -25,7 +25,7 @@ from flwr_datasets.partitioner.partitioner import Partitioner -# pylint: disable=R0902, R0912 +# pylint: disable=R0902, R0912, R0914 class DirichletPartitioner(Partitioner): """Partitioner based on Dirichlet distribution. @@ -39,10 +39,10 @@ class DirichletPartitioner(Partitioner): even though the alpha stays the same). The notion of balancing is explicitly introduced here (not mentioned in paper but - implemented in the code). It is a mechanism that excludes the node from - assigning new samples to it if the current number of samples on that node exceeds - the average number that the node would get in case of even data distribution. - It is controlled by`self_balancing` parameter. + implemented in the code). It is a mechanism that excludes the partition from + assigning new samples to it if the current number of samples on that partition + exceeds the average number that the partition would get in case of even data + distribution. It is controlled by`self_balancing` parameter. Parameters ---------- @@ -61,7 +61,7 @@ class DirichletPartitioner(Partitioner): paper's code although not mentioned in paper itself). shuffle: bool Whether to randomize the order of samples. Shuffling applied after the - samples assignment to nodes. + samples assignment to partitions. seed: int Seed used for dataset shuffling. It has no effect if `shuffle` is False. 
@@ -78,7 +78,9 @@ class DirichletPartitioner(Partitioner): >>> print(partition[0]) # Print the first example {'image': , 'label': 4} - >>> partition_sizes = [len(fds.load_partition(node_id)) for node_id in range(10)] + >>> partition_sizes = partition_sizes = [ + >>> len(fds.load_partition(partition_id)) for partition_id in range(10) + >>> ] >>> print(sorted(partition_sizes)) [2134, 2615, 3646, 6011, 6170, 6386, 6715, 7653, 8435, 10235] """ @@ -107,17 +109,17 @@ def __init__( # pylint: disable=R0913 # Utility attributes # The attributes below are determined during the first call to load_partition - self._avg_num_of_samples_per_node: Optional[float] = None + self._avg_num_of_samples_per_partition: Optional[float] = None self._unique_classes: Optional[Union[List[int], List[str]]] = None - self._node_id_to_indices: Dict[int, List[int]] = {} - self._node_id_to_indices_determined = False + self._partition_id_to_indices: Dict[int, List[int]] = {} + self._partition_id_to_indices_determined = False - def load_partition(self, node_id: int) -> datasets.Dataset: + def load_partition(self, partition_id: int) -> datasets.Dataset: """Load a partition based on the partition index. Parameters ---------- - node_id : int + partition_id : int the index that corresponds to the requested partition Returns @@ -129,14 +131,14 @@ def load_partition(self, node_id: int) -> datasets.Dataset: # requested. Only the first call creates the indices assignments for all the # partition indices. self._check_num_partitions_correctness_if_needed() - self._determine_node_id_to_indices_if_needed() - return self.dataset.select(self._node_id_to_indices[node_id]) + self._determine_partition_id_to_indices_if_needed() + return self.dataset.select(self._partition_id_to_indices[partition_id]) @property def num_partitions(self) -> int: """Total number of partitions.""" self._check_num_partitions_correctness_if_needed() - self._determine_node_id_to_indices_if_needed() + self._determine_partition_id_to_indices_if_needed() return self._num_partitions def _initialize_alpha( @@ -192,16 +194,20 @@ def _initialize_alpha( ) return alpha - def _determine_node_id_to_indices_if_needed(self) -> None: # pylint: disable=R0914 + def _determine_partition_id_to_indices_if_needed( + self, + ) -> None: """Create an assignment of indices to the partition indices.""" - if self._node_id_to_indices_determined: + if self._partition_id_to_indices_determined: return # Generate information needed for Dirichlet partitioning self._unique_classes = self.dataset.unique(self._partition_by) assert self._unique_classes is not None # This is needed only if self._self_balancing is True (the default option) - self._avg_num_of_samples_per_node = self.dataset.num_rows / self._num_partitions + self._avg_num_of_samples_per_partition = ( + self.dataset.num_rows / self._num_partitions + ) # Change targets list data type to numpy targets = np.array(self.dataset[self._partition_by]) @@ -210,10 +216,10 @@ def _determine_node_id_to_indices_if_needed(self) -> None: # pylint: disable=R0 # min_partition_size is reached. 
sampling_try = 0 while True: - # Prepare data structure to store indices assigned to node ids - node_id_to_indices: Dict[int, List[int]] = {} + # Prepare data structure to store indices assigned to partition ids + partition_id_to_indices: Dict[int, List[int]] = {} for nid in range(self._num_partitions): - node_id_to_indices[nid] = [] + partition_id_to_indices[nid] = [] # Iterated over all unique labels (they are not necessarily of type int) for k in self._unique_classes: @@ -228,16 +234,16 @@ def _determine_node_id_to_indices_if_needed(self) -> None: # pylint: disable=R0 nid ] # Balancing (not mentioned in the paper but implemented) - # Do not assign additional samples to the node if it already has more - # than the average numbers of samples per partition. Note that it might - # especially affect classes that are later in the order. This is the - # reason for more sparse division that the alpha might suggest. + # Do not assign additional samples to the partition if it already has + # more than the average numbers of samples per partition. Note that it + # might especially affect classes that are later in the order. This is + # the reason for more sparse division that the alpha might suggest. if self._self_balancing: - assert self._avg_num_of_samples_per_node is not None + assert self._avg_num_of_samples_per_partition is not None for nid in nid_to_proportion_of_k_samples.copy(): if ( - len(node_id_to_indices[nid]) - > self._avg_num_of_samples_per_node + len(partition_id_to_indices[nid]) + > self._avg_num_of_samples_per_partition ): nid_to_proportion_of_k_samples[nid] = 0 @@ -262,18 +268,20 @@ def _determine_node_id_to_indices_if_needed(self) -> None: # pylint: disable=R0 ) # Append new indices (coming from class k) to the existing indices - for nid, indices in node_id_to_indices.items(): + for nid, indices in partition_id_to_indices.items(): indices.extend(split_indices[nid].tolist()) # Determine if the indices assignment meets the min_partition_size # If it does not mean the requirement repeat the Dirichlet sampling process # Otherwise break the while loop min_sample_size_on_client = min( - len(indices) for indices in node_id_to_indices.values() + len(indices) for indices in partition_id_to_indices.values() ) if min_sample_size_on_client >= self._min_partition_size: break - sample_sizes = [len(indices) for indices in node_id_to_indices.values()] + sample_sizes = [ + len(indices) for indices in partition_id_to_indices.values() + ] alpha_not_met = [ self._alpha[i] for i, ss in enumerate(sample_sizes) @@ -309,15 +317,15 @@ def _determine_node_id_to_indices_if_needed(self) -> None: # pylint: disable=R0 # Shuffle the indices not to have the datasets with targets in sequences like # [00000, 11111, ...]) if the shuffle is True if self._shuffle: - for indices in node_id_to_indices.values(): + for indices in partition_id_to_indices.values(): # In place shuffling self._rng.shuffle(indices) - self._node_id_to_indices = node_id_to_indices - self._node_id_to_indices_determined = True + self._partition_id_to_indices = partition_id_to_indices + self._partition_id_to_indices_determined = True def _check_num_partitions_correctness_if_needed(self) -> None: """Test num_partitions when the dataset is given (in load_partition).""" - if not self._node_id_to_indices_determined: + if not self._partition_id_to_indices_determined: if self._num_partitions > self.dataset.num_rows: raise ValueError( "The number of partitions needs to be smaller than the number of " diff --git 
a/datasets/flwr_datasets/partitioner/dirichlet_partitioner_test.py b/datasets/flwr_datasets/partitioner/dirichlet_partitioner_test.py index c123f84effb7..b2407b5d5822 100644 --- a/datasets/flwr_datasets/partitioner/dirichlet_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/dirichlet_partitioner_test.py @@ -77,7 +77,9 @@ def test_valid_initialization( def test_min_partition_size_requirement(self) -> None: """Test if partitions are created with min partition size required.""" _, partitioner = _dummy_setup(3, 0.5, 100, "labels") - partition_list = [partitioner.load_partition(node_id) for node_id in [0, 1, 2]] + partition_list = [ + partitioner.load_partition(partition_id) for partition_id in [0, 1, 2] + ] self.assertTrue( all(len(p) > partitioner._min_partition_size for p in partition_list) ) @@ -87,14 +89,14 @@ def test_alpha_in_ndarray_initialization(self) -> None: _, partitioner = _dummy_setup(3, np.array([1.0, 1.0, 1.0]), 100, "labels") self.assertTrue(np.all(partitioner._alpha == np.array([1.0, 1.0, 1.0]))) - def test__determine_node_id_to_indices(self) -> None: + def test__determine_partition_id_to_indices(self) -> None: """Test the determine_nod_id_to_indices matches the flag after the call.""" num_partitions, alpha, num_rows, partition_by = 3, 0.5, 100, "labels" _, partitioner = _dummy_setup(num_partitions, alpha, num_rows, partition_by) - partitioner._determine_node_id_to_indices_if_needed() + partitioner._determine_partition_id_to_indices_if_needed() self.assertTrue( - partitioner._node_id_to_indices_determined - and len(partitioner._node_id_to_indices) == num_partitions + partitioner._partition_id_to_indices_determined + and len(partitioner._partition_id_to_indices) == num_partitions ) diff --git a/datasets/flwr_datasets/partitioner/exponential_partitioner.py b/datasets/flwr_datasets/partitioner/exponential_partitioner.py index 10b11eb3e126..d35944f29f6f 100644 --- a/datasets/flwr_datasets/partitioner/exponential_partitioner.py +++ b/datasets/flwr_datasets/partitioner/exponential_partitioner.py @@ -21,7 +21,7 @@ class ExponentialPartitioner(SizePartitioner): - """Partitioner creates partitions of size that are correlated with exp(node_id). + """Partitioner creates partitions of size that are correlated with exp(id). The amount of data each client gets is correlated with the exponent of partition ID. For instance, if the IDs range from 1 to M, client with ID 1 gets e units of @@ -29,7 +29,7 @@ class ExponentialPartitioner(SizePartitioner): The floor operation is applied on each of these numbers, it means floor(2.71...) = 2; e^2 ~ 7.39 floor(7.39) = 7. The number is rounded down = the fraction is always cut. The remainders of theses unassigned (fraction) samples is added to the - biggest partition (the one with the biggest node_id). + biggest partition (the one with the biggest partition_id). 
Parameters ---------- @@ -38,6 +38,6 @@ class ExponentialPartitioner(SizePartitioner): """ def __init__(self, num_partitions: int) -> None: - super().__init__(num_partitions=num_partitions, node_id_to_size_fn=np.exp) + super().__init__(num_partitions=num_partitions, partition_id_to_size_fn=np.exp) if num_partitions <= 0: raise ValueError("The number of partitions must be greater than zero.") diff --git a/datasets/flwr_datasets/partitioner/iid_partitioner.py b/datasets/flwr_datasets/partitioner/iid_partitioner.py index faa1dfa10615..ceddd386c7d3 100644 --- a/datasets/flwr_datasets/partitioner/iid_partitioner.py +++ b/datasets/flwr_datasets/partitioner/iid_partitioner.py @@ -34,12 +34,12 @@ def __init__(self, num_partitions: int) -> None: raise ValueError("The number of partitions must be greater than zero.") self._num_partitions = num_partitions - def load_partition(self, node_id: int) -> datasets.Dataset: + def load_partition(self, partition_id: int) -> datasets.Dataset: """Load a single IID partition based on the partition index. Parameters ---------- - node_id : int + partition_id : int the index that corresponds to the requested partition Returns @@ -48,7 +48,7 @@ def load_partition(self, node_id: int) -> datasets.Dataset: single dataset partition """ return self.dataset.shard( - num_shards=self._num_partitions, index=node_id, contiguous=True + num_shards=self._num_partitions, index=partition_id, contiguous=True ) @property diff --git a/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner.py b/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner.py index bf07ab3591f5..e3e46813dfc8 100644 --- a/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner.py +++ b/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner.py @@ -49,7 +49,7 @@ class InnerDirichletPartitioner(Partitioner): # pylint: disable=R0902 number of unique classes) shuffle: bool Whether to randomize the order of samples. Shuffling applied after the - samples assignment to nodes. + samples assignment to partitions. seed: int Seed used for dataset shuffling. It has no effect if `shuffle` is False. @@ -91,16 +91,15 @@ def __init__( # pylint: disable=R0913 self._num_unique_classes: Optional[int] = None self._num_partitions = len(self._partition_sizes) - # self._avg_num_of_samples_per_node: Optional[float] = None - self._node_id_to_indices: Dict[int, List[int]] = {} - self._node_id_to_indices_determined = False + self._partition_id_to_indices: Dict[int, List[int]] = {} + self._partition_id_to_indices_determined = False - def load_partition(self, node_id: int) -> datasets.Dataset: + def load_partition(self, partition_id: int) -> datasets.Dataset: """Load a partition based on the partition index. 
Parameters ---------- - node_id : int + partition_id : int the index that corresponds to the requested partition Returns @@ -116,8 +115,8 @@ def load_partition(self, node_id: int) -> datasets.Dataset: self._check_the_sum_of_partition_sizes() self._determine_num_unique_classes_if_needed() self._alpha = self._initialize_alpha_if_needed(self._initial_alpha) - self._determine_node_id_to_indices_if_needed() - return self.dataset.select(self._node_id_to_indices[node_id]) + self._determine_partition_id_to_indices_if_needed() + return self.dataset.select(self._partition_id_to_indices[partition_id]) @property def num_partitions(self) -> int: @@ -127,7 +126,7 @@ def num_partitions(self) -> int: self._check_the_sum_of_partition_sizes() self._determine_num_unique_classes_if_needed() self._alpha = self._initialize_alpha_if_needed(self._initial_alpha) - self._determine_node_id_to_indices_if_needed() + self._determine_partition_id_to_indices_if_needed() return self._num_partitions def _initialize_alpha_if_needed( @@ -190,9 +189,11 @@ def _initialize_alpha_if_needed( ) return alpha - def _determine_node_id_to_indices_if_needed(self) -> None: # pylint: disable=R0914 + def _determine_partition_id_to_indices_if_needed( + self, + ) -> None: # pylint: disable=R0914 """Create an assignment of indices to the partition indices.""" - if self._node_id_to_indices_determined: + if self._partition_id_to_indices_determined: return # Create class priors for the whole partitioning process @@ -209,23 +210,25 @@ def _determine_node_id_to_indices_if_needed(self) -> None: # pylint: disable=R0 for cid in range(self._num_partitions) ] - # Node id to number of sample left for allocation for that node id - node_id_to_left_to_allocate = dict( + # Node id to number of sample left for allocation for that partition id + partition_id_to_left_to_allocate = dict( zip(range(self._num_partitions), self._partition_sizes) ) - not_full_node_ids = list(range(self._num_partitions)) - while np.sum(list(node_id_to_left_to_allocate.values())) != 0: - # Choose a node - current_node_id = self._rng.choice(not_full_node_ids) - # If current node is full resample a client - if node_id_to_left_to_allocate[current_node_id] == 0: - # When the node is full, exclude it from the sampling nodes list - not_full_node_ids.pop(not_full_node_ids.index(current_node_id)) + not_full_partition_ids = list(range(self._num_partitions)) + while np.sum(list(partition_id_to_left_to_allocate.values())) != 0: + # Choose a partition + current_partition_id = self._rng.choice(not_full_partition_ids) + # If current partition is full resample a client + if partition_id_to_left_to_allocate[current_partition_id] == 0: + # When the partition is full, exclude it from the sampling list + not_full_partition_ids.pop( + not_full_partition_ids.index(current_partition_id) + ) continue - node_id_to_left_to_allocate[current_node_id] -= 1 + partition_id_to_left_to_allocate[current_partition_id] -= 1 # Access the label distribution of the chosen client - current_probabilities = class_priors[current_node_id] + current_probabilities = class_priors[current_partition_id] while True: # curr_class = np.argmax(np.random.uniform() <= curr_prior) curr_class = self._rng.choice( @@ -240,32 +243,32 @@ def _determine_node_id_to_indices_if_needed(self) -> None: # pylint: disable=R0 row_sums = class_priors.sum(axis=1, keepdims=True) class_priors = class_priors / row_sums # Adjust the current_probabilities (it won't sum up to 1 otherwise) - current_probabilities = class_priors[current_node_id] + 
current_probabilities = class_priors[current_partition_id] continue class_sizes[curr_class] -= 1 # Store sample index at the empty array cell - index = node_id_to_left_to_allocate[current_node_id] - client_indices[current_node_id][index] = idx_list[curr_class][ + index = partition_id_to_left_to_allocate[current_partition_id] + client_indices[current_partition_id][index] = idx_list[curr_class][ class_sizes[curr_class] ] break - node_id_to_indices = { + partition_id_to_indices = { cid: client_indices[cid].tolist() for cid in range(self._num_partitions) } # Shuffle the indices if the shuffle is True. # Note that the samples from this partitioning do not necessarily require # shuffling, the order should exhibit consecutive samples. if self._shuffle: - for indices in node_id_to_indices.values(): + for indices in partition_id_to_indices.values(): # In place shuffling self._rng.shuffle(indices) - self._node_id_to_indices = node_id_to_indices - self._node_id_to_indices_determined = True + self._partition_id_to_indices = partition_id_to_indices + self._partition_id_to_indices_determined = True def _check_num_partitions_correctness_if_needed(self) -> None: """Test num_partitions when the dataset is given (in load_partition).""" - if not self._node_id_to_indices_determined: + if not self._partition_id_to_indices_determined: if self._num_partitions > self.dataset.num_rows: raise ValueError( "The number of partitions needs to be smaller or equal to " @@ -274,7 +277,7 @@ def _check_num_partitions_correctness_if_needed(self) -> None: def _check_partition_sizes_correctness_if_needed(self) -> None: """Test partition_sizes when the dataset is given (in load_partition).""" - if not self._node_id_to_indices_determined: + if not self._partition_id_to_indices_determined: if sum(self._partition_sizes) > self.dataset.num_rows: raise ValueError( "The sum of the `partition_sizes` needs to be smaller or equal to " diff --git a/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner_test.py b/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner_test.py index 0c5fb502870e..86dc8a5df532 100644 --- a/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner_test.py @@ -58,7 +58,7 @@ def test_correct_num_of_partitions(self) -> None: _, partitioner = _dummy_setup(num_rows, partition_by, partition_sizes, alpha) _ = partitioner.load_partition(0) self.assertEqual( - len(partitioner._node_id_to_indices.keys()), len(partition_sizes) + len(partitioner._partition_id_to_indices.keys()), len(partition_sizes) ) def test_correct_partition_sizes(self) -> None: @@ -71,7 +71,7 @@ def test_correct_partition_sizes(self) -> None: _, partitioner = _dummy_setup(num_rows, partition_by, partition_sizes, alpha) _ = partitioner.load_partition(0) sizes_created = [ - len(indices) for indices in partitioner._node_id_to_indices.values() + len(indices) for indices in partitioner._partition_id_to_indices.values() ] self.assertEqual(sorted(sizes_created), partition_sizes) diff --git a/datasets/flwr_datasets/partitioner/linear_partitioner.py b/datasets/flwr_datasets/partitioner/linear_partitioner.py index f77b0b87146d..84d419ab5592 100644 --- a/datasets/flwr_datasets/partitioner/linear_partitioner.py +++ b/datasets/flwr_datasets/partitioner/linear_partitioner.py @@ -19,7 +19,7 @@ class LinearPartitioner(SizePartitioner): - """Partitioner creates partitions of size that are linearly correlated with node_id. 
+ """Partitioner creates partitions of size that are linearly correlated with id. The amount of data each client gets is linearly correlated with the partition ID. For instance, if the IDs range from 1 to M, client with ID 1 gets 1 unit of data, @@ -32,6 +32,8 @@ class LinearPartitioner(SizePartitioner): """ def __init__(self, num_partitions: int) -> None: - super().__init__(num_partitions=num_partitions, node_id_to_size_fn=lambda x: x) + super().__init__( + num_partitions=num_partitions, partition_id_to_size_fn=lambda x: x + ) if num_partitions <= 0: raise ValueError("The number of partitions must be greater than zero.") diff --git a/datasets/flwr_datasets/partitioner/natural_id_partitioner.py b/datasets/flwr_datasets/partitioner/natural_id_partitioner.py index 947501965cc6..8bad0668595b 100644 --- a/datasets/flwr_datasets/partitioner/natural_id_partitioner.py +++ b/datasets/flwr_datasets/partitioner/natural_id_partitioner.py @@ -29,28 +29,28 @@ def __init__( partition_by: str, ): super().__init__() - self._node_id_to_natural_id: Dict[int, str] = {} + self._partition_id_to_natural_id: Dict[int, str] = {} self._partition_by = partition_by - def _create_int_node_id_to_natural_id(self) -> None: + def _create_int_partition_id_to_natural_id(self) -> None: """Create a mapping from int indices to unique client ids from dataset. Natural ids come from the column specified in `partition_by`. """ unique_natural_ids = self.dataset.unique(self._partition_by) - self._node_id_to_natural_id = dict( + self._partition_id_to_natural_id = dict( zip(range(len(unique_natural_ids)), unique_natural_ids) ) - def load_partition(self, node_id: int) -> datasets.Dataset: - """Load a single partition corresponding to a single `node_id`. + def load_partition(self, partition_id: int) -> datasets.Dataset: + """Load a single partition corresponding to a single `partition_id`. The choice of the partition is based on unique integers assigned to each natural id present in the dataset in the `partition_by` column. Parameters ---------- - node_id : int + partition_id : int the index that corresponds to the requested partition Returns @@ -58,31 +58,32 @@ def load_partition(self, node_id: int) -> datasets.Dataset: dataset_partition : Dataset single dataset partition """ - if len(self._node_id_to_natural_id) == 0: - self._create_int_node_id_to_natural_id() + if len(self._partition_id_to_natural_id) == 0: + self._create_int_partition_id_to_natural_id() return self.dataset.filter( - lambda row: row[self._partition_by] == self._node_id_to_natural_id[node_id] + lambda row: row[self._partition_by] + == self._partition_id_to_natural_id[partition_id] ) @property def num_partitions(self) -> int: """Total number of partitions.""" - if len(self._node_id_to_natural_id) == 0: - self._create_int_node_id_to_natural_id() - return len(self._node_id_to_natural_id) + if len(self._partition_id_to_natural_id) == 0: + self._create_int_partition_id_to_natural_id() + return len(self._partition_id_to_natural_id) @property - def node_id_to_natural_id(self) -> Dict[int, str]: + def partition_id_to_natural_id(self) -> Dict[int, str]: """Node id to corresponding natural id present. Natural ids are the unique values in `partition_by` column in dataset. 
""" - return self._node_id_to_natural_id + return self._partition_id_to_natural_id # pylint: disable=R0201 - @node_id_to_natural_id.setter - def node_id_to_natural_id(self, value: Dict[int, str]) -> None: + @partition_id_to_natural_id.setter + def partition_id_to_natural_id(self, value: Dict[int, str]) -> None: raise AttributeError( - "Setting the node_id_to_natural_id dictionary is not allowed." + "Setting the partition_id_to_natural_id dictionary is not allowed." ) diff --git a/datasets/flwr_datasets/partitioner/natural_id_partitioner_test.py b/datasets/flwr_datasets/partitioner/natural_id_partitioner_test.py index fb296294aec3..f447634ad9ed 100644 --- a/datasets/flwr_datasets/partitioner/natural_id_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/natural_id_partitioner_test.py @@ -65,9 +65,11 @@ def test_load_partition_num_partitions( Only the correct data is tested in this method. """ _, partitioner = _dummy_setup(num_rows, num_unique_natural_id) - # Simulate usage to start lazy node_id_to_natural_id creation + # Simulate usage to start lazy partition_id_to_natural_id creation _ = partitioner.load_partition(0) - self.assertEqual(len(partitioner.node_id_to_natural_id), num_unique_natural_id) + self.assertEqual( + len(partitioner.partition_id_to_natural_id), num_unique_natural_id + ) @parameterized.expand( # type: ignore # num_rows, num_unique_natural_ids @@ -105,14 +107,16 @@ def test_correct_number_of_partitions( ) -> None: """Test if the # of available partitions is equal to # of unique clients.""" _, partitioner = _dummy_setup(num_rows, num_unique_natural_ids) - _ = partitioner.load_partition(node_id=0) - self.assertEqual(len(partitioner.node_id_to_natural_id), num_unique_natural_ids) + _ = partitioner.load_partition(partition_id=0) + self.assertEqual( + len(partitioner.partition_id_to_natural_id), num_unique_natural_ids + ) - def test_cannot_set_node_id_to_natural_id(self) -> None: - """Test the lack of ability to set node_id_to_natural_id.""" + def test_cannot_set_partition_id_to_natural_id(self) -> None: + """Test the lack of ability to set partition_id_to_natural_id.""" _, partitioner = _dummy_setup(num_rows=10, n_unique_natural_ids=2) with self.assertRaises(AttributeError): - partitioner.node_id_to_natural_id = {0: "0"} + partitioner.partition_id_to_natural_id = {0: "0"} if __name__ == "__main__": diff --git a/datasets/flwr_datasets/partitioner/partitioner.py b/datasets/flwr_datasets/partitioner/partitioner.py index 73eb6f4a17b3..10ade52640e8 100644 --- a/datasets/flwr_datasets/partitioner/partitioner.py +++ b/datasets/flwr_datasets/partitioner/partitioner.py @@ -53,12 +53,12 @@ def dataset(self, value: Dataset) -> None: self._dataset = value @abstractmethod - def load_partition(self, node_id: int) -> Dataset: + def load_partition(self, partition_id: int) -> Dataset: """Load a single partition based on the partition index. 
Parameters ---------- - node_id : int + partition_id : int the index that corresponds to the requested partition Returns diff --git a/datasets/flwr_datasets/partitioner/shard_partitioner.py b/datasets/flwr_datasets/partitioner/shard_partitioner.py index 05444f537c8c..a973f7e5bcb9 100644 --- a/datasets/flwr_datasets/partitioner/shard_partitioner.py +++ b/datasets/flwr_datasets/partitioner/shard_partitioner.py @@ -15,7 +15,7 @@ """Shard partitioner class.""" -# pylint: disable=R0912 +# pylint: disable=R0912, R0914 import math from typing import Dict, List, Optional @@ -31,7 +31,7 @@ class ShardPartitioner(Partitioner): # pylint: disable=R0902 The algorithm works as follows: the dataset is sorted by label e.g. [samples with label 1, samples with labels 2 ...], then the shards are created, with each shard of size = `shard_size` if provided or automatically calculated: - shards_size = len(dataset) / `num_partitions` * `num_shards_per_node`. + shards_size = len(dataset) / `num_partitions` * `num_shards_per_partition`. A shard is just a block (chunk) of a `dataset` that contains `shard_size` consecutive samples. There might be shards that contain samples associated with more @@ -42,18 +42,18 @@ class ShardPartitioner(Partitioner): # pylint: disable=R0902 has samples with more than one unique label is when the shard size is bigger than the number of samples of a certain class. - Each partition is created from `num_shards_per_node` that are chosen randomly. + Each partition is created from `num_shards_per_partition` that are chosen randomly. There are a few ways of partitioning data that result in certain properties (depending on the parameters specification): - 1) same number of shards per nodes + the same shard size (specify: - a) `num_shards_per_nodes`, `shard_size`; or b) `num_shards_per_node`) + 1) same number of shards per partitions + the same shard size (specify: + a) `num_shards_per_partitions`, `shard_size`; or b) `num_shards_per_partition`) In case of b the `shard_size` is calculated as floor(len(dataset) / - (`num_shards_per_nodes` * `num_partitions`)) - 2) possibly different number of shards per node (use nearly all data) + the same - shard size (specify: `shard_size` + `keep_incomplete_shard=False`) - 3) possibly different number of shards per node (use all data) + possibly different - shard size (specify: `shard_size` + `keep_incomplete_shard=True`) + (`num_shards_per_partitions` * `num_partitions`)) + 2) possibly different number of shards per partition (use nearly all data) + the + same shard size (specify: `shard_size` + `keep_incomplete_shard=False`) + 3) possibly different number of shards per partition (use all data) + possibly + different shard size (specify: `shard_size` + `keep_incomplete_shard=True`) Algorithm based on the description in Communication-Efficient Learning of Deep @@ -68,7 +68,7 @@ class ShardPartitioner(Partitioner): # pylint: disable=R0902 The total number of partitions that the data will be divided into. partition_by : str Column name of the labels (targets) based on which Dirichlet sampling works. - num_shards_per_node : Optional[int] + num_shards_per_partition : Optional[int] Number of shards to assign to a single partitioner. It's an alternative to `num_partitions`. shard_size : Optional[int] @@ -79,40 +79,44 @@ class ShardPartitioner(Partitioner): # pylint: disable=R0902 others). If it is dropped each shard is equal size. (It does not mean that each client gets equal number of shards, which only happens if `num_partitions` % `num_shards` = 0). 
This parameter has no effect if - `num_shards_per_nodes` and `shard_size` are specified. + `num_shards_per_partitions` and `shard_size` are specified. shuffle: bool Whether to randomize the order of samples. Shuffling applied after the - samples assignment to nodes. + samples assignment to partitions. seed: int Seed used for dataset shuffling. It has no effect if `shuffle` is False. Examples -------- - 1) If you need same number of shards per nodes + the same shard size (and you know - both of these values) + 1) If you need same number of shards per partitions + the same shard size (and you + know both of these values) >>> from flwr_datasets import FederatedDataset >>> from flwr_datasets.partitioner import ShardPartitioner >>> >>> partitioner = ShardPartitioner(num_partitions=10, partition_by="label", - >>> num_shards_per_node=2, shard_size=1_000) + >>> num_shards_per_partition=2, shard_size=1_000) >>> fds = FederatedDataset(dataset="mnist", partitioners={"train": partitioner}) >>> partition = fds.load_partition(0) >>> print(partition[0]) # Print the first example {'image': , 'label': 3} - >>> partition_sizes = [len(fds.load_partition(node_id)) for node_id in range(10)] + >>> partition_sizes = [ + >>> len(fds.load_partition(partition_id)) for partition_id in range(10) + >>> ] >>> print(partition_sizes) [2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000] 2) If you want to use nearly all the data and do not need to have the number of - shard per each node to be the same + shard per each partition to be the same >>> from flwr_datasets import FederatedDataset >>> from flwr_datasets.partitioner import ShardPartitioner >>> >>> partitioner = ShardPartitioner(num_partitions=9, partition_by="label", >>> shard_size=1_000) >>> fds = FederatedDataset(dataset="mnist", partitioners={"train": partitioner}) - >>> partition_sizes = [len(fds.load_partition(node_id)) for node_id in range(9)] + >>> partition_sizes = [ + >>> len(fds.load_partition(partition_id)) for partition_id in range(9) + >>> ] >>> print(partition_sizes) [7000, 7000, 7000, 7000, 7000, 7000, 6000, 6000, 6000] @@ -123,7 +127,9 @@ class ShardPartitioner(Partitioner): # pylint: disable=R0902 >>> partitioner = ShardPartitioner(num_partitions=10, partition_by="label", >>> shard_size=990, keep_incomplete_shard=True) >>> fds = FederatedDataset(dataset="mnist", partitioners={"train": partitioner}) - >>> partition_sizes = [len(fds.load_partition(node_id)) for node_id in range(10)] + >>> partition_sizes = [ + >>> len(fds.load_partition(partition_id)) for partition_id in range(10) + >>> ] >>> print(sorted(partition_sizes)) [5550, 5940, 5940, 5940, 5940, 5940, 5940, 5940, 5940, 6930] """ @@ -132,7 +138,7 @@ def __init__( # pylint: disable=R0913 self, num_partitions: int, partition_by: str, - num_shards_per_node: Optional[int] = None, + num_shards_per_partition: Optional[int] = None, shard_size: Optional[int] = None, keep_incomplete_shard: bool = False, shuffle: bool = True, @@ -143,8 +149,10 @@ def __init__( # pylint: disable=R0913 _check_if_natual_number(num_partitions, "num_partitions") self._num_partitions = num_partitions self._partition_by = partition_by - _check_if_natual_number(num_shards_per_node, "num_shards_per_node", True) - self._num_shards_per_node = num_shards_per_node + _check_if_natual_number( + num_shards_per_partition, "num_shards_per_partition", True + ) + self._num_shards_per_partition = num_shards_per_partition self._num_shards_used: Optional[int] = None _check_if_natual_number(shard_size, "shard_size", True) 
self._shard_size = shard_size @@ -154,15 +162,15 @@ def __init__( # pylint: disable=R0913 # Utility attributes self._rng = np.random.default_rng(seed=self._seed) # NumPy random generator - self._node_id_to_indices: Dict[int, List[int]] = {} - self._node_id_to_indices_determined = False + self._partition_id_to_indices: Dict[int, List[int]] = {} + self._partition_id_to_indices_determined = False - def load_partition(self, node_id: int) -> datasets.Dataset: + def load_partition(self, partition_id: int) -> datasets.Dataset: """Load a partition based on the partition index. Parameters ---------- - node_id : int + partition_id : int the index that corresponds to the requested partition Returns @@ -176,8 +184,8 @@ def load_partition(self, node_id: int) -> datasets.Dataset: self._check_num_partitions_correctness_if_needed() self._check_possibility_of_partitions_creation() self._sort_dataset_if_needed() - self._determine_node_id_to_indices_if_needed() - return self.dataset.select(self._node_id_to_indices[node_id]) + self._determine_partition_id_to_indices_if_needed() + return self.dataset.select(self._partition_id_to_indices[partition_id]) @property def num_partitions(self) -> int: @@ -185,27 +193,29 @@ def num_partitions(self) -> int: self._check_num_partitions_correctness_if_needed() self._check_possibility_of_partitions_creation() self._sort_dataset_if_needed() - self._determine_node_id_to_indices_if_needed() + self._determine_partition_id_to_indices_if_needed() return self._num_partitions - def _determine_node_id_to_indices_if_needed(self) -> None: # pylint: disable=R0914 - """Assign sample indices to each node id. + def _determine_partition_id_to_indices_if_needed( + self, + ) -> None: + """Assign sample indices to each partition id. This method works on sorted datasets. A "shard" is a part of the dataset of consecutive samples (if self._keep_incomplete_shard is False, each shard is same size). 
""" - # No need to do anything if that node_id_to_indices are already determined - if self._node_id_to_indices_determined: + # No need to do anything if that partition_id_to_indices are already determined + if self._partition_id_to_indices_determined: return - # One of the specification allows to skip the `num_shards_per_node` param - if self._num_shards_per_node is not None: + # One of the specification allows to skip the `num_shards_per_partition` param + if self._num_shards_per_partition is not None: self._num_shards_used = int( - self._num_partitions * self._num_shards_per_node + self._num_partitions * self._num_shards_per_partition ) - num_shards_per_node_array = ( - np.ones(self._num_partitions) * self._num_shards_per_node + num_shards_per_partition_array = ( + np.ones(self._num_partitions) * self._num_shards_per_partition ) if self._shard_size is None: self._compute_shard_size_if_missing() @@ -222,11 +232,11 @@ def _determine_node_id_to_indices_if_needed(self) -> None: # pylint: disable=R0 num_usable_shards_in_dataset = int( math.floor(len(self.dataset) / self._shard_size) ) - elif self._num_shards_per_node is None: + elif self._num_shards_per_partition is None: if self._shard_size is None: raise ValueError( "The shard_size needs to be specified if the " - "num_shards_per_node is None" + "num_shards_per_partition is None" ) if self._keep_incomplete_shard is False: self._num_shards_used = int( @@ -249,22 +259,22 @@ def _determine_node_id_to_indices_if_needed(self) -> None: # pylint: disable=R0 else: raise ValueError( "The keep_incomplete_shards need to be specified " - "when _num_shards_per_node is None." + "when _num_shards_per_partition is None." ) - num_shards_per_node = int(self._num_shards_used / self._num_partitions) - # Assign the shards per nodes (so far, the same as in ideal case) - num_shards_per_node_array = ( - np.ones(self._num_partitions) * num_shards_per_node + num_shards_per_partition = int(self._num_shards_used / self._num_partitions) + # Assign the shards per partitions (so far, the same as in ideal case) + num_shards_per_partition_array = ( + np.ones(self._num_partitions) * num_shards_per_partition ) - num_shards_assigned = self._num_partitions * num_shards_per_node + num_shards_assigned = self._num_partitions * num_shards_per_partition num_shards_to_assign = self._num_shards_used - num_shards_assigned # Assign the "missing" shards for i in range(num_shards_to_assign): - num_shards_per_node_array[i] += 1 + num_shards_per_partition_array[i] += 1 else: raise ValueError( - "The specification of nm_shards_per_node and " + "The specification of nm_shards_per_partition and " "keep_incomplete_shards is not correct." 
) @@ -276,35 +286,37 @@ def _determine_node_id_to_indices_if_needed(self) -> None: # pylint: disable=R0 ) indices_on_which_to_split_shards = np.cumsum( - num_shards_per_node_array, dtype=int + num_shards_per_partition_array, dtype=int ) shard_indices_array = self._rng.permutation(num_usable_shards_in_dataset)[ : self._num_shards_used ] - # Randomly assign shards to node_id + # Randomly assign shards to partition_id nid_to_shard_indices = np.split( shard_indices_array, indices_on_which_to_split_shards )[:-1] - node_id_to_indices: Dict[int, List[int]] = { + partition_id_to_indices: Dict[int, List[int]] = { cid: [] for cid in range(self._num_partitions) } - # Compute node_id to sample indices based on the shard indices - for node_id in range(self._num_partitions): - for shard_idx in nid_to_shard_indices[node_id]: + # Compute partition_id to sample indices based on the shard indices + for partition_id in range(self._num_partitions): + for shard_idx in nid_to_shard_indices[partition_id]: start_id = int(shard_idx * self._shard_size) end_id = min(int((shard_idx + 1) * self._shard_size), len(self.dataset)) - node_id_to_indices[node_id].extend(list(range(start_id, end_id))) + partition_id_to_indices[partition_id].extend( + list(range(start_id, end_id)) + ) if self._shuffle: - for indices in node_id_to_indices.values(): + for indices in partition_id_to_indices.values(): # In place shuffling self._rng.shuffle(indices) - self._node_id_to_indices = node_id_to_indices - self._node_id_to_indices_determined = True + self._partition_id_to_indices = partition_id_to_indices + self._partition_id_to_indices_determined = True def _check_num_partitions_correctness_if_needed(self) -> None: """Test num_partitions when the dataset is given (in load_partition).""" - if not self._node_id_to_indices_determined: + if not self._partition_id_to_indices_determined: if self._num_partitions > self.dataset.num_rows: raise ValueError( "The number of partitions needs to be smaller than the number of " @@ -317,7 +329,7 @@ def _sort_dataset_if_needed(self) -> None: Operation only needed to be performed one time. It's required for the creation of shards with the same labels. 
""" - if self._node_id_to_indices_determined: + if self._partition_id_to_indices_determined: return self._dataset = self.dataset.sort(self._partition_by) @@ -332,9 +344,9 @@ def _compute_shard_size_if_missing(self) -> None: self._shard_size = int(num_rows / self._num_shards_used) def _check_possibility_of_partitions_creation(self) -> None: - if self._shard_size is not None and self._num_shards_per_node is not None: + if self._shard_size is not None and self._num_shards_per_partition is not None: implied_min_dataset_size = ( - self._shard_size * self._num_shards_per_node * self._num_partitions + self._shard_size * self._num_shards_per_partition * self._num_partitions ) if implied_min_dataset_size > len(self.dataset): raise ValueError( diff --git a/datasets/flwr_datasets/partitioner/shard_partitioner_test.py b/datasets/flwr_datasets/partitioner/shard_partitioner_test.py index 47968699bba7..d6fa8b529595 100644 --- a/datasets/flwr_datasets/partitioner/shard_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/shard_partitioner_test.py @@ -27,7 +27,7 @@ def _dummy_setup( num_rows: int, partition_by: str, num_partitions: int, - num_shards_per_node: Optional[int], + num_shards_per_partition: Optional[int], shard_size: Optional[int], keep_incomplete_shard: bool = False, ) -> Tuple[Dataset, ShardPartitioner]: @@ -39,7 +39,7 @@ def _dummy_setup( dataset = Dataset.from_dict(data) partitioner = ShardPartitioner( num_partitions=num_partitions, - num_shards_per_node=num_shards_per_node, + num_shards_per_partition=num_shards_per_partition, partition_by=partition_by, shard_size=shard_size, keep_incomplete_shard=keep_incomplete_shard, @@ -51,7 +51,7 @@ def _dummy_setup( class TestShardPartitionerSpec1(unittest.TestCase): """Test first possible initialization of ShardPartitioner. - Specify num_shards_per_node and shard_size arguments. + Specify num_shards_per_partition and shard_size arguments. 
""" def test_correct_num_partitions(self) -> None: @@ -59,19 +59,19 @@ def test_correct_num_partitions(self) -> None: partition_by = "label" num_rows = 113 num_partitions = 3 - num_shards_per_node = 3 + num_shards_per_partition = 3 shard_size = 10 keep_incomplete_shard = False _, partitioner = _dummy_setup( num_rows, partition_by, num_partitions, - num_shards_per_node, + num_shards_per_partition, shard_size, keep_incomplete_shard, ) _ = partitioner.load_partition(0) - num_partitions_created = len(partitioner._node_id_to_indices.keys()) + num_partitions_created = len(partitioner._partition_id_to_indices.keys()) self.assertEqual(num_partitions_created, num_partitions) def test_correct_partition_sizes(self) -> None: @@ -79,14 +79,14 @@ def test_correct_partition_sizes(self) -> None: partition_by = "label" num_rows = 113 num_partitions = 3 - num_shards_per_node = 3 + num_shards_per_partition = 3 shard_size = 10 keep_incomplete_shard = False _, partitioner = _dummy_setup( num_rows, partition_by, num_partitions, - num_shards_per_node, + num_shards_per_partition, shard_size, keep_incomplete_shard, ) @@ -102,14 +102,14 @@ def test_unique_samples(self) -> None: partition_by = "label" num_rows = 113 num_partitions = 3 - num_shards_per_node = 3 + num_shards_per_partition = 3 shard_size = 10 keep_incomplete_shard = False _, partitioner = _dummy_setup( num_rows, partition_by, num_partitions, - num_shards_per_node, + num_shards_per_partition, shard_size, keep_incomplete_shard, ) @@ -133,19 +133,19 @@ def test_correct_num_partitions(self) -> None: partition_by = "label" num_rows = 113 num_partitions = 3 - num_shards_per_node = None + num_shards_per_partition = None shard_size = 10 keep_incomplete_shard = False _, partitioner = _dummy_setup( num_rows, partition_by, num_partitions, - num_shards_per_node, + num_shards_per_partition, shard_size, keep_incomplete_shard, ) _ = partitioner.load_partition(0) - num_partitions_created = len(partitioner._node_id_to_indices.keys()) + num_partitions_created = len(partitioner._partition_id_to_indices.keys()) self.assertEqual(num_partitions_created, num_partitions) def test_correct_partition_sizes(self) -> None: @@ -153,14 +153,14 @@ def test_correct_partition_sizes(self) -> None: partition_by = "label" num_rows = 113 num_partitions = 3 - num_shards_per_node = None + num_shards_per_partition = None shard_size = 10 keep_incomplete_shard = False _, partitioner = _dummy_setup( num_rows, partition_by, num_partitions, - num_shards_per_node, + num_shards_per_partition, shard_size, keep_incomplete_shard, ) @@ -176,14 +176,14 @@ def test_unique_samples(self) -> None: partition_by = "label" num_rows = 113 num_partitions = 3 - num_shards_per_node = None + num_shards_per_partition = None shard_size = 10 keep_incomplete_shard = False _, partitioner = _dummy_setup( num_rows, partition_by, num_partitions, - num_shards_per_node, + num_shards_per_partition, shard_size, keep_incomplete_shard, ) @@ -207,19 +207,19 @@ def test_correct_num_partitions(self) -> None: partition_by = "label" num_rows = 113 num_partitions = 3 - num_shards_per_node = None + num_shards_per_partition = None shard_size = 10 keep_incomplete_shard = True _, partitioner = _dummy_setup( num_rows, partition_by, num_partitions, - num_shards_per_node, + num_shards_per_partition, shard_size, keep_incomplete_shard, ) _ = partitioner.load_partition(0) - num_partitions_created = len(partitioner._node_id_to_indices.keys()) + num_partitions_created = len(partitioner._partition_id_to_indices.keys()) 
self.assertEqual(num_partitions_created, num_partitions) def test_correct_partition_sizes(self) -> None: @@ -227,14 +227,14 @@ def test_correct_partition_sizes(self) -> None: partition_by = "label" num_rows = 113 num_partitions = 3 - num_shards_per_node = None + num_shards_per_partition = None shard_size = 10 keep_incomplete_shard = True _, partitioner = _dummy_setup( num_rows, partition_by, num_partitions, - num_shards_per_node, + num_shards_per_partition, shard_size, keep_incomplete_shard, ) @@ -250,14 +250,14 @@ def test_unique_samples(self) -> None: partition_by = "label" num_rows = 113 num_partitions = 3 - num_shards_per_node = None + num_shards_per_partition = None shard_size = 10 keep_incomplete_shard = True _, partitioner = _dummy_setup( num_rows, partition_by, num_partitions, - num_shards_per_node, + num_shards_per_partition, shard_size, keep_incomplete_shard, ) @@ -272,7 +272,7 @@ def test_unique_samples(self) -> None: class TestShardPartitionerSpec4(unittest.TestCase): """Test fourth possible initialization of ShardPartitioner. - Specify num_shards_per_node but not shard_size arguments. + Specify num_shards_per_partition but not shard_size arguments. """ def test_correct_num_partitions(self) -> None: @@ -280,19 +280,19 @@ def test_correct_num_partitions(self) -> None: partition_by = "label" num_rows = 113 num_partitions = 3 - num_shards_per_node = 3 + num_shards_per_partition = 3 shard_size = None keep_incomplete_shard = False _, partitioner = _dummy_setup( num_rows, partition_by, num_partitions, - num_shards_per_node, + num_shards_per_partition, shard_size, keep_incomplete_shard, ) _ = partitioner.load_partition(0) - num_partitions_created = len(partitioner._node_id_to_indices.keys()) + num_partitions_created = len(partitioner._partition_id_to_indices.keys()) self.assertEqual(num_partitions_created, num_partitions) def test_correct_partition_sizes(self) -> None: @@ -300,14 +300,14 @@ def test_correct_partition_sizes(self) -> None: partition_by = "label" num_rows = 113 num_partitions = 3 - num_shards_per_node = 3 + num_shards_per_partition = 3 shard_size = None keep_incomplete_shard = False _, partitioner = _dummy_setup( num_rows, partition_by, num_partitions, - num_shards_per_node, + num_shards_per_partition, shard_size, keep_incomplete_shard, ) @@ -323,14 +323,14 @@ def test_unique_samples(self) -> None: partition_by = "label" num_rows = 113 num_partitions = 3 - num_shards_per_node = 3 + num_shards_per_partition = 3 shard_size = None keep_incomplete_shard = False _, partitioner = _dummy_setup( num_rows, partition_by, num_partitions, - num_shards_per_node, + num_shards_per_partition, shard_size, keep_incomplete_shard, ) @@ -354,14 +354,14 @@ def test_incorrect_specification(self) -> None: partition_by = "label" num_rows = 10 num_partitions = 3 - num_shards_per_node = 2 + num_shards_per_partition = 2 shard_size = 10 keep_incomplete_shard = False _, partitioner = _dummy_setup( num_rows, partition_by, num_partitions, - num_shards_per_node, + num_shards_per_partition, shard_size, keep_incomplete_shard, ) @@ -373,14 +373,14 @@ def test_too_big_shard_size(self) -> None: partition_by = "label" num_rows = 20 num_partitions = 3 - num_shards_per_node = None + num_shards_per_partition = None shard_size = 10 keep_incomplete_shard = False _, partitioner = _dummy_setup( num_rows, partition_by, num_partitions, - num_shards_per_node, + num_shards_per_partition, shard_size, keep_incomplete_shard, ) diff --git a/datasets/flwr_datasets/partitioner/size_partitioner.py 
b/datasets/flwr_datasets/partitioner/size_partitioner.py index 29fc2e5b1add..35937d8b9cc7 100644 --- a/datasets/flwr_datasets/partitioner/size_partitioner.py +++ b/datasets/flwr_datasets/partitioner/size_partitioner.py @@ -24,54 +24,54 @@ class SizePartitioner(Partitioner): - """Base class for the deterministic size partitioning based on the `node_id`. + """Base class for the deterministic size partitioning based on the `partition_id`. - The client with `node_id` has the following relationship regarding the number of - samples. + The client with `partition_id` has the following relationship regarding the number + of samples. - `node_id_to_size_fn(node_id)` ~ number of samples for `node_id` + `partition_id_to_size_fn(partition_id)` ~ number of samples for `partition_id` - If the function doesn't transform the `node_id` it's a linear correlation between - the number of sample for the node and the value of `node_id`. For instance, if the - node ids range from 1 to M, node with id 1 gets 1 unit of data, client 2 gets 2 - units, and so on, up to node M which gets M units. + If the function doesn't transform the `partition_id` it's a linear correlation + between the number of sample for the partition and the value of `partition_id`. For + instance, if the partition ids range from 1 to M, partition with id 1 gets 1 unit of + data, client 2 gets 2 units, and so on, up to partition M which gets M units. - Note that size corresponding to the `node_id` is deterministic, yet in case of - different dataset shuffling the assignment of samples to `node_id` will vary. + Note that size corresponding to the `partition_id` is deterministic, yet in case of + different dataset shuffling the assignment of samples to `partition_id` will vary. Parameters ---------- num_partitions : int The total number of partitions that the data will be divided into. - node_id_to_size_fn : Callable - Function that defines the relationship between node id and the number of + partition_id_to_size_fn : Callable + Function that defines the relationship between partition id and the number of samples. """ def __init__( self, num_partitions: int, - node_id_to_size_fn: Callable, # type: ignore[type-arg] + partition_id_to_size_fn: Callable, # type: ignore[type-arg] ) -> None: super().__init__() if num_partitions <= 0: raise ValueError("The number of partitions must be greater than zero.") self._num_partitions = num_partitions - self._node_id_to_size_fn = node_id_to_size_fn + self._partition_id_to_size_fn = partition_id_to_size_fn - self._node_id_to_size: Dict[int, int] = {} - self._node_id_to_indices: Dict[int, List[int]] = {} + self._partition_id_to_size: Dict[int, int] = {} + self._partition_id_to_indices: Dict[int, List[int]] = {} # A flag to perform only a single compute to determine the indices - self._node_id_to_indices_determined = False + self._partition_id_to_indices_determined = False - def load_partition(self, node_id: int) -> datasets.Dataset: + def load_partition(self, partition_id: int) -> datasets.Dataset: """Load a single partition based on the partition index. - The number of samples is dependent on the partition node_id. + The number of samples is dependent on the partition partition_id. Parameters ---------- - node_id : int + partition_id : int the index that corresponds to the requested partition Returns @@ -81,28 +81,28 @@ def load_partition(self, node_id: int) -> datasets.Dataset: """ # The partitioning is done lazily - only when the first partition is requested. 
# A single run creates the indices assignments for all the partition indices. - self._determine_node_id_to_indices_if_needed() - return self.dataset.select(self._node_id_to_indices[node_id]) + self._determine_partition_id_to_indices_if_needed() + return self.dataset.select(self._partition_id_to_indices[partition_id]) @property def num_partitions(self) -> int: """Total number of partitions.""" - self._determine_node_id_to_indices_if_needed() + self._determine_partition_id_to_indices_if_needed() return self._num_partitions @property - def node_id_to_size(self) -> Dict[int, int]: + def partition_id_to_size(self) -> Dict[int, int]: """Node id to the number of samples.""" - return self._node_id_to_size + return self._partition_id_to_size @property - def node_id_to_indices(self) -> Dict[int, List[int]]: + def partition_id_to_indices(self) -> Dict[int, List[int]]: """Node id to the list of indices.""" - return self._node_id_to_indices + return self._partition_id_to_indices - def _determine_node_id_to_size(self) -> None: + def _determine_partition_id_to_size(self) -> None: """Determine data quantity associated with partition indices.""" - data_division_in_units = self._node_id_to_size_fn( + data_division_in_units = self._partition_id_to_size_fn( np.linspace(start=1, stop=self._num_partitions, num=self._num_partitions) ) total_units: Union[int, float] = data_division_in_units.sum() @@ -118,25 +118,25 @@ def _determine_node_id_to_size(self) -> None: # If there is any sample(s) left unassigned, assign it to the largest partition. partition_sizes_as_num_of_samples[-1] += left_unassigned_samples for idx, partition_size in enumerate(partition_sizes_as_num_of_samples): - self._node_id_to_size[idx] = partition_size + self._partition_id_to_size[idx] = partition_size - self._check_if_node_id_to_size_possible() + self._check_if_partition_id_to_size_possible() - def _determine_node_id_to_indices_if_needed(self) -> None: + def _determine_partition_id_to_indices_if_needed(self) -> None: """Create an assignment of indices to the partition indices..""" - if self._node_id_to_indices_determined is True: + if self._partition_id_to_indices_determined is True: return - self._determine_node_id_to_size() + self._determine_partition_id_to_size() total_samples_assigned = 0 - for idx, quantity in self._node_id_to_size.items(): - self._node_id_to_indices[idx] = list( + for idx, quantity in self._partition_id_to_size.items(): + self._partition_id_to_indices[idx] = list( range(total_samples_assigned, total_samples_assigned + quantity) ) total_samples_assigned += quantity - self._node_id_to_indices_determined = True + self._partition_id_to_indices_determined = True - def _check_if_node_id_to_size_possible(self) -> None: - all_positive = all(value >= 1 for value in self.node_id_to_size.values()) + def _check_if_partition_id_to_size_possible(self) -> None: + all_positive = all(value >= 1 for value in self.partition_id_to_size.values()) if not all_positive: raise ValueError( f"The given specification of the parameter num_partitions" diff --git a/datasets/flwr_datasets/partitioner/size_partitioner_test.py b/datasets/flwr_datasets/partitioner/size_partitioner_test.py index 390f6a613fce..086ca3731e58 100644 --- a/datasets/flwr_datasets/partitioner/size_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/size_partitioner_test.py @@ -49,13 +49,13 @@ def test_linear_distribution(self, num_partitions: int, num_rows: int) -> None: partitioner.dataset = dataset # Run a single partition loading to trigger the division _ = 
partitioner.load_partition(0) - total_samples = sum(partitioner.node_id_to_size.values()) + total_samples = sum(partitioner.partition_id_to_size.values()) self.assertEqual(total_samples, num_rows) # Testing if each partition is getting more than the previous one last_count = 0 for i in range(num_partitions): - current_count = partitioner.node_id_to_size[i] + current_count = partitioner.partition_id_to_size[i] self.assertGreaterEqual(current_count, last_count) last_count = current_count @@ -77,7 +77,7 @@ def test_undivided_samples(self, num_partitions: int, num_rows: int) -> None: actual_samples_in_last_partition = len( partitioner.load_partition(last_partition_id) ) - expected_samples_in_last_partition = partitioner.node_id_to_size[ + expected_samples_in_last_partition = partitioner.partition_id_to_size[ last_partition_id ] self.assertEqual( diff --git a/datasets/flwr_datasets/partitioner/square_partitioner.py b/datasets/flwr_datasets/partitioner/square_partitioner.py index 109b8397870b..4c894e47eedf 100644 --- a/datasets/flwr_datasets/partitioner/square_partitioner.py +++ b/datasets/flwr_datasets/partitioner/square_partitioner.py @@ -21,7 +21,7 @@ class SquarePartitioner(SizePartitioner): - """Partitioner creates partitions of size that are correlated with squared node_id. + """Partitioner creates partitions of size that are correlated with squared id. The amount of data each client gets is correlated with the squared partition ID. For instance, if the IDs range from 1 to M, client with ID 1 gets 1 unit of data, @@ -34,6 +34,8 @@ class SquarePartitioner(SizePartitioner): """ def __init__(self, num_partitions: int) -> None: - super().__init__(num_partitions=num_partitions, node_id_to_size_fn=np.square) + super().__init__( + num_partitions=num_partitions, partition_id_to_size_fn=np.square + ) if num_partitions <= 0: raise ValueError("The number of partitions must be greater than zero.") From 7b1433da2c53719f17e3e03c8c611592f767e8a0 Mon Sep 17 00:00:00 2001 From: Javier Date: Thu, 14 Mar 2024 00:12:30 +0100 Subject: [PATCH 19/57] Add `count_bytes` method to `RecordSets` (#3083) Co-authored-by: Heng Pan --- src/py/flwr/common/record/configsrecord.py | 38 +++++++++++++- src/py/flwr/common/record/metricsrecord.py | 18 ++++++- src/py/flwr/common/record/parametersrecord.py | 17 +++++++ .../common/record/parametersrecord_test.py | 51 ++++++++++++++++--- src/py/flwr/common/record/recordset_test.py | 39 ++++++++++++++ 5 files changed, 155 insertions(+), 8 deletions(-) diff --git a/src/py/flwr/common/record/configsrecord.py b/src/py/flwr/common/record/configsrecord.py index 704657601f50..471c85f0b961 100644 --- a/src/py/flwr/common/record/configsrecord.py +++ b/src/py/flwr/common/record/configsrecord.py @@ -15,7 +15,7 @@ """ConfigsRecord.""" -from typing import Dict, Optional, get_args +from typing import Dict, List, Optional, get_args from flwr.common.typing import ConfigsRecordValues, ConfigsScalar @@ -85,3 +85,39 @@ def __init__( self[k] = configs_dict[k] if not keep_input: del configs_dict[k] + + def count_bytes(self) -> int: + """Return number of Bytes stored in this object. + + This function counts booleans as occupying 1 Byte. 
+ """ + + def get_var_bytes(value: ConfigsScalar) -> int: + """Return Bytes of value passed.""" + if isinstance(value, bool): + var_bytes = 1 + elif isinstance(value, (int, float)): + var_bytes = ( + 8 # the profobufing represents int/floats in ConfigRecords as 64bit + ) + if isinstance(value, (str, bytes)): + var_bytes = len(value) + return var_bytes + + num_bytes = 0 + + for k, v in self.items(): + if isinstance(v, List): + if isinstance(v[0], (bytes, str)): + # not all str are of equal length necessarily + # for both the footprint of each element is 1 Byte + num_bytes += int(sum(len(s) for s in v)) # type: ignore + else: + num_bytes += get_var_bytes(v[0]) * len(v) + else: + num_bytes += get_var_bytes(v) + + # We also count the bytes footprint of the keys + num_bytes += len(k) + + return num_bytes diff --git a/src/py/flwr/common/record/metricsrecord.py b/src/py/flwr/common/record/metricsrecord.py index 81b02303421b..2b6e584be390 100644 --- a/src/py/flwr/common/record/metricsrecord.py +++ b/src/py/flwr/common/record/metricsrecord.py @@ -15,7 +15,7 @@ """MetricsRecord.""" -from typing import Dict, Optional, get_args +from typing import Dict, List, Optional, get_args from flwr.common.typing import MetricsRecordValues, MetricsScalar @@ -84,3 +84,19 @@ def __init__( self[k] = metrics_dict[k] if not keep_input: del metrics_dict[k] + + def count_bytes(self) -> int: + """Return number of Bytes stored in this object.""" + num_bytes = 0 + + for k, v in self.items(): + if isinstance(v, List): + # both int and float normally take 4 bytes + # But MetricRecords are mapped to 64bit int/float + # during protobuffing + num_bytes += 8 * len(v) + else: + num_bytes += 8 + # We also count the bytes footprint of the keys + num_bytes += len(k) + return num_bytes diff --git a/src/py/flwr/common/record/parametersrecord.py b/src/py/flwr/common/record/parametersrecord.py index 17bf3f608db7..a4a71f751f97 100644 --- a/src/py/flwr/common/record/parametersrecord.py +++ b/src/py/flwr/common/record/parametersrecord.py @@ -117,3 +117,20 @@ def __init__( self[k] = array_dict[k] if not keep_input: del array_dict[k] + + def count_bytes(self) -> int: + """Return number of Bytes stored in this object. + + Note that a small amount of Bytes might also be included in this counting that + correspond to metadata of the serialized object (e.g. of NumPy array) needed for + deseralization. 
+ """ + num_bytes = 0 + + for k, v in self.items(): + num_bytes += len(v.data) + + # We also count the bytes footprint of the keys + num_bytes += len(k) + + return num_bytes diff --git a/src/py/flwr/common/record/parametersrecord_test.py b/src/py/flwr/common/record/parametersrecord_test.py index 9633af7bda6d..e840e5e266e4 100644 --- a/src/py/flwr/common/record/parametersrecord_test.py +++ b/src/py/flwr/common/record/parametersrecord_test.py @@ -14,14 +14,26 @@ # ============================================================================== """Unit tests for ParametersRecord and Array.""" - import unittest +from collections import OrderedDict from io import BytesIO +from typing import List import numpy as np +import pytest + +from flwr.common import ndarray_to_bytes from ..constant import SType -from .parametersrecord import Array +from ..typing import NDArray +from .parametersrecord import Array, ParametersRecord + + +def _get_buffer_from_ndarray(array: NDArray) -> bytes: + """Return a bytes buffer froma given NumPy array.""" + buffer = BytesIO() + np.save(buffer, array, allow_pickle=False) + return buffer.getvalue() class TestArray(unittest.TestCase): @@ -31,16 +43,15 @@ def test_numpy_conversion_valid(self) -> None: """Test the numpy method with valid Array instance.""" # Prepare original_array = np.array([1, 2, 3], dtype=np.float32) - buffer = BytesIO() - np.save(buffer, original_array, allow_pickle=False) - buffer.seek(0) + + buffer = _get_buffer_from_ndarray(original_array) # Execute array_instance = Array( dtype=str(original_array.dtype), shape=list(original_array.shape), stype=SType.NUMPY, - data=buffer.read(), + data=buffer, ) converted_array = array_instance.numpy() @@ -60,3 +71,31 @@ def test_numpy_conversion_invalid(self) -> None: # Execute and assert with self.assertRaises(TypeError): array_instance.numpy() + + +@pytest.mark.parametrize( + "shape, dtype", + [ + ([100], "float32"), + ([31, 31], "int8"), + ([31, 153], "bool_"), # bool_ is represented as a whole Byte in NumPy + ], +) +def test_count_bytes(shape: List[int], dtype: str) -> None: + """Test bytes in a ParametersRecord are computed correctly.""" + original_array = np.random.randn(*shape).astype(np.dtype(dtype)) + + buff = ndarray_to_bytes(original_array) + + buffer = _get_buffer_from_ndarray(original_array) + + array_instance = Array( + dtype=str(original_array.dtype), + shape=list(original_array.shape), + stype=SType.NUMPY, + data=buffer, + ) + key_name = "data" + p_record = ParametersRecord(OrderedDict({key_name: array_instance})) + + assert len(buff) + len(key_name) == p_record.count_bytes() diff --git a/src/py/flwr/common/record/recordset_test.py b/src/py/flwr/common/record/recordset_test.py index bcf5c75a1e02..0e0b149881be 100644 --- a/src/py/flwr/common/record/recordset_test.py +++ b/src/py/flwr/common/record/recordset_test.py @@ -359,3 +359,42 @@ def test_set_configs_to_configsrecord_with_incorrect_types( with pytest.raises(TypeError): c_record.update(my_configs) + + +def test_count_bytes_metricsrecord() -> None: + """Test counting bytes in MetricsRecord.""" + data = {"a": 1, "b": 2.0, "c": [1, 2, 3], "d": [1.0, 2.0, 3.0, 4.0, 5.0]} + bytes_in_dict = 8 + 8 + 3 * 8 + 5 * 8 + bytes_in_dict += 4 # represnting the keys + + m_record = MetricsRecord() + m_record.update(OrderedDict(data)) + record_bytest_count = m_record.count_bytes() + assert bytes_in_dict == record_bytest_count + + +def test_count_bytes_configsrecord() -> None: + """Test counting bytes in ConfigsRecord.""" + data = {"a": 1, "b": 2.0, "c": [1, 2, 3], 
"d": [1.0, 2.0, 3.0, 4.0, 5.0]} + bytes_in_dict = 8 + 8 + 3 * 8 + 5 * 8 + bytes_in_dict += 4 # represnting the keys + + to_add = { + "aa": True, + "bb": "False", + "cc": bytes(9), + "dd": [True, False, False], + "ee": ["True", "False"], + "ff": [bytes(1), bytes(13), bytes(51)], + } + data = {**data, **to_add} + bytes_in_dict += 1 + 5 + 9 + 3 + (4 + 5) + (1 + 13 + 51) + bytes_in_dict += 12 # represnting the keys + + bytes_in_dict = int(bytes_in_dict) + + c_record = ConfigsRecord() + c_record.update(OrderedDict(data)) + + record_bytest_count = c_record.count_bytes() + assert bytes_in_dict == record_bytest_count From 44195b940bd0e54aa36b4de14c212ebf859c68e0 Mon Sep 17 00:00:00 2001 From: Adam Narozniak <51029327+adam-narozniak@users.noreply.github.com> Date: Wed, 13 Mar 2024 23:25:22 +0000 Subject: [PATCH 20/57] Add partitioners property to FederatedDataset (#3096) Co-authored-by: Javier Co-authored-by: Flower <148336023+flwrmachine@users.noreply.github.com> --- datasets/flwr_datasets/federated_dataset.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/datasets/flwr_datasets/federated_dataset.py b/datasets/flwr_datasets/federated_dataset.py index 37f1e084d4c6..ed3d03fd1442 100644 --- a/datasets/flwr_datasets/federated_dataset.py +++ b/datasets/flwr_datasets/federated_dataset.py @@ -213,6 +213,25 @@ def load_split(self, split: str) -> Dataset: self._check_if_split_present(split) return self._dataset[split] + @property + def partitioners(self) -> Dict[str, Partitioner]: + """Dictionary mapping each split to its associated partitioner. + + The returned partitioners have the splits of the dataset assigned to them. + """ + # This function triggers the dataset download (lazy download) and checks + # the partitioner specification correctness (which can also happen lazily only + # after the dataset download). 
+ if not self._dataset_prepared: + self._prepare_dataset() + if self._dataset is None: + raise ValueError("Dataset is not loaded yet.") + partitioners_keys = list(self._partitioners.keys()) + for split in partitioners_keys: + self._check_if_split_present(split) + self._assign_dataset_to_partitioner(split) + return self._partitioners + def _check_if_split_present(self, split: str) -> None: """Check if the split (for partitioning or full return) is in the dataset.""" if self._dataset is None: From 184c10c93f23392629ae7072223292eb06a9a5da Mon Sep 17 00:00:00 2001 From: Javier Date: Thu, 14 Mar 2024 00:38:34 +0100 Subject: [PATCH 21/57] Add mod that logs the size of `ParameterRecords` in a `Message` (#3077) Co-authored-by: Heng Pan --- src/py/flwr/client/mod/__init__.py | 3 ++ src/py/flwr/client/mod/comms_mods.py | 79 ++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+) create mode 100644 src/py/flwr/client/mod/comms_mods.py diff --git a/src/py/flwr/client/mod/__init__.py b/src/py/flwr/client/mod/__init__.py index 69a7d76ce95f..1cd79fa944fe 100644 --- a/src/py/flwr/client/mod/__init__.py +++ b/src/py/flwr/client/mod/__init__.py @@ -16,6 +16,7 @@ from .centraldp_mods import adaptiveclipping_mod, fixedclipping_mod +from .comms_mods import message_size_mod, parameters_size_mod from .localdp_mod import LocalDpMod from .secure_aggregation import secagg_mod, secaggplus_mod from .utils import make_ffn @@ -27,4 +28,6 @@ "make_ffn", "secagg_mod", "secaggplus_mod", + "message_size_mod", + "parameters_size_mod", ] diff --git a/src/py/flwr/client/mod/comms_mods.py b/src/py/flwr/client/mod/comms_mods.py new file mode 100644 index 000000000000..102d2f477262 --- /dev/null +++ b/src/py/flwr/client/mod/comms_mods.py @@ -0,0 +1,79 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Mods that report statistics about message communication.""" + +from logging import INFO + +import numpy as np + +from flwr.client.typing import ClientAppCallable +from flwr.common.context import Context +from flwr.common.logger import log +from flwr.common.message import Message + + +def message_size_mod( + msg: Message, ctxt: Context, call_next: ClientAppCallable +) -> Message: + """Message size mod. + + This mod logs the size in Bytes of the message being transmited. + """ + message_size_in_bytes = 0 + + for p_record in msg.content.parameters_records.values(): + message_size_in_bytes += p_record.count_bytes() + + for c_record in msg.content.configs_records.values(): + message_size_in_bytes += c_record.count_bytes() + + for m_record in msg.content.metrics_records.values(): + message_size_in_bytes += m_record.count_bytes() + + log(INFO, "Message size: %i Bytes", message_size_in_bytes) + + return call_next(msg, ctxt) + + +def parameters_size_mod( + msg: Message, ctxt: Context, call_next: ClientAppCallable +) -> Message: + """Parameters size mod. 
+ + This mod logs the number of parameters transmitted in the message as well as their + size in Bytes. + """ + model_size_stats = {} + parameters_size_in_bytes = 0 + for record_name, p_record in msg.content.parameters_records.items(): + p_record_bytes = p_record.count_bytes() + parameters_size_in_bytes += p_record_bytes + parameter_count = 0 + for array in p_record.values(): + parameter_count += ( + int(np.prod(array.shape)) if array.shape else array.numpy().size + ) + + model_size_stats[f"{record_name}"] = { + "parameters": parameter_count, + "bytes": p_record_bytes, + } + + if model_size_stats: + log(INFO, model_size_stats) + + log(INFO, "Total parameters transmited: %i Bytes", parameters_size_in_bytes) + + return call_next(msg, ctxt) From 0f2df330c06eb28210422b3d343697af1fac5f2d Mon Sep 17 00:00:00 2001 From: Javier Date: Thu, 14 Mar 2024 01:06:02 +0100 Subject: [PATCH 22/57] Make `PyTorch` `flwr run` template use `Flower Datasets` (#3133) --- .../templates/app/code/client.pytorch.py.tpl | 10 ++++- .../templates/app/code/task.pytorch.py.tpl | 44 ++++++++++++------- 2 files changed, 37 insertions(+), 17 deletions(-) diff --git a/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl index bdb5b8fcadf9..187b2301f72b 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl @@ -1,6 +1,7 @@ """$project_name: A Flower / PyTorch app.""" from flwr.client import NumPyClient, ClientApp +from flwr.cli.flower_toml import load_and_validate_with_defaults from $project_name.task import ( Net, @@ -31,10 +32,17 @@ class FlowerClient(NumPyClient): return loss, len(self.valloader.dataset), {"accuracy": accuracy} +# Load config +cfg, *_ = load_and_validate_with_defaults() + def client_fn(cid: str): # Load model and data net = Net().to(DEVICE) - trainloader, valloader = load_data() + engine = cfg["flower"]["engine"] + num_partitions = 2 + if "simulation" in engine: + num_partitions = engine["simulation"]["supernode"]["num"] + trainloader, valloader = load_data(int(cid), num_partitions) # Return Client instance return FlowerClient(net, trainloader, valloader).to_client() diff --git a/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl index 1d727599a1e4..b7f69bf7dce7 100644 --- a/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl @@ -8,7 +8,7 @@ import torch.nn.functional as F from torch.utils.data import DataLoader from torchvision.datasets import CIFAR10 from torchvision.transforms import Compose, Normalize, ToTensor - +from flwr_datasets import FederatedDataset DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") @@ -34,27 +34,39 @@ class Net(nn.Module): return self.fc3(x) -def load_data(): - """Load CIFAR-10 (training and test set).""" - trf = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) - trainset = CIFAR10("./data", train=True, download=True, transform=trf) - testset = CIFAR10("./data", train=False, download=True, transform=trf) - return DataLoader(trainset, batch_size=32, shuffle=True), DataLoader(testset) +def load_data(partition_id: int, total_partitions: int): + """Load partition CIFAR10 data.""" + fds = FederatedDataset(dataset="cifar10", partitioners={"train": total_partitions}) + partition = fds.load_partition(partition_id) + # Divide data on each node: 80% train, 
20% test + partition_train_test = partition.train_test_split(test_size=0.2) + pytorch_transforms = Compose( + [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] + ) + + def apply_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["img"] = [pytorch_transforms(img) for img in batch["img"]] + return batch + + partition_train_test = partition_train_test.with_transform(apply_transforms) + trainloader = DataLoader(partition_train_test["train"], batch_size=32, shuffle=True) + testloader = DataLoader(partition_train_test["test"], batch_size=32) + return trainloader, testloader def train(net, trainloader, valloader, epochs, device): """Train the model on the training set.""" - print("Starting training...") net.to(device) # move model to GPU if available criterion = torch.nn.CrossEntropyLoss().to(device) optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9) net.train() for _ in range(epochs): - for images, labels in trainloader: - images, labels = images.to(device), labels.to(device) + for batch in trainloader: + images = batch["img"] + labels = batch["label"] optimizer.zero_grad() - loss = criterion(net(images), labels) - loss.backward() + criterion(net(images.to(DEVICE)), labels.to(DEVICE)).backward() optimizer.step() train_loss, train_acc = test(net, trainloader) @@ -71,13 +83,13 @@ def train(net, trainloader, valloader, epochs, device): def test(net, testloader): """Validate the model on the test set.""" - net.to(DEVICE) criterion = torch.nn.CrossEntropyLoss() correct, loss = 0, 0.0 with torch.no_grad(): - for images, labels in testloader: - outputs = net(images.to(DEVICE)) - labels = labels.to(DEVICE) + for batch in testloader: + images = batch["img"].to(DEVICE) + labels = batch["label"].to(DEVICE) + outputs = net(images) loss += criterion(outputs, labels).item() correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() accuracy = correct / len(testloader.dataset) From fa853637df62049a84653f056e1584eb19b294d5 Mon Sep 17 00:00:00 2001 From: Javier Date: Thu, 14 Mar 2024 02:06:01 +0100 Subject: [PATCH 23/57] Remove the loading of flower.toml config in app code (#3142) --- .../cli/new/templates/app/code/client.pytorch.py.tpl | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl index 187b2301f72b..d5994716ca74 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl @@ -1,7 +1,6 @@ """$project_name: A Flower / PyTorch app.""" from flwr.client import NumPyClient, ClientApp -from flwr.cli.flower_toml import load_and_validate_with_defaults from $project_name.task import ( Net, @@ -32,17 +31,10 @@ class FlowerClient(NumPyClient): return loss, len(self.valloader.dataset), {"accuracy": accuracy} -# Load config -cfg, *_ = load_and_validate_with_defaults() - def client_fn(cid: str): # Load model and data net = Net().to(DEVICE) - engine = cfg["flower"]["engine"] - num_partitions = 2 - if "simulation" in engine: - num_partitions = engine["simulation"]["supernode"]["num"] - trainloader, valloader = load_data(int(cid), num_partitions) + trainloader, valloader = load_data(partition_id=int(cid), num_partitions=2) # Return Client instance return FlowerClient(net, trainloader, valloader).to_client() From 115e81a707544f44f14cb1eaae69986a03c37f73 Mon Sep 17 00:00:00 2001 From: "Daniel J. 
Beutel" Date: Thu, 14 Mar 2024 08:40:46 +0100 Subject: [PATCH 24/57] Fix PyTorch template (#3144) --- src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl | 2 +- src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl | 4 ++-- src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl | 4 ++-- .../flwr/cli/new/templates/app/requirements.pytorch.txt.tpl | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl index d5994716ca74..a03b1e77e402 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl @@ -34,7 +34,7 @@ class FlowerClient(NumPyClient): def client_fn(cid: str): # Load model and data net = Net().to(DEVICE) - trainloader, valloader = load_data(partition_id=int(cid), num_partitions=2) + trainloader, valloader = load_data(int(cid), 2) # Return Client instance return FlowerClient(net, trainloader, valloader).to_client() diff --git a/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl index b7f69bf7dce7..82e57388fa3e 100644 --- a/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl @@ -34,9 +34,9 @@ class Net(nn.Module): return self.fc3(x) -def load_data(partition_id: int, total_partitions: int): +def load_data(partition_id: int, num_partitions: int): """Load partition CIFAR10 data.""" - fds = FederatedDataset(dataset="cifar10", partitioners={"train": total_partitions}) + fds = FederatedDataset(dataset="cifar10", partitioners={"train": num_partitions}) partition = fds.load_partition(partition_id) # Divide data on each node: 80% train, 20% test partition_train_test = partition.train_test_split(test_size=0.2) diff --git a/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl index 8c67ec43ac07..da0e15b903f8 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl @@ -15,7 +15,7 @@ readme = "README.md" [tool.poetry.dependencies] python = "^3.9" # Mandatory dependencies -flwr = { version = "^1.8.0", extras = ["simulation"] } -flwr-datasets = { version = "^0.0.2", extras = ["vision"] } +flwr-nightly = { version = "1.8.0.dev20240313", extras = ["simulation"] } +flwr-datasets = { version = "0.0.2", extras = ["vision"] } torch = "2.2.1" torchvision = "0.17.1" diff --git a/src/py/flwr/cli/new/templates/app/requirements.pytorch.txt.tpl b/src/py/flwr/cli/new/templates/app/requirements.pytorch.txt.tpl index 016a84043cbe..ddb8a814447b 100644 --- a/src/py/flwr/cli/new/templates/app/requirements.pytorch.txt.tpl +++ b/src/py/flwr/cli/new/templates/app/requirements.pytorch.txt.tpl @@ -1,4 +1,4 @@ -flwr-nightly[simulation]==1.8.0.dev20240309 +flwr-nightly[simulation]==1.8.0.dev20240313 flwr-datasets[vision]==0.0.2 torch==2.2.1 torchvision==0.17.1 From b6a07a9b5e2e6204f372c1bce6ecff54cfa0fefe Mon Sep 17 00:00:00 2001 From: "Daniel J. 
Beutel" Date: Thu, 14 Mar 2024 08:56:25 +0100 Subject: [PATCH 25/57] Improve torch template (#3145) --- src/py/flwr/cli/new/new.py | 2 +- src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/py/flwr/cli/new/new.py b/src/py/flwr/cli/new/new.py index 4fa6c48c7ed5..7eb47e3e3548 100644 --- a/src/py/flwr/cli/new/new.py +++ b/src/py/flwr/cli/new/new.py @@ -147,7 +147,7 @@ def new( ) print( typer.style( - f" cd {project_name}\n" + " pip install .\n flwr run\n", + f" cd {project_name}\n" + " pip install -e .\n flwr run\n", fg=typer.colors.BRIGHT_CYAN, bold=True, ) diff --git a/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl index a03b1e77e402..4f2b26ceddea 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl @@ -41,4 +41,6 @@ def client_fn(cid: str): # Flower ClientApp -app = ClientApp(client_fn) +app = ClientApp( + client_fn, +) From a200622b6ac0432ebaf05b4de7011d46769050b8 Mon Sep 17 00:00:00 2001 From: "Daniel J. Beutel" Date: Thu, 14 Mar 2024 09:07:46 +0100 Subject: [PATCH 26/57] Update CODEOWNERS (#3143) --- .github/CODEOWNERS | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 8dac63a20598..34af632814a3 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -9,6 +9,9 @@ README.md @jafermarq @tanertopal @danieljanes # Flower Baselines /baselines @jafermarq @tanertopal @danieljanes +# Flower Datasets +/datasets @jafermarq @tanertopal @danieljanes + # Flower Examples /examples @jafermarq @tanertopal @danieljanes From f2199aa77a165575f66bcd6e5c5190e2fa3de59b Mon Sep 17 00:00:00 2001 From: Adam Narozniak <51029327+adam-narozniak@users.noreply.github.com> Date: Thu, 14 Mar 2024 09:53:50 +0000 Subject: [PATCH 27/57] Fds add concatenate divisions (#3103) Co-authored-by: jafermarq Co-authored-by: Daniel J. Beutel --- datasets/flwr_datasets/federated_dataset.py | 119 +----------------- .../flwr_datasets/federated_dataset_test.py | 44 +------ datasets/flwr_datasets/utils.py | 69 +++++++++- datasets/flwr_datasets/utils_test.py | 63 ++++++++-- 4 files changed, 124 insertions(+), 171 deletions(-) diff --git a/datasets/flwr_datasets/federated_dataset.py b/datasets/flwr_datasets/federated_dataset.py index ed3d03fd1442..55a7e597f6b4 100644 --- a/datasets/flwr_datasets/federated_dataset.py +++ b/datasets/flwr_datasets/federated_dataset.py @@ -15,7 +15,7 @@ """FederatedDataset.""" -from typing import Dict, List, Optional, Tuple, Union, cast +from typing import Dict, Optional, Tuple, Union import datasets from datasets import Dataset, DatasetDict @@ -25,7 +25,6 @@ _check_if_dataset_tested, _instantiate_partitioners, _instantiate_resplitter_if_needed, - divide_dataset, ) @@ -54,19 +53,6 @@ class FederatedDataset: (representing the number of IID partitions that this split should be partitioned into). One or multiple `Partitioner` objects can be specified in that manner, but at most, one per split. - partition_division : Optional[Union[List[float], Tuple[float, ...], - Dict[str, float], Dict[str, Optional[Union[List[float], Tuple[float, ...], - Dict[str, float]]]]]] - Fractions specifing the division of the partition assiciated with certain split - (and partitioner) that enable returning already divided partition from the - `load_partition` method. 
You can think of this as on-edge division of the data - into multiple divisions (e.g. into train and validation). You can also name the - divisions by using the Dict or create specify it as a List/Tuple. If you - specified a single partitioner you can provide the simplified form e.g. - [0.8, 0.2] or {"partition_train": 0.8, "partition_test": 0.2} but when multiple - partitioners are specified you need to indicate the result of which partitioner - are further divided e.g. {"train": [0.8, 0.2]} would result in dividing only the - partitions that are created from the "train" split. shuffle : bool Whether to randomize the order of samples. Applied prior to resplitting, speratelly to each of the present splits in the dataset. It uses the `seed` @@ -84,14 +70,6 @@ class FederatedDataset: >>> partition = mnist_fds.load_partition(10, "train") >>> # Use test split for centralized evaluation. >>> centralized = mnist_fds.load_split("test") - - Automatically divde the data returned from `load_partition` - >>> mnist_fds = FederatedDataset( - >>> dataset="mnist", - >>> partitioners={"train": 100}, - >>> partition_division=[0.8, 0.2], - >>> ) - >>> partition_train, partition_test = mnist_fds.load_partition(10, "train") """ # pylint: disable=too-many-instance-attributes @@ -102,17 +80,6 @@ def __init__( subset: Optional[str] = None, resplitter: Optional[Union[Resplitter, Dict[str, Tuple[str, ...]]]] = None, partitioners: Dict[str, Union[Partitioner, int]], - partition_division: Optional[ - Union[ - List[float], - Tuple[float, ...], - Dict[str, float], - Dict[ - str, - Optional[Union[List[float], Tuple[float, ...], Dict[str, float]]], - ], - ] - ] = None, shuffle: bool = True, seed: Optional[int] = 42, ) -> None: @@ -125,9 +92,6 @@ def __init__( self._partitioners: Dict[str, Partitioner] = _instantiate_partitioners( partitioners ) - self._partition_division = self._initialize_partition_division( - partition_division - ) self._shuffle = shuffle self._seed = seed # _dataset is prepared lazily on the first call to `load_partition` @@ -140,7 +104,7 @@ def load_partition( self, partition_id: int, split: Optional[str] = None, - ) -> Union[Dataset, List[Dataset], DatasetDict]: + ) -> Dataset: """Load the partition specified by the idx in the selected split. The dataset is downloaded only when the first call to `load_partition` or @@ -160,13 +124,8 @@ def load_partition( Returns ------- - partition : Union[Dataset, List[Dataset], DatasetDict] - Undivided or divided partition from the dataset split. - If `partition_division` is not specified then `Dataset` is returned. - If `partition_division` is specified as `List` or `Tuple` then - `List[Dataset]` is returned. - If `partition_division` is specified as `Dict` then `DatasetDict` is - returned. + partition : Dataset + Single partition from the dataset split. 
""" if not self._dataset_prepared: self._prepare_dataset() @@ -179,16 +138,7 @@ def load_partition( self._check_if_split_possible_to_federate(split) partitioner: Partitioner = self._partitioners[split] self._assign_dataset_to_partitioner(split) - partition = partitioner.load_partition(partition_id) - if self._partition_division is None: - return partition - partition_division = self._partition_division.get(split) - if partition_division is None: - return partition - divided_partition: Union[List[Dataset], DatasetDict] = divide_dataset( - partition, partition_division - ) - return divided_partition + return partitioner.load_partition(partition_id) def load_split(self, split: str) -> Dataset: """Load the full split of the dataset. @@ -301,62 +251,3 @@ def _check_if_no_split_keyword_possible(self) -> None: "Please set the `split` argument. You can only omit the split keyword " "if there is exactly one partitioner specified." ) - - def _initialize_partition_division( - self, - partition_division: Optional[ - Union[ - List[float], - Tuple[float, ...], - Dict[str, float], - Dict[ - str, - Optional[Union[List[float], Tuple[float, ...], Dict[str, float]]], - ], - ] - ], - ) -> Optional[ - Dict[ - str, - Optional[Union[List[float], Tuple[float, ...], Dict[str, float]]], - ] - ]: - """Create the partition division in the full format. - - Reduced format (possible if only one partitioner exist): - - Union[List[float], Tuple[float, ...], Dict[str, float] - - Full format: Dict[str, Reduced format] - Full format represents the split to division mapping. - """ - # Check for simple dict, list, or tuple types directly - if isinstance(partition_division, (list, tuple)) or ( - isinstance(partition_division, dict) - and all(isinstance(value, float) for value in partition_division.values()) - ): - if len(self._partitioners) > 1: - raise ValueError( - f"The specified partition_division {partition_division} does not " - f"provide mapping to split but more than one partitioners is " - f"specified. Please adjust the partition_division specification to " - f"have the split names as the keys." 
- ) - return cast( - Dict[ - str, - Optional[Union[List[float], Tuple[float, ...], Dict[str, float]]], - ], - {list(self._partitioners.keys())[0]: partition_division}, - ) - if isinstance(partition_division, dict): - return cast( - Dict[ - str, - Optional[Union[List[float], Tuple[float, ...], Dict[str, float]]], - ], - partition_division, - ) - if partition_division is None: - return None - raise TypeError("Unsupported type for partition_division") diff --git a/datasets/flwr_datasets/federated_dataset_test.py b/datasets/flwr_datasets/federated_dataset_test.py index fb9958a32008..7ca2b44570ca 100644 --- a/datasets/flwr_datasets/federated_dataset_test.py +++ b/datasets/flwr_datasets/federated_dataset_test.py @@ -17,7 +17,7 @@ import unittest -from typing import Dict, List, Optional, Tuple, Union +from typing import Dict, Union from unittest.mock import Mock, patch import pytest @@ -67,48 +67,6 @@ def test_load_partition_size(self, _: str, train_num_partitions: int) -> None: len(dataset_partition0), len(dataset["train"]) // train_num_partitions ) - @parameterized.expand( # type: ignore - [ - ((0.2, 0.8), 2, False), - ({"train": 0.2, "test": 0.8}, 2, False), - ({"train": {"train": 0.2, "test": 0.8}}, 2, True), - # Not full dataset - ([0.2, 0.1], 2, False), - ({"train": 0.2, "test": 0.1}, 2, False), - (None, None, False), - ], - ) - def test_divide_partition_integration_size( - self, - partition_division: Optional[ - Union[ - List[float], - Tuple[float, ...], - Dict[str, float], - Dict[ - str, - Optional[Union[List[float], Tuple[float, ...], Dict[str, float]]], - ], - ] - ], - expected_length: Optional[int], - add_test_partitioner: bool, - ): - """Test is the `partition_division` create correct data.""" - partitioners: Dict[str, Union[Partitioner, int]] = {"train": 10} - if add_test_partitioner: - partitioners[self.test_split] = 10 - dataset_fds = FederatedDataset( - dataset=self.dataset_name, - partitioners=partitioners, - partition_division=partition_division, - ) - partition = dataset_fds.load_partition(0, "train") - if partition_division is None: - self.assertEqual(expected_length, None) - else: - self.assertEqual(len(partition), expected_length) - def test_load_split(self) -> None: """Test if the load_split works with the correct split name.""" dataset_fds = FederatedDataset( diff --git a/datasets/flwr_datasets/utils.py b/datasets/flwr_datasets/utils.py index 38382508035c..33a9fa903b82 100644 --- a/datasets/flwr_datasets/utils.py +++ b/datasets/flwr_datasets/utils.py @@ -18,7 +18,7 @@ import warnings from typing import Dict, List, Optional, Tuple, Union, cast -from datasets import Dataset, DatasetDict +from datasets import Dataset, DatasetDict, concatenate_datasets from flwr_datasets.partitioner import IidPartitioner, Partitioner from flwr_datasets.resplitter import Resplitter from flwr_datasets.resplitter.merge_resplitter import MergeResplitter @@ -239,3 +239,70 @@ def _check_division_config_correctness( ) -> None: _check_division_config_types_correctness(division) _check_division_config_values_correctness(division) + + +def concatenate_divisions( + partitioner: Partitioner, + partition_division: Union[List[float], Tuple[float, ...], Dict[str, float]], + division_id: Union[int, str], +) -> Dataset: + """Create a dataset by concatenation of all partitions in the same division. + + The divisions are created based on the `partition_division` and accessed based + on the `division_id`. It can be used to create e.g. centralized dataset from + federated on-edge test sets. 
+ + Parameters + ---------- + partitioner : Partitioner + Partitioner object with assigned dataset. + partition_division : Union[List[float], Tuple[float, ...], Dict[str, float]] + Fractions specifying the division of the partitions of a `partitioner`. You can + think of this as on-edge division of the data into multiple divisions + (e.g. into train and validation). E.g. [0.8, 0.2] or + {"partition_train": 0.8, "partition_test": 0.2}. + division_id : Union[int, str] + The way to access the division (from a List or DatasetDict). If your + `partition_division` is specified as a list, then `division_id` represents an + index to an element in that list. If `partition_division` is passed as a + `Dict`, then `division_id` is a key of such dictionary. + + Returns + ------- + concatenated_divisions : Dataset + A dataset created as concatenation of the divisions from all partitions. + """ + divisions = [] + zero_len_divisions = 0 + for partition_id in range(partitioner.num_partitions): + partition = partitioner.load_partition(partition_id) + if isinstance(partition_division, (list, tuple)): + if not isinstance(division_id, int): + raise TypeError( + "The `division_id` needs to be an int in case of " + "`partition_division` specification as List." + ) + partition = divide_dataset(partition, partition_division) + division = partition[division_id] + elif isinstance(partition_division, Dict): + partition = divide_dataset(partition, partition_division) + division = partition[division_id] + else: + raise TypeError( + "The type of partition needs to be List of DatasetDict in this " + "context." + ) + if len(division) == 0: + zero_len_divisions += 1 + divisions.append(division) + + if zero_len_divisions == partitioner.num_partitions: + raise ValueError( + "The concatenated dataset is of length 0. Please change the " + "`partition_division` parameter to change this behavior." 
+ ) + if zero_len_divisions != 0: + warnings.warn( + f"{zero_len_divisions} division(s) have length zero.", stacklevel=1 + ) + return concatenate_datasets(divisions) diff --git a/datasets/flwr_datasets/utils_test.py b/datasets/flwr_datasets/utils_test.py index 26f24519eb76..3bf5afddf978 100644 --- a/datasets/flwr_datasets/utils_test.py +++ b/datasets/flwr_datasets/utils_test.py @@ -19,29 +19,34 @@ from parameterized import parameterized_class from datasets import Dataset, DatasetDict -from flwr_datasets.utils import divide_dataset +from flwr_datasets.partitioner import IidPartitioner +from flwr_datasets.utils import concatenate_divisions, divide_dataset @parameterized_class( ( - "divide", + "partition_division", "sizes", + "division_id", + "expected_concatenation_size", ), [ - ((0.2, 0.8), [8, 32]), - ([0.2, 0.8], [8, 32]), - ({"train": 0.2, "test": 0.8}, [8, 32]), + ((0.8, 0.2), [32, 8], 1, 8), + ([0.8, 0.2], [32, 8], 1, 8), + ({"train": 0.8, "test": 0.2}, [32, 8], "test", 8), # Not full dataset - ([0.2, 0.1], [8, 4]), - ((0.2, 0.1), [8, 4]), - ({"train": 0.2, "test": 0.1}, [8, 4]), + ([0.2, 0.1], [8, 4], 1, 4), + ((0.2, 0.1), [8, 4], 0, 8), + ({"train": 0.2, "test": 0.1}, [8, 4], "test", 4), ], ) class UtilsTests(unittest.TestCase): - """Utils tests.""" + """Utils for tests.""" - divide: Union[List[float], Tuple[float, ...], Dict[str, float]] + partition_division: Union[List[float], Tuple[float, ...], Dict[str, float]] sizes: Tuple[int] + division_id: Union[int, str] + expected_concatenation_size: int def setUp(self) -> None: """Set up a dataset.""" @@ -49,7 +54,7 @@ def setUp(self) -> None: def test_correct_sizes(self) -> None: """Test correct size of the division.""" - divided_dataset = divide_dataset(self.dataset, self.divide) + divided_dataset = divide_dataset(self.dataset, self.partition_division) if isinstance(divided_dataset, (list, tuple)): lengths = [len(split) for split in divided_dataset] else: @@ -59,12 +64,44 @@ def test_correct_sizes(self) -> None: def test_correct_return_types(self) -> None: """Test correct types of the divided dataset based on the config.""" - divided_dataset = divide_dataset(self.dataset, self.divide) - if isinstance(self.divide, (list, tuple)): + divided_dataset = divide_dataset(self.dataset, self.partition_division) + if isinstance(self.partition_division, (list, tuple)): self.assertIsInstance(divided_dataset, list) else: self.assertIsInstance(divided_dataset, DatasetDict) + def test_concatenate_divisions( + self, + ) -> None: + """Test if the length of the divisions match the concatenated dataset.""" + num_partitions = 4 + partitioner = IidPartitioner(num_partitions=num_partitions) + partitioner.dataset = self.dataset + centralized_from_federated_test = concatenate_divisions( + partitioner, + self.partition_division, + self.division_id, + ) + + self.assertEqual( + len(centralized_from_federated_test), self.expected_concatenation_size + ) + + +class TestIncorrectUtilsUsage(unittest.TestCase): + """Test incorrect utils usage.""" + + def test_all_divisions_to_concat_size_zero(self) -> None: + """Test raises when all divisions for concatenations are zero.""" + num_partitions = 4 + partitioner = IidPartitioner(num_partitions=num_partitions) + partitioner.dataset = Dataset.from_dict({"data": range(40)}) + division_id = 1 + partition_division = [0.8, 0.0] + + with self.assertRaises(ValueError): + _ = concatenate_divisions(partitioner, partition_division, division_id) + if __name__ == "__main__": unittest.main() From ca237a35e9ba7be4c1a903f395ed2a37af476ffc 
Mon Sep 17 00:00:00 2001 From: Adam Narozniak <51029327+adam-narozniak@users.noreply.github.com> Date: Thu, 14 Mar 2024 10:33:53 +0000 Subject: [PATCH 28/57] Bump up version, set status to stable Flower Datasets (#3141) --- datasets/pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/datasets/pyproject.toml b/datasets/pyproject.toml index 0943814ed611..5800faf3f272 100644 --- a/datasets/pyproject.toml +++ b/datasets/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "flwr-datasets" -version = "0.0.2" +version = "0.1.0" description = "Flower Datasets" license = "Apache-2.0" authors = ["The Flower Authors "] @@ -22,7 +22,7 @@ keywords = [ "dataset", ] classifiers = [ - "Development Status :: 3 - Alpha", + "Development Status :: 4 - Beta", "Intended Audience :: Developers", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", From 4c2b2e24f491dd2acea6ee7a4d2796ebad276485 Mon Sep 17 00:00:00 2001 From: Adam Narozniak <51029327+adam-narozniak@users.noreply.github.com> Date: Thu, 14 Mar 2024 11:23:30 +0000 Subject: [PATCH 29/57] Fix federated dataset docs (#3138) --- datasets/flwr_datasets/partitioner/shard_partitioner.py | 5 ++++- datasets/flwr_datasets/utils.py | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/datasets/flwr_datasets/partitioner/shard_partitioner.py b/datasets/flwr_datasets/partitioner/shard_partitioner.py index a973f7e5bcb9..31eac2309fff 100644 --- a/datasets/flwr_datasets/partitioner/shard_partitioner.py +++ b/datasets/flwr_datasets/partitioner/shard_partitioner.py @@ -90,6 +90,7 @@ class ShardPartitioner(Partitioner): # pylint: disable=R0902 -------- 1) If you need same number of shards per partitions + the same shard size (and you know both of these values) + >>> from flwr_datasets import FederatedDataset >>> from flwr_datasets.partitioner import ShardPartitioner >>> @@ -108,6 +109,7 @@ class ShardPartitioner(Partitioner): # pylint: disable=R0902 2) If you want to use nearly all the data and do not need to have the number of shard per each partition to be the same + >>> from flwr_datasets import FederatedDataset >>> from flwr_datasets.partitioner import ShardPartitioner >>> @@ -121,7 +123,8 @@ class ShardPartitioner(Partitioner): # pylint: disable=R0902 [7000, 7000, 7000, 7000, 7000, 7000, 6000, 6000, 6000] 3) If you want to use all the data - >>> from flwr_datasets import FederatedDataset + + >>> from flwr_datasets import FederatedDataset >>> from flwr_datasets.partitioner import ShardPartitioner >>> >>> partitioner = ShardPartitioner(num_partitions=10, partition_by="label", diff --git a/datasets/flwr_datasets/utils.py b/datasets/flwr_datasets/utils.py index 33a9fa903b82..a6e4fa8d0f0b 100644 --- a/datasets/flwr_datasets/utils.py +++ b/datasets/flwr_datasets/utils.py @@ -113,6 +113,7 @@ def divide_dataset( Examples -------- Use `divide_dataset` with division specified as a list. + >>> from flwr_datasets import FederatedDataset >>> from flwr_datasets.utils import divide_dataset >>> @@ -122,6 +123,7 @@ def divide_dataset( >>> train, test = divide_dataset(dataset=partition, division=division) Use `divide_dataset` with division specified as a dict. 
+ >>> from flwr_datasets import FederatedDataset >>> from flwr_datasets.utils import divide_dataset >>> From a2da8f0186b82ce26742487ee31232bd470ef102 Mon Sep 17 00:00:00 2001 From: mohammadnaseri Date: Thu, 14 Mar 2024 14:03:17 +0000 Subject: [PATCH 30/57] Fix minor DP docs issue (#3147) --- src/py/flwr/server/strategy/dp_adaptive_clipping.py | 2 +- src/py/flwr/server/strategy/dp_fixed_clipping.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/py/flwr/server/strategy/dp_adaptive_clipping.py b/src/py/flwr/server/strategy/dp_adaptive_clipping.py index c3c3761f9dc7..1acfd4613a0a 100644 --- a/src/py/flwr/server/strategy/dp_adaptive_clipping.py +++ b/src/py/flwr/server/strategy/dp_adaptive_clipping.py @@ -296,7 +296,7 @@ class DifferentialPrivacyClientSideAdaptiveClipping(Strategy): Wrap the strategy with the `DifferentialPrivacyClientSideAdaptiveClipping` wrapper: - >>> DifferentialPrivacyClientSideAdaptiveClipping( + >>> dp_strategy = DifferentialPrivacyClientSideAdaptiveClipping( >>> strategy, cfg.noise_multiplier, cfg.num_sampled_clients >>> ) diff --git a/src/py/flwr/server/strategy/dp_fixed_clipping.py b/src/py/flwr/server/strategy/dp_fixed_clipping.py index c670c26e4977..61e8123e28d7 100644 --- a/src/py/flwr/server/strategy/dp_fixed_clipping.py +++ b/src/py/flwr/server/strategy/dp_fixed_clipping.py @@ -234,7 +234,7 @@ class DifferentialPrivacyClientSideFixedClipping(Strategy): Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` wrapper: - >>> DifferentialPrivacyClientSideFixedClipping( + >>> dp_strategy = DifferentialPrivacyClientSideFixedClipping( >>> strategy, cfg.noise_multiplier, cfg.clipping_norm, cfg.num_sampled_clients >>> ) From d391fd6d9826891fe8450a6db88b9a705f60194c Mon Sep 17 00:00:00 2001 From: Charles Beauville Date: Thu, 14 Mar 2024 16:21:09 +0000 Subject: [PATCH 31/57] Remove inconsistent type hints (#3148) --- .../flwr/cli/new/templates/app/code/client.pytorch.py.tpl | 4 ++-- src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl index 4f2b26ceddea..7137a7791683 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl @@ -15,7 +15,7 @@ from $project_name.task import ( # Define Flower Client and client_fn class FlowerClient(NumPyClient): - def __init__(self, net, trainloader, valloader) -> None: + def __init__(self, net, trainloader, valloader): self.net = net self.trainloader = trainloader self.valloader = valloader @@ -31,7 +31,7 @@ class FlowerClient(NumPyClient): return loss, len(self.valloader.dataset), {"accuracy": accuracy} -def client_fn(cid: str): +def client_fn(cid): # Load model and data net = Net().to(DEVICE) trainloader, valloader = load_data(int(cid), 2) diff --git a/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl index 82e57388fa3e..85460564b6ef 100644 --- a/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl @@ -16,7 +16,7 @@ DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") class Net(nn.Module): """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')""" - def __init__(self) -> None: + def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(3, 6, 
5) + self.pool = nn.MaxPool2d(2, 2) @@ -25,7 +25,7 @@ class Net(nn.Module): self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) - def forward(self, x: torch.Tensor) -> torch.Tensor: + def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) @@ -34,7 +34,7 @@ class Net(nn.Module): return self.fc3(x) -def load_data(partition_id: int, num_partitions: int): +def load_data(partition_id, num_partitions): """Load partition CIFAR10 data.""" fds = FederatedDataset(dataset="cifar10", partitioners={"train": num_partitions}) partition = fds.load_partition(partition_id) From ce145d08f3f79f5181bf1b11baf0b811d9f87cb1 Mon Sep 17 00:00:00 2001 From: mohammadnaseri Date: Fri, 15 Mar 2024 00:50:41 +0000 Subject: [PATCH 32/57] Add dp secagg demo example (#3134) Co-authored-by: Heng Pan Co-authored-by: Daniel J. Beutel --- examples/fl-dp-sa/README.md | 22 +++++ examples/fl-dp-sa/fl_dp_sa/__init__.py | 1 + examples/fl-dp-sa/fl_dp_sa/client.py | 43 ++++++++++ examples/fl-dp-sa/fl_dp_sa/server.py | 77 +++++++++++++++++ examples/fl-dp-sa/fl_dp_sa/task.py | 110 +++++++++++++++++++++++++ examples/fl-dp-sa/flower.toml | 13 +++ examples/fl-dp-sa/pyproject.toml | 21 +++++ examples/fl-dp-sa/requirements.txt | 4 + 8 files changed, 291 insertions(+) create mode 100644 examples/fl-dp-sa/README.md create mode 100644 examples/fl-dp-sa/fl_dp_sa/__init__.py create mode 100644 examples/fl-dp-sa/fl_dp_sa/client.py create mode 100644 examples/fl-dp-sa/fl_dp_sa/server.py create mode 100644 examples/fl-dp-sa/fl_dp_sa/task.py create mode 100644 examples/fl-dp-sa/flower.toml create mode 100644 examples/fl-dp-sa/pyproject.toml create mode 100644 examples/fl-dp-sa/requirements.txt diff --git a/examples/fl-dp-sa/README.md b/examples/fl-dp-sa/README.md new file mode 100644 index 000000000000..99a0a7e50980 --- /dev/null +++ b/examples/fl-dp-sa/README.md @@ -0,0 +1,22 @@ +# fl_dp_sa + +This is a simple example that utilizes central differential privacy with client-side fixed clipping and secure aggregation. +Note: This example is designed for a small number of rounds and is intended for demonstration purposes. + +## Install dependencies + +```bash +# Using pip +pip install . + +# Or using Poetry +poetry install +``` + +## Run + +The example uses the MNIST dataset with a total of 100 clients, with 20 clients sampled in each round. The hyperparameters for DP and SecAgg are specified in `server.py`. 
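To make the combination more concrete, the following self-contained NumPy sketch illustrates the two mechanisms this example wires together: each sampled client clips its model update to a fixed L2 norm and blinds it with pairwise masks that cancel in the sum (the core idea behind SecAgg+, which additionally handles key agreement, secret sharing, and dropouts), and the server adds Gaussian noise to the average. The constants mirror `server.py` below; the noise scaling shown is one common formulation and not necessarily the exact one implemented in Flower.

```python
import numpy as np

num_clients = 20        # clients sampled per round in this example
clipping_norm = 10.0    # fixed L2 clipping norm (see server.py)
noise_multiplier = 0.2  # noise multiplier z (see server.py)
dim = 100               # toy update dimension
rng = np.random.default_rng(0)


def clip(update, c):
    """Scale the update so that its L2 norm is at most c."""
    norm = np.linalg.norm(update)
    return update if norm <= c else update * (c / norm)


updates = [clip(rng.normal(size=dim) * 5, clipping_norm) for _ in range(num_clients)]

# Pairwise masking: client i adds +m_ij and client j adds -m_ij, so the masks
# cancel in the sum and the server only learns the aggregate.
masked = [u.copy() for u in updates]
for i in range(num_clients):
    for j in range(i + 1, num_clients):
        m = rng.normal(size=dim)
        masked[i] += m
        masked[j] -= m

aggregate = np.sum(masked, axis=0)  # equals the sum of the unmasked updates
assert np.allclose(aggregate, np.sum(updates, axis=0))

# Central DP on the average: Gaussian noise with std = z * C / n (illustrative).
sigma = noise_multiplier * clipping_norm / num_clients
noisy_average = aggregate / num_clients + rng.normal(scale=sigma, size=dim)
print(np.linalg.norm(noisy_average))
```

Because the clipping happens on the client, the server-side wrapper only has to add noise to the aggregate, which is why `DifferentialPrivacyClientSideFixedClipping` wraps the strategy in `server.py` while `fixedclipping_mod` runs on every client.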
+ +```shell +flower-simulation --server-app fl_dp_sa.server:app --client-app fl_dp_sa.client:app --num-supernodes 100 +``` diff --git a/examples/fl-dp-sa/fl_dp_sa/__init__.py b/examples/fl-dp-sa/fl_dp_sa/__init__.py new file mode 100644 index 000000000000..741260348ab8 --- /dev/null +++ b/examples/fl-dp-sa/fl_dp_sa/__init__.py @@ -0,0 +1 @@ +"""fl_dp_sa: A Flower / PyTorch app.""" diff --git a/examples/fl-dp-sa/fl_dp_sa/client.py b/examples/fl-dp-sa/fl_dp_sa/client.py new file mode 100644 index 000000000000..104264158833 --- /dev/null +++ b/examples/fl-dp-sa/fl_dp_sa/client.py @@ -0,0 +1,43 @@ +"""fl_dp_sa: A Flower / PyTorch app.""" + +from flwr.client import ClientApp, NumPyClient +from flwr.client.mod import fixedclipping_mod, secaggplus_mod + +from fl_dp_sa.task import DEVICE, Net, get_weights, load_data, set_weights, test, train + + +# Load model and data (simple CNN, MNIST) +net = Net().to(DEVICE) + + +# Define FlowerClient and client_fn +class FlowerClient(NumPyClient): + def __init__(self, trainloader, testloader) -> None: + self.trainloader = trainloader + self.testloader = testloader + + def fit(self, parameters, config): + set_weights(net, parameters) + results = train(net, self.trainloader, self.testloader, epochs=1, device=DEVICE) + return get_weights(net), len(self.trainloader.dataset), results + + def evaluate(self, parameters, config): + set_weights(net, parameters) + loss, accuracy = test(net, self.testloader) + return loss, len(self.testloader.dataset), {"accuracy": accuracy} + + +def client_fn(cid: str): + """Create and return an instance of Flower `Client`.""" + trainloader, testloader = load_data(partition_id=int(cid)) + return FlowerClient(trainloader, testloader).to_client() + + +# Flower ClientApp +app = ClientApp( + client_fn=client_fn, + mods=[ + secaggplus_mod, + fixedclipping_mod, + ], +) diff --git a/examples/fl-dp-sa/fl_dp_sa/server.py b/examples/fl-dp-sa/fl_dp_sa/server.py new file mode 100644 index 000000000000..f7da75997e98 --- /dev/null +++ b/examples/fl-dp-sa/fl_dp_sa/server.py @@ -0,0 +1,77 @@ +"""fl_dp_sa: A Flower / PyTorch app.""" + +from typing import List, Tuple + +from flwr.server import Driver, LegacyContext, ServerApp, ServerConfig +from flwr.common import Context, Metrics, ndarrays_to_parameters +from flwr.server.strategy import ( + DifferentialPrivacyClientSideFixedClipping, + FedAvg, +) +from flwr.server.workflow import DefaultWorkflow, SecAggPlusWorkflow + +from fl_dp_sa.task import Net, get_weights + + +# Define metric aggregation function +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + examples = [num_examples for num_examples, _ in metrics] + + # Multiply accuracy of each client by number of examples used + train_losses = [num_examples * m["train_loss"] for num_examples, m in metrics] + train_accuracies = [ + num_examples * m["train_accuracy"] for num_examples, m in metrics + ] + val_losses = [num_examples * m["val_loss"] for num_examples, m in metrics] + val_accuracies = [num_examples * m["val_accuracy"] for num_examples, m in metrics] + + # Aggregate and return custom metric (weighted average) + return { + "train_loss": sum(train_losses) / sum(examples), + "train_accuracy": sum(train_accuracies) / sum(examples), + "val_loss": sum(val_losses) / sum(examples), + "val_accuracy": sum(val_accuracies) / sum(examples), + } + + +# Initialize model parameters +ndarrays = get_weights(Net()) +parameters = ndarrays_to_parameters(ndarrays) + + +# Define strategy +strategy = FedAvg( + fraction_fit=0.2, 
 fraction_evaluate=0.0, # Disable evaluation for demo purpose + min_fit_clients=20, + min_available_clients=20, + fit_metrics_aggregation_fn=weighted_average, + initial_parameters=parameters, +) +strategy = DifferentialPrivacyClientSideFixedClipping( + strategy, noise_multiplier=0.2, clipping_norm=10, num_sampled_clients=20 +) + + +app = ServerApp() + + +@app.main() +def main(driver: Driver, context: Context) -> None: + # Construct the LegacyContext + context = LegacyContext( + state=context.state, + config=ServerConfig(num_rounds=3), + strategy=strategy, + ) + + # Create the train/evaluate workflow + workflow = DefaultWorkflow( + fit_workflow=SecAggPlusWorkflow( + num_shares=7, + reconstruction_threshold=4, + ) + ) + + # Execute + workflow(driver, context) diff --git a/examples/fl-dp-sa/fl_dp_sa/task.py b/examples/fl-dp-sa/fl_dp_sa/task.py new file mode 100644 index 000000000000..3d506263d5a3 --- /dev/null +++ b/examples/fl-dp-sa/fl_dp_sa/task.py @@ -0,0 +1,110 @@ +"""fl_dp_sa: A Flower / PyTorch app.""" + +from collections import OrderedDict +from logging import INFO +from flwr_datasets import FederatedDataset + +import torch +import torch.nn as nn +import torch.nn.functional as F +from flwr.common.logger import log +from torch.utils.data import DataLoader +from torchvision.transforms import Compose, Normalize, ToTensor + + +DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + +class Net(nn.Module): + """Model.""" + + def __init__(self) -> None: + super(Net, self).__init__() + self.conv1 = nn.Conv2d(1, 6, 3, padding=1) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + batch_size = x.size(0) + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(batch_size, -1) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + return self.fc3(x) + + +def load_data(partition_id): + """Load partition MNIST data.""" + fds = FederatedDataset(dataset="mnist", partitioners={"train": 100}) + partition = fds.load_partition(partition_id) + # Divide data on each node: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2) + pytorch_transforms = Compose([ToTensor(), Normalize((0.5,), (0.5,))]) + + def apply_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["image"] = [pytorch_transforms(img) for img in batch["image"]] + return batch + + partition_train_test = partition_train_test.with_transform(apply_transforms) + trainloader = DataLoader(partition_train_test["train"], batch_size=32, shuffle=True) + testloader = DataLoader(partition_train_test["test"], batch_size=32) + return trainloader, testloader + + +def train(net, trainloader, valloader, epochs, device): + """Train the model on the training set.""" + net.to(device) # move model to GPU if available + criterion = torch.nn.CrossEntropyLoss().to(device) + optimizer = torch.optim.Adam(net.parameters()) + net.train() + for _ in range(epochs): + for batch in trainloader: + images = batch["image"].to(device) + labels = batch["label"].to(device) + optimizer.zero_grad() + loss = criterion(net(images), labels) + loss.backward() + optimizer.step() + + train_loss, train_acc = test(net, trainloader) + val_loss, val_acc = test(net, valloader) + + results = { + "train_loss": train_loss, + "train_accuracy": train_acc, + "val_loss": val_loss, + "val_accuracy": 
val_acc, + } + return results + + +def test(net, testloader): + """Validate the model on the test set.""" + net.to(DEVICE) + criterion = torch.nn.CrossEntropyLoss() + correct, loss = 0, 0.0 + with torch.no_grad(): + for batch in testloader: + images = batch["image"].to(DEVICE) + labels = batch["label"].to(DEVICE) + outputs = net(images.to(DEVICE)) + labels = labels.to(DEVICE) + loss += criterion(outputs, labels).item() + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + accuracy = correct / len(testloader.dataset) + return loss, accuracy + + +def get_weights(net): + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + +def set_weights(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) diff --git a/examples/fl-dp-sa/flower.toml b/examples/fl-dp-sa/flower.toml new file mode 100644 index 000000000000..ea2e98206791 --- /dev/null +++ b/examples/fl-dp-sa/flower.toml @@ -0,0 +1,13 @@ +[project] +name = "fl_dp_sa" +version = "1.0.0" +description = "" +license = "Apache-2.0" +authors = [ + "The Flower Authors ", +] +readme = "README.md" + +[flower.components] +serverapp = "fl_dp_sa.server:app" +clientapp = "fl_dp_sa.client:app" diff --git a/examples/fl-dp-sa/pyproject.toml b/examples/fl-dp-sa/pyproject.toml new file mode 100644 index 000000000000..d30fa4675e34 --- /dev/null +++ b/examples/fl-dp-sa/pyproject.toml @@ -0,0 +1,21 @@ +[build-system] +requires = ["poetry-core>=1.4.0"] +build-backend = "poetry.core.masonry.api" + +[tool.poetry] +name = "fl-dp-sa" +version = "0.1.0" +description = "" +license = "Apache-2.0" +authors = [ + "The Flower Authors ", +] +readme = "README.md" + +[tool.poetry.dependencies] +python = "^3.9" +# Mandatory dependencies +flwr-nightly = { version = "1.8.0.dev20240313", extras = ["simulation"] } +flwr-datasets = { version = "0.0.2", extras = ["vision"] } +torch = "2.2.1" +torchvision = "0.17.1" diff --git a/examples/fl-dp-sa/requirements.txt b/examples/fl-dp-sa/requirements.txt new file mode 100644 index 000000000000..ddb8a814447b --- /dev/null +++ b/examples/fl-dp-sa/requirements.txt @@ -0,0 +1,4 @@ +flwr-nightly[simulation]==1.8.0.dev20240313 +flwr-datasets[vision]==0.0.2 +torch==2.2.1 +torchvision==0.17.1 From 4041299b4d75eb478a65e71ac21fe7e3f77ec2fb Mon Sep 17 00:00:00 2001 From: Yan Gao Date: Wed, 20 Mar 2024 15:39:59 +0000 Subject: [PATCH 33/57] Update LLM FlowerTune example readme (#3162) Co-authored-by: jafermarq --- examples/llm-flowertune/README.md | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/examples/llm-flowertune/README.md b/examples/llm-flowertune/README.md index 60e183d2a9c0..4f98072f8c7f 100644 --- a/examples/llm-flowertune/README.md +++ b/examples/llm-flowertune/README.md @@ -1,16 +1,14 @@ -# Federated Large Language Model (LLM) Fine-tuning with Flower +# LLM FlowerTune: Federated LLM Fine-tuning with Flower Large language models (LLMs), which have been trained on vast amounts of publicly accessible data, have shown remarkable effectiveness in a wide range of areas. However, despite the fact that more data typically leads to improved performance, there is a concerning prospect that the supply of high-quality public data will deplete within a few years. Federated LLM training could unlock access to an endless pool of distributed private data by allowing multiple data owners to collaboratively train a shared model without the need to exchange raw data. 
This introductory example conducts federated instruction tuning with pretrained [LLama2](https://huggingface.co/openlm-research) models on [Alpaca-GPT4](https://huggingface.co/datasets/vicgalle/alpaca-gpt4) dataset. -We use [Flower Datasets](https://flower.dev/docs/datasets/) to download, partition and preprocess the dataset. -The fine-tuning is done using the [🤗PEFT](https://huggingface.co/docs/peft/en/index) library. -We use Flower's Simulation Engine to simulate the LLM fine-tuning process in federated way, +We implement LLM FlowerTune by integrating a bundle of techniques: 1) We use [Flower Datasets](https://flower.dev/docs/datasets/) to download, partition and preprocess the dataset. 2) The fine-tuning is done using the [🤗PEFT](https://huggingface.co/docs/peft/en/index) library. 3) We use Flower's Simulation Engine to simulate the LLM fine-tuning process in federated way, which allows users to perform the training on a single GPU. -## Environments Setup +## Environment Setup Start by cloning the code example. We prepared a single-line command that you can copy into your shell which will checkout the example for you: From ae70af54daa0f2d8122be72f460ac701f24bcab6 Mon Sep 17 00:00:00 2001 From: Danny Date: Sat, 23 Mar 2024 11:45:24 +0100 Subject: [PATCH 34/57] Update emails/domains to flower.ai (#3167) Signed-off-by: Danny Heinrich --- .github/CODE_OF_CONDUCT.md | 2 +- doc/locales/fr/LC_MESSAGES/framework-docs.po | 2 +- examples/quickstart-monai/pyproject.toml | 2 +- src/kotlin/gradle.properties | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md index 0a8c39f4e08f..af8d6265e6b3 100644 --- a/.github/CODE_OF_CONDUCT.md +++ b/.github/CODE_OF_CONDUCT.md @@ -55,7 +55,7 @@ further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at contact@adap.com. All +reported by contacting the project team at hello@flower.ai. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. 
diff --git a/doc/locales/fr/LC_MESSAGES/framework-docs.po b/doc/locales/fr/LC_MESSAGES/framework-docs.po index e7c7783c48ff..ebab3cc47e85 100644 --- a/doc/locales/fr/LC_MESSAGES/framework-docs.po +++ b/doc/locales/fr/LC_MESSAGES/framework-docs.po @@ -5,7 +5,7 @@ msgstr "" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2024-02-13 11:23+0100\n" "PO-Revision-Date: 2023-09-05 17:54+0000\n" -"Last-Translator: Charles Beauville \n" +"Last-Translator: Charles Beauville \n" "Language: fr\n" "Language-Team: French \n" diff --git a/examples/quickstart-monai/pyproject.toml b/examples/quickstart-monai/pyproject.toml index 66a56ee2270b..b1713f05f2ef 100644 --- a/examples/quickstart-monai/pyproject.toml +++ b/examples/quickstart-monai/pyproject.toml @@ -6,7 +6,7 @@ build-backend = "poetry.core.masonry.api" name = "quickstart-monai" version = "0.1.0" description = "MONAI Federated Learning Quickstart with Flower" -authors = ["The Flower Authors "] +authors = ["The Flower Authors "] [tool.poetry.dependencies] python = ">=3.8,<3.11" diff --git a/src/kotlin/gradle.properties b/src/kotlin/gradle.properties index 151a5a4112bb..c792dc1c822b 100644 --- a/src/kotlin/gradle.properties +++ b/src/kotlin/gradle.properties @@ -42,6 +42,6 @@ POM_SCM_URL=https://github.com/adap/flower/ POM_SCM_CONNECTION=scm:git:git://github.com/adap/flower.git POM_SCM_DEV_CONNECTION=scm:git:ssh://git@github.com/adap/flower.git -POM_DEVELOPER_ID=flower.dev +POM_DEVELOPER_ID=flower.ai POM_DEVELOPER_NAME=The Flower Authors POM_DEVELOPER_URL=https://github.com/adap/ From 538bc9fd2137560f7bdc48c3e81f6470669be03e Mon Sep 17 00:00:00 2001 From: Charles Beauville Date: Sun, 24 Mar 2024 16:36:28 -0400 Subject: [PATCH 35/57] Remove legacy `flwr_examples` (#3152) --- doc/locales/fr/LC_MESSAGES/framework-docs.po | 6510 ++++-- .../pt_BR/LC_MESSAGES/framework-docs.po | 16665 +++++++++------- .../zh_Hans/LC_MESSAGES/framework-docs.po | 5873 ++++-- .../example-walkthrough-pytorch-mnist.rst | 453 - doc/source/index.rst | 1 - doc/source/ref-example-projects.rst | 106 - pyproject.toml | 1 - src/py/flwr_example/__init__.py | 27 - src/py/flwr_example/pytorch_cifar/__init__.py | 18 - src/py/flwr_example/pytorch_cifar/cifar.py | 153 - .../flwr_example/pytorch_cifar/cifar_test.py | 76 - src/py/flwr_example/pytorch_cifar/client.py | 148 - .../flwr_example/pytorch_cifar/run-clients.sh | 31 - .../flwr_example/pytorch_cifar/run-server.sh | 23 - src/py/flwr_example/pytorch_cifar/server.py | 121 - .../flwr_example/pytorch_imagenet/__init__.py | 18 - .../flwr_example/pytorch_imagenet/client.py | 206 - .../flwr_example/pytorch_imagenet/imagenet.py | 169 - .../pytorch_imagenet/run-clients.sh | 33 - .../pytorch_imagenet/run-server.sh | 26 - .../flwr_example/pytorch_imagenet/server.py | 134 - .../pytorch_save_weights/__init__.py | 15 - .../pytorch_save_weights/cifar.py | 155 - .../pytorch_save_weights/client.py | 98 - .../pytorch_save_weights/run-clients.sh | 28 - .../pytorch_save_weights/run-server.sh | 19 - .../pytorch_save_weights/server.py | 101 - .../quickstart_pytorch/__init__.py | 7 - .../flwr_example/quickstart_pytorch/client.py | 97 - .../flwr_example/quickstart_pytorch/mnist.py | 436 - .../quickstart_pytorch/run-clients.sh | 32 - .../quickstart_pytorch/run-server.sh | 19 - .../flwr_example/quickstart_pytorch/server.py | 20 - .../quickstart_tensorflow/__init__.py | 7 - .../quickstart_tensorflow/client.py | 55 - .../quickstart_tensorflow/run-clients.sh | 29 - .../quickstart_tensorflow/run-server.sh | 19 - .../quickstart_tensorflow/server.py | 4 - 
.../tensorflow_fashion_mnist/__init__.py | 18 - .../tensorflow_fashion_mnist/client.py | 116 - .../tensorflow_fashion_mnist/download.py | 34 - .../tensorflow_fashion_mnist/fashion_mnist.py | 124 - .../fashion_mnist_test.py | 44 - .../tensorflow_fashion_mnist/run-clients.sh | 33 - .../tensorflow_fashion_mnist/run-server.sh | 23 - .../tensorflow_fashion_mnist/server.py | 115 - 46 files changed, 18746 insertions(+), 13694 deletions(-) delete mode 100644 doc/source/example-walkthrough-pytorch-mnist.rst delete mode 100644 src/py/flwr_example/__init__.py delete mode 100644 src/py/flwr_example/pytorch_cifar/__init__.py delete mode 100644 src/py/flwr_example/pytorch_cifar/cifar.py delete mode 100644 src/py/flwr_example/pytorch_cifar/cifar_test.py delete mode 100644 src/py/flwr_example/pytorch_cifar/client.py delete mode 100755 src/py/flwr_example/pytorch_cifar/run-clients.sh delete mode 100755 src/py/flwr_example/pytorch_cifar/run-server.sh delete mode 100644 src/py/flwr_example/pytorch_cifar/server.py delete mode 100644 src/py/flwr_example/pytorch_imagenet/__init__.py delete mode 100644 src/py/flwr_example/pytorch_imagenet/client.py delete mode 100644 src/py/flwr_example/pytorch_imagenet/imagenet.py delete mode 100755 src/py/flwr_example/pytorch_imagenet/run-clients.sh delete mode 100755 src/py/flwr_example/pytorch_imagenet/run-server.sh delete mode 100644 src/py/flwr_example/pytorch_imagenet/server.py delete mode 100644 src/py/flwr_example/pytorch_save_weights/__init__.py delete mode 100644 src/py/flwr_example/pytorch_save_weights/cifar.py delete mode 100644 src/py/flwr_example/pytorch_save_weights/client.py delete mode 100755 src/py/flwr_example/pytorch_save_weights/run-clients.sh delete mode 100755 src/py/flwr_example/pytorch_save_weights/run-server.sh delete mode 100644 src/py/flwr_example/pytorch_save_weights/server.py delete mode 100644 src/py/flwr_example/quickstart_pytorch/__init__.py delete mode 100644 src/py/flwr_example/quickstart_pytorch/client.py delete mode 100644 src/py/flwr_example/quickstart_pytorch/mnist.py delete mode 100755 src/py/flwr_example/quickstart_pytorch/run-clients.sh delete mode 100755 src/py/flwr_example/quickstart_pytorch/run-server.sh delete mode 100644 src/py/flwr_example/quickstart_pytorch/server.py delete mode 100644 src/py/flwr_example/quickstart_tensorflow/__init__.py delete mode 100644 src/py/flwr_example/quickstart_tensorflow/client.py delete mode 100755 src/py/flwr_example/quickstart_tensorflow/run-clients.sh delete mode 100755 src/py/flwr_example/quickstart_tensorflow/run-server.sh delete mode 100644 src/py/flwr_example/quickstart_tensorflow/server.py delete mode 100644 src/py/flwr_example/tensorflow_fashion_mnist/__init__.py delete mode 100644 src/py/flwr_example/tensorflow_fashion_mnist/client.py delete mode 100644 src/py/flwr_example/tensorflow_fashion_mnist/download.py delete mode 100644 src/py/flwr_example/tensorflow_fashion_mnist/fashion_mnist.py delete mode 100644 src/py/flwr_example/tensorflow_fashion_mnist/fashion_mnist_test.py delete mode 100755 src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh delete mode 100755 src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh delete mode 100644 src/py/flwr_example/tensorflow_fashion_mnist/server.py diff --git a/doc/locales/fr/LC_MESSAGES/framework-docs.po b/doc/locales/fr/LC_MESSAGES/framework-docs.po index ebab3cc47e85..80222e5409d2 100644 --- a/doc/locales/fr/LC_MESSAGES/framework-docs.po +++ b/doc/locales/fr/LC_MESSAGES/framework-docs.po @@ -3,7 +3,7 @@ msgid "" msgstr "" 
"Project-Id-Version: Flower Docs\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2024-02-13 11:23+0100\n" +"POT-Creation-Date: 2024-03-15 14:23+0000\n" "PO-Revision-Date: 2023-09-05 17:54+0000\n" "Last-Translator: Charles Beauville \n" "Language: fr\n" @@ -13,7 +13,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.13.1\n" +"Generated-By: Babel 2.14.0\n" #: ../../source/contributor-explanation-architecture.rst:2 msgid "Flower Architecture" @@ -27,9 +27,7 @@ msgstr "Moteur client Edge" msgid "" "`Flower `_ core framework architecture with Edge " "Client Engine" -msgstr "" -"`Flower `_ architecture de base avec Edge Client " -"Engine" +msgstr "`Flower `_ architecture de base avec Edge Client Engine" #: ../../source/contributor-explanation-architecture.rst:13 msgid "Virtual Client Engine" @@ -40,8 +38,8 @@ msgid "" "`Flower `_ core framework architecture with Virtual " "Client Engine" msgstr "" -"`Flower `_ architecture de base avec moteur de client" -" virtuel" +"`Flower `_ architecture de base avec moteur de client " +"virtuel" #: ../../source/contributor-explanation-architecture.rst:21 msgid "Virtual Client Engine and Edge Client Engine in the same workload" @@ -86,9 +84,8 @@ msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:19 msgid "" -"Please follow the first section on `Run Flower using Docker " -"`_ " -"which covers this step in more detail." +"Please follow the first section on :doc:`Run Flower using Docker ` which covers this step in more detail." msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:23 @@ -303,7 +300,7 @@ msgid "" "to help us in our effort to make Federated Learning accessible to as many" " people as possible by contributing to those translations! This might " "also be a great opportunity for those wanting to become open source " -"contributors with little prerequistes." +"contributors with little prerequisites." msgstr "" #: ../../source/contributor-how-to-contribute-translations.rst:13 @@ -355,7 +352,7 @@ msgstr "" #: ../../source/contributor-how-to-contribute-translations.rst:47 msgid "" -"You input your translation in the textbox at the top and then, once you " +"You input your translation in the text box at the top and then, once you " "are happy with it, you either press ``Save and continue`` (to save the " "translation and go to the next untranslated string), ``Save and stay`` " "(to save the translation and stay on the same page), ``Suggest`` (to add " @@ -393,8 +390,8 @@ msgstr "" #: ../../source/contributor-how-to-contribute-translations.rst:69 msgid "" "If you want to add a new language, you will first have to contact us, " -"either on `Slack `_, or by opening an " -"issue on our `GitHub repo `_." +"either on `Slack `_, or by opening an issue" +" on our `GitHub repo `_." msgstr "" #: ../../source/contributor-how-to-create-new-messages.rst:2 @@ -438,12 +435,13 @@ msgid "Message Types for Protocol Buffers" msgstr "Types de messages pour les tampons de protocole" #: ../../source/contributor-how-to-create-new-messages.rst:32 +#, fuzzy msgid "" "The first thing we need to do is to define a message type for the RPC " "system in :code:`transport.proto`. Note that we have to do it for both " "the request and response messages. For more details on the syntax of " -"proto3, please see the `official documentation " -"`_." +"proto3, please see the `official documentation `_." 
msgstr "" "La première chose à faire est de définir un type de message pour le " "système RPC dans :code:`transport.proto`. Notez que nous devons le faire " @@ -592,9 +590,10 @@ msgstr "" "conteneur." #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:11 +#, fuzzy msgid "" "Source: `Official VSCode documentation " -"`_" +"`_" msgstr "" "Source : `Documentation officielle de VSCode " "`_" @@ -648,9 +647,10 @@ msgstr "" "cas-là, consulte les sources suivantes :" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:23 +#, fuzzy msgid "" "`Developing inside a Container " -"`_" msgstr "" "`Développement à l'intérieur d'un conteneur " @@ -658,9 +658,10 @@ msgstr "" "requirements>`_" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:24 +#, fuzzy msgid "" "`Remote development in Containers " -"`_" +"`_" msgstr "" "`Développement à distance dans les conteneurs " "`_" @@ -961,8 +962,8 @@ msgstr "Ajoute une nouvelle section ``Unreleased`` dans ``changelog.md``." #: ../../source/contributor-how-to-release-flower.rst:25 msgid "" -"Merge the pull request on the same day (i.e., before a new nightly release" -" gets published to PyPI)." +"Merge the pull request on the same day (i.e., before a new nightly " +"release gets published to PyPI)." msgstr "" "Fusionne la pull request le jour même (c'est-à-dire avant qu'une nouvelle" " version nightly ne soit publiée sur PyPI)." @@ -977,11 +978,12 @@ msgstr "Nom de la pré-version" #: ../../source/contributor-how-to-release-flower.rst:33 msgid "" -"PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases " -"MUST use one of the following naming patterns:" +"PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases" +" MUST use one of the following naming patterns:" msgstr "" -"PyPI prend en charge les préversions (alpha, bêta, version candidate). Les" -" préversions DOIVENT utiliser l'un des modèles de dénomination suivants :" +"PyPI prend en charge les préversions (alpha, bêta, version candidate). " +"Les préversions DOIVENT utiliser l'un des modèles de dénomination " +"suivants :" #: ../../source/contributor-how-to-release-flower.rst:35 msgid "Alpha: ``MAJOR.MINOR.PATCHaN``" @@ -1318,21 +1320,23 @@ msgid "Request for Flower Baselines" msgstr "Demande pour une nouvelle Flower Baseline" #: ../../source/contributor-ref-good-first-contributions.rst:25 +#, fuzzy msgid "" "If you are not familiar with Flower Baselines, you should probably check-" -"out our `contributing guide for baselines `_." +"out our `contributing guide for baselines " +"`_." msgstr "" "Si tu n'es pas familier avec les Flower Baselines, tu devrais " "probablement consulter notre `guide de contribution pour les baselines " "`_." #: ../../source/contributor-ref-good-first-contributions.rst:27 +#, fuzzy msgid "" "You should then check out the open `issues " "`_" " for baseline requests. If you find a baseline that you'd like to work on" -" and that has no assignes, feel free to assign it to yourself and start " +" and that has no assignees, feel free to assign it to yourself and start " "working on it!" msgstr "" "Tu devrais ensuite consulter les `issues ouvertes " @@ -1444,9 +1448,8 @@ msgstr "" #, fuzzy msgid "" "If you're familiar with how contributing on GitHub works, you can " -"directly checkout our `getting started guide for contributors " -"`_." +"directly checkout our :doc:`getting started guide for contributors " +"`." 
msgstr "" "Si tu es familier avec le fonctionnement des contributions sur GitHub, tu" " peux directement consulter notre `guide de démarrage pour les " @@ -1454,21 +1457,22 @@ msgstr "" "contributors.html>`_ et des exemples de `bonnes premières contributions " "`_." -#: ../../source/contributor-tutorial-contribute-on-github.rst:11 +#: ../../source/contributor-tutorial-contribute-on-github.rst:10 msgid "Setting up the repository" msgstr "Mise en place du référentiel" -#: ../../source/contributor-tutorial-contribute-on-github.rst:22 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "**Create a GitHub account and setup Git**" msgstr "**Créer un compte GitHub et configurer Git**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:14 +#: ../../source/contributor-tutorial-contribute-on-github.rst:13 +#, fuzzy msgid "" "Git is a distributed version control tool. This allows for an entire " "codebase's history to be stored and every developer's machine. It is a " "software that will need to be installed on your local machine, you can " -"follow this `guide `_ to set it up." +"follow this `guide `_ to set it up." msgstr "" "Git est un outil de contrôle de version distribué. Il permet de stocker " "l'historique d'une base de code entière sur la machine de chaque " @@ -1476,7 +1480,7 @@ msgstr "" "locale, tu peux suivre ce `guide `_ pour le mettre en place." -#: ../../source/contributor-tutorial-contribute-on-github.rst:17 +#: ../../source/contributor-tutorial-contribute-on-github.rst:16 msgid "" "GitHub, itself, is a code hosting platform for version control and " "collaboration. It allows for everyone to collaborate and work from " @@ -1486,7 +1490,7 @@ msgstr "" "contrôle des versions et la collaboration. Il permet à chacun de " "collaborer et de travailler de n'importe où sur des dépôts à distance." -#: ../../source/contributor-tutorial-contribute-on-github.rst:19 +#: ../../source/contributor-tutorial-contribute-on-github.rst:18 msgid "" "If you haven't already, you will need to create an account on `GitHub " "`_." @@ -1494,7 +1498,7 @@ msgstr "" "Si ce n'est pas déjà fait, tu devras créer un compte sur `GitHub " "`_." -#: ../../source/contributor-tutorial-contribute-on-github.rst:21 +#: ../../source/contributor-tutorial-contribute-on-github.rst:20 msgid "" "The idea behind the generic Git and GitHub workflow boils down to this: " "you download code from a remote repository on GitHub, make changes " @@ -1506,14 +1510,15 @@ msgstr "" " des modifications localement et tu en gardes une trace à l'aide de Git, " "puis tu télécharges ton nouvel historique à nouveau sur GitHub." -#: ../../source/contributor-tutorial-contribute-on-github.rst:33 +#: ../../source/contributor-tutorial-contribute-on-github.rst:32 msgid "**Forking the Flower repository**" msgstr "**Fourche le dépôt de Flower**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:25 +#: ../../source/contributor-tutorial-contribute-on-github.rst:24 +#, fuzzy msgid "" "A fork is a personal copy of a GitHub repository. To create one for " -"Flower, you must navigate to https://github.com/adap/flower (while " +"Flower, you must navigate to ``_ (while " "connected to your GitHub account) and click the ``Fork`` button situated " "on the top right of the page." msgstr "" @@ -1522,7 +1527,7 @@ msgstr "" "étant connecté à ton compte GitHub) et cliquer sur le bouton ``Fork`` " "situé en haut à droite de la page." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:30 +#: ../../source/contributor-tutorial-contribute-on-github.rst:29 msgid "" "You can change the name if you want, but this is not necessary as this " "version of Flower will be yours and will sit inside your own account " @@ -1535,11 +1540,11 @@ msgstr "" " devrais voir dans le coin supérieur gauche que tu es en train de " "regarder ta propre version de Flower." -#: ../../source/contributor-tutorial-contribute-on-github.rst:48 +#: ../../source/contributor-tutorial-contribute-on-github.rst:47 msgid "**Cloning your forked repository**" msgstr "**Clonage de ton dépôt forké**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:36 +#: ../../source/contributor-tutorial-contribute-on-github.rst:35 msgid "" "The next step is to download the forked repository on your machine to be " "able to make changes to it. On your forked repository page, you should " @@ -1551,7 +1556,7 @@ msgstr "" "forké, tu dois d'abord cliquer sur le bouton ``Code`` à droite, ce qui te" " permettra de copier le lien HTTPS du dépôt." -#: ../../source/contributor-tutorial-contribute-on-github.rst:42 +#: ../../source/contributor-tutorial-contribute-on-github.rst:41 msgid "" "Once you copied the \\, you can open a terminal on your machine, " "navigate to the place you want to download the repository to and type:" @@ -1560,7 +1565,7 @@ msgstr "" "machine, naviguer jusqu'à l'endroit où tu veux télécharger le référentiel" " et taper :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:48 +#: ../../source/contributor-tutorial-contribute-on-github.rst:47 #, fuzzy msgid "" "This will create a ``flower/`` (or the name of your fork if you renamed " @@ -1569,15 +1574,15 @@ msgstr "" "Cela créera un dossier `flower/` (ou le nom de ta fourche si tu l'as " "renommée) dans le répertoire de travail actuel." -#: ../../source/contributor-tutorial-contribute-on-github.rst:67 +#: ../../source/contributor-tutorial-contribute-on-github.rst:66 msgid "**Add origin**" msgstr "**Ajouter l'origine**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:51 +#: ../../source/contributor-tutorial-contribute-on-github.rst:50 msgid "You can then go into the repository folder:" msgstr "Tu peux ensuite aller dans le dossier du référentiel :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:57 +#: ../../source/contributor-tutorial-contribute-on-github.rst:56 msgid "" "And here we will need to add an origin to our repository. The origin is " "the \\ of the remote fork repository. To obtain it, we can do as " @@ -1589,7 +1594,7 @@ msgstr "" "indiqué précédemment en allant sur notre dépôt fork sur notre compte " "GitHub et en copiant le lien." -#: ../../source/contributor-tutorial-contribute-on-github.rst:62 +#: ../../source/contributor-tutorial-contribute-on-github.rst:61 msgid "" "Once the \\ is copied, we can type the following command in our " "terminal:" @@ -1597,26 +1602,27 @@ msgstr "" "Une fois que le \\ est copié, nous pouvons taper la commande " "suivante dans notre terminal :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:91 +#: ../../source/contributor-tutorial-contribute-on-github.rst:90 msgid "**Add upstream**" msgstr "**Ajouter en amont**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:70 +#: ../../source/contributor-tutorial-contribute-on-github.rst:69 +#, fuzzy msgid "" "Now we will add an upstream address to our repository. 
Still in the same " -"directroy, we must run the following command:" +"directory, we must run the following command:" msgstr "" "Nous allons maintenant ajouter une adresse en amont à notre dépôt. " "Toujours dans le même directroy, nous devons exécuter la commande " "suivante :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:77 +#: ../../source/contributor-tutorial-contribute-on-github.rst:76 msgid "The following diagram visually explains what we did in the previous steps:" msgstr "" "Le schéma suivant explique visuellement ce que nous avons fait dans les " "étapes précédentes :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:81 +#: ../../source/contributor-tutorial-contribute-on-github.rst:80 msgid "" "The upstream is the GitHub remote address of the parent repository (in " "this case Flower), i.e. the one we eventually want to contribute to and " @@ -1630,7 +1636,7 @@ msgstr "" "simplement l'adresse distante GitHub du dépôt forké que nous avons créé, " "c'est-à-dire la copie (fork) dans notre propre compte." -#: ../../source/contributor-tutorial-contribute-on-github.rst:85 +#: ../../source/contributor-tutorial-contribute-on-github.rst:84 msgid "" "To make sure our local version of the fork is up-to-date with the latest " "changes from the Flower repository, we can execute the following command:" @@ -1639,27 +1645,28 @@ msgstr "" "dernières modifications du dépôt Flower, nous pouvons exécuter la " "commande suivante :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:94 +#: ../../source/contributor-tutorial-contribute-on-github.rst:93 msgid "Setting up the coding environment" msgstr "Mise en place de l'environnement de codage" -#: ../../source/contributor-tutorial-contribute-on-github.rst:96 +#: ../../source/contributor-tutorial-contribute-on-github.rst:95 +#, fuzzy msgid "" -"This can be achieved by following this `getting started guide for " -"contributors`_ (note that you won't need to clone the repository). Once " -"you are able to write code and test it, you can finally start making " -"changes!" +"This can be achieved by following this :doc:`getting started guide for " +"contributors ` (note " +"that you won't need to clone the repository). Once you are able to write " +"code and test it, you can finally start making changes!" msgstr "" "Pour ce faire, tu peux suivre ce `guide de démarrage pour les " "contributeurs`_ (note que tu n'auras pas besoin de cloner le dépôt). Une " "fois que tu es capable d'écrire du code et de le tester, tu peux enfin " "commencer à faire des changements !" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:101 +#: ../../source/contributor-tutorial-contribute-on-github.rst:100 msgid "Making changes" msgstr "Apporter des changements" -#: ../../source/contributor-tutorial-contribute-on-github.rst:103 +#: ../../source/contributor-tutorial-contribute-on-github.rst:102 msgid "" "Before making any changes make sure you are up-to-date with your " "repository:" @@ -1667,15 +1674,15 @@ msgstr "" "Avant de faire des changements, assure-toi que tu es à jour avec ton " "référentiel :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:109 +#: ../../source/contributor-tutorial-contribute-on-github.rst:108 msgid "And with Flower's repository:" msgstr "Et avec le référentiel de Flower :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:123 +#: ../../source/contributor-tutorial-contribute-on-github.rst:122 msgid "**Create a new branch**" msgstr "**Créer une nouvelle branche**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:116 +#: ../../source/contributor-tutorial-contribute-on-github.rst:115 msgid "" "To make the history cleaner and easier to work with, it is good practice " "to create a new branch for each feature/project that needs to be " @@ -1685,7 +1692,7 @@ msgstr "" "une bonne pratique de créer une nouvelle branche pour chaque " "fonctionnalité/projet qui doit être mis en œuvre." -#: ../../source/contributor-tutorial-contribute-on-github.rst:119 +#: ../../source/contributor-tutorial-contribute-on-github.rst:118 msgid "" "To do so, just run the following command inside the repository's " "directory:" @@ -1693,21 +1700,21 @@ msgstr "" "Pour ce faire, il suffit d'exécuter la commande suivante dans le " "répertoire du référentiel :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:126 +#: ../../source/contributor-tutorial-contribute-on-github.rst:125 msgid "**Make changes**" msgstr "**Apporter des modifications**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:126 +#: ../../source/contributor-tutorial-contribute-on-github.rst:125 msgid "Write great code and create wonderful changes using your favorite editor!" msgstr "" "Écris du bon code et crée de merveilleuses modifications à l'aide de ton " "éditeur préféré !" -#: ../../source/contributor-tutorial-contribute-on-github.rst:139 +#: ../../source/contributor-tutorial-contribute-on-github.rst:138 msgid "**Test and format your code**" msgstr "**Teste et mets en forme ton code**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:129 +#: ../../source/contributor-tutorial-contribute-on-github.rst:128 msgid "" "Don't forget to test and format your code! Otherwise your code won't be " "able to be merged into the Flower repository. This is done so the " @@ -1717,15 +1724,15 @@ msgstr "" "pourra pas être fusionné dans le dépôt Flower, et ce, afin que la base de" " code reste cohérente et facile à comprendre." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:132 +#: ../../source/contributor-tutorial-contribute-on-github.rst:131 msgid "To do so, we have written a few scripts that you can execute:" msgstr "Pour ce faire, nous avons écrit quelques scripts que tu peux exécuter :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:151 +#: ../../source/contributor-tutorial-contribute-on-github.rst:150 msgid "**Stage changes**" msgstr "**Changements de scène**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:142 +#: ../../source/contributor-tutorial-contribute-on-github.rst:141 msgid "" "Before creating a commit that will update your history, you must specify " "to Git which files it needs to take into account." @@ -1733,11 +1740,11 @@ msgstr "" "Avant de créer un commit qui mettra à jour ton historique, tu dois " "spécifier à Git les fichiers qu'il doit prendre en compte." -#: ../../source/contributor-tutorial-contribute-on-github.rst:144 +#: ../../source/contributor-tutorial-contribute-on-github.rst:143 msgid "This can be done with:" msgstr "Cela peut se faire avec :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:150 +#: ../../source/contributor-tutorial-contribute-on-github.rst:149 msgid "" "To check which files have been modified compared to the last version " "(last commit) and to see which files are staged for commit, you can use " @@ -1747,11 +1754,11 @@ msgstr "" "version (last commit) et pour voir quels fichiers sont mis à disposition " "pour le commit, tu peux utiliser la commande :code:`git status`." -#: ../../source/contributor-tutorial-contribute-on-github.rst:161 +#: ../../source/contributor-tutorial-contribute-on-github.rst:160 msgid "**Commit changes**" msgstr "**Commit changes**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:154 +#: ../../source/contributor-tutorial-contribute-on-github.rst:153 msgid "" "Once you have added all the files you wanted to commit using :code:`git " "add`, you can finally create your commit using this command:" @@ -1760,7 +1767,7 @@ msgstr "" "l'aide de :code:`git add`, tu peux enfin créer ta livraison à l'aide de " "cette commande :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:160 +#: ../../source/contributor-tutorial-contribute-on-github.rst:159 msgid "" "The \\ is there to explain to others what the commit " "does. It should be written in an imperative style and be concise. An " @@ -1770,11 +1777,11 @@ msgstr "" "commit. Il doit être écrit dans un style impératif et être concis. Un " "exemple serait :code:`git commit -m \"Ajouter des images au README\"`." -#: ../../source/contributor-tutorial-contribute-on-github.rst:172 +#: ../../source/contributor-tutorial-contribute-on-github.rst:171 msgid "**Push the changes to the fork**" msgstr "**Pousser les changements vers la fourche**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:164 +#: ../../source/contributor-tutorial-contribute-on-github.rst:163 msgid "" "Once we have committed our changes, we have effectively updated our local" " history, but GitHub has no way of knowing this unless we push our " @@ -1785,7 +1792,7 @@ msgstr "" "moyen de le savoir à moins que nous ne poussions nos modifications vers " "l'adresse distante de notre origine :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:171 +#: ../../source/contributor-tutorial-contribute-on-github.rst:170 msgid "" "Once this is done, you will see on the GitHub that your forked repo was " "updated with the changes you have made." 
@@ -1793,15 +1800,15 @@ msgstr "" "Une fois que c'est fait, tu verras sur GitHub que ton repo forké a été " "mis à jour avec les modifications que tu as apportées." -#: ../../source/contributor-tutorial-contribute-on-github.rst:175 +#: ../../source/contributor-tutorial-contribute-on-github.rst:174 msgid "Creating and merging a pull request (PR)" msgstr "Créer et fusionner une pull request (PR)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +#: ../../source/contributor-tutorial-contribute-on-github.rst:205 msgid "**Create the PR**" msgstr "**Créer le PR**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:178 +#: ../../source/contributor-tutorial-contribute-on-github.rst:177 msgid "" "Once you have pushed changes, on the GitHub webpage of your repository " "you should see the following message:" @@ -1809,12 +1816,12 @@ msgstr "" "Une fois que tu as poussé les modifications, sur la page web GitHub de " "ton dépôt, tu devrais voir le message suivant :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:182 +#: ../../source/contributor-tutorial-contribute-on-github.rst:181 #, fuzzy msgid "Otherwise you can always find this option in the ``Branches`` page." msgstr "Sinon, tu peux toujours trouver cette option dans la page `Branches`." -#: ../../source/contributor-tutorial-contribute-on-github.rst:184 +#: ../../source/contributor-tutorial-contribute-on-github.rst:183 #, fuzzy msgid "" "Once you click the ``Compare & pull request`` button, you should see " @@ -1823,13 +1830,13 @@ msgstr "" "Une fois que tu as cliqué sur le bouton `Compare & pull request`, tu " "devrais voir quelque chose de similaire à ceci :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:188 +#: ../../source/contributor-tutorial-contribute-on-github.rst:187 msgid "At the top you have an explanation of which branch will be merged where:" msgstr "" "En haut, tu as une explication de quelle branche sera fusionnée à quel " "endroit :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:192 +#: ../../source/contributor-tutorial-contribute-on-github.rst:191 msgid "" "In this example you can see that the request is to merge the branch " "``doc-fixes`` from my forked repository to branch ``main`` from the " @@ -1839,7 +1846,7 @@ msgstr "" "branche ``doc-fixes`` de mon dépôt forké à la branche ``main`` du dépôt " "Flower." -#: ../../source/contributor-tutorial-contribute-on-github.rst:194 +#: ../../source/contributor-tutorial-contribute-on-github.rst:193 msgid "" "The input box in the middle is there for you to describe what your PR " "does and to link it to existing issues. We have placed comments (that " @@ -1851,7 +1858,7 @@ msgstr "" "commentaires (qui ne seront pas rendus une fois le PR ouvert) pour te " "guider tout au long du processus." -#: ../../source/contributor-tutorial-contribute-on-github.rst:197 +#: ../../source/contributor-tutorial-contribute-on-github.rst:196 msgid "" "It is important to follow the instructions described in comments. For " "instance, in order to not break how our changelog system works, you " @@ -1860,7 +1867,7 @@ msgid "" ":ref:`changelogentry` appendix." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:201 +#: ../../source/contributor-tutorial-contribute-on-github.rst:200 msgid "" "At the bottom you will find the button to open the PR. 
This will notify " "reviewers that a new PR has been opened and that they should look over it" @@ -1870,7 +1877,7 @@ msgstr "" "qui informera les réviseurs qu'un nouveau PR a été ouvert et qu'ils " "doivent le consulter pour le fusionner ou demander des modifications." -#: ../../source/contributor-tutorial-contribute-on-github.rst:204 +#: ../../source/contributor-tutorial-contribute-on-github.rst:203 msgid "" "If your PR is not yet ready for review, and you don't want to notify " "anyone, you have the option to create a draft pull request:" @@ -1879,11 +1886,11 @@ msgstr "" " personne, tu as la possibilité de créer un brouillon de demande de " "traction :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:208 msgid "**Making new changes**" msgstr "**Faire de nouveaux changements**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:208 msgid "" "Once the PR has been opened (as draft or not), you can still push new " "commits to it the same way we did before, by making changes to the branch" @@ -1893,11 +1900,11 @@ msgstr "" "toujours y pousser de nouveaux commits de la même manière qu'auparavant, " "en apportant des modifications à la branche associée au PR." -#: ../../source/contributor-tutorial-contribute-on-github.rst:231 +#: ../../source/contributor-tutorial-contribute-on-github.rst:230 msgid "**Review the PR**" msgstr "**Review the PR**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:212 +#: ../../source/contributor-tutorial-contribute-on-github.rst:211 msgid "" "Once the PR has been opened or once the draft PR has been marked as " "ready, a review from code owners will be automatically requested:" @@ -1906,7 +1913,7 @@ msgstr "" " étant prêt, une révision des propriétaires de code sera automatiquement " "demandée :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:216 +#: ../../source/contributor-tutorial-contribute-on-github.rst:215 msgid "" "Code owners will then look into the code, ask questions, request changes " "or validate the PR." @@ -1914,11 +1921,11 @@ msgstr "" "Les propriétaires du code vont alors se pencher sur le code, poser des " "questions, demander des modifications ou valider le RP." -#: ../../source/contributor-tutorial-contribute-on-github.rst:218 +#: ../../source/contributor-tutorial-contribute-on-github.rst:217 msgid "Merging will be blocked if there are ongoing requested changes." msgstr "La fusion sera bloquée s'il y a des changements demandés en cours." -#: ../../source/contributor-tutorial-contribute-on-github.rst:222 +#: ../../source/contributor-tutorial-contribute-on-github.rst:221 msgid "" "To resolve them, just push the necessary changes to the branch associated" " with the PR:" @@ -1926,11 +1933,11 @@ msgstr "" "Pour les résoudre, il suffit de pousser les changements nécessaires vers " "la branche associée au PR :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:226 +#: ../../source/contributor-tutorial-contribute-on-github.rst:225 msgid "And resolve the conversation:" msgstr "Et résous la conversation :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:230 +#: ../../source/contributor-tutorial-contribute-on-github.rst:229 msgid "" "Once all the conversations have been resolved, you can re-request a " "review." @@ -1938,11 +1945,11 @@ msgstr "" "Une fois que toutes les conversations ont été résolues, tu peux " "redemander un examen." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:251 +#: ../../source/contributor-tutorial-contribute-on-github.rst:250 msgid "**Once the PR is merged**" msgstr "**Une fois que le PR est fusionné**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:234 +#: ../../source/contributor-tutorial-contribute-on-github.rst:233 msgid "" "If all the automatic tests have passed and reviewers have no more changes" " to request, they can approve the PR and merge it." @@ -1951,7 +1958,7 @@ msgstr "" " de modifications à demander, ils peuvent approuver le PR et le " "fusionner." -#: ../../source/contributor-tutorial-contribute-on-github.rst:238 +#: ../../source/contributor-tutorial-contribute-on-github.rst:237 msgid "" "Once it is merged, you can delete the branch on GitHub (a button should " "appear to do so) and also delete it locally by doing:" @@ -1960,36 +1967,38 @@ msgstr "" "(un bouton devrait apparaître pour le faire) et aussi la supprimer " "localement en faisant :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:245 +#: ../../source/contributor-tutorial-contribute-on-github.rst:244 msgid "Then you should update your forked repository by doing:" msgstr "Ensuite, tu dois mettre à jour ton dépôt forké en faisant :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:254 +#: ../../source/contributor-tutorial-contribute-on-github.rst:253 msgid "Example of first contribution" msgstr "Exemple de première contribution" -#: ../../source/contributor-tutorial-contribute-on-github.rst:257 +#: ../../source/contributor-tutorial-contribute-on-github.rst:256 msgid "Problem" msgstr "Problème" -#: ../../source/contributor-tutorial-contribute-on-github.rst:259 +#: ../../source/contributor-tutorial-contribute-on-github.rst:258 +#, fuzzy msgid "" -"For our documentation, we’ve started to use the `Diàtaxis framework " +"For our documentation, we've started to use the `Diàtaxis framework " "`_." msgstr "" "Pour notre documentation, nous avons commencé à utiliser le cadre " "`Diàtaxis `_." -#: ../../source/contributor-tutorial-contribute-on-github.rst:261 +#: ../../source/contributor-tutorial-contribute-on-github.rst:260 +#, fuzzy msgid "" -"Our “How to” guides should have titles that continue the sencence “How to" -" …”, for example, “How to upgrade to Flower 1.0”." +"Our \"How to\" guides should have titles that continue the sentence \"How" +" to …\", for example, \"How to upgrade to Flower 1.0\"." msgstr "" "Nos guides \"Comment faire\" devraient avoir des titres qui poursuivent " "la phrase \"Comment faire pour...\", par exemple, \"Comment passer à " "Flower 1.0\"." -#: ../../source/contributor-tutorial-contribute-on-github.rst:263 +#: ../../source/contributor-tutorial-contribute-on-github.rst:262 msgid "" "Most of our guides do not follow this new format yet, and changing their " "title is (unfortunately) more involved than one might think." @@ -1998,50 +2007,55 @@ msgstr "" "changer leur titre est (malheureusement) plus compliqué qu'on ne le " "pense." -#: ../../source/contributor-tutorial-contribute-on-github.rst:265 +#: ../../source/contributor-tutorial-contribute-on-github.rst:264 +#, fuzzy msgid "" -"This issue is about changing the title of a doc from present continious " +"This issue is about changing the title of a doc from present continuous " "to present simple." msgstr "" "Cette question porte sur le changement du titre d'un document du présent " "continu au présent simple." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:267 +#: ../../source/contributor-tutorial-contribute-on-github.rst:266 +#, fuzzy msgid "" -"Let's take the example of “Saving Progress” which we changed to “Save " -"Progress”. Does this pass our check?" +"Let's take the example of \"Saving Progress\" which we changed to \"Save " +"Progress\". Does this pass our check?" msgstr "" "Prenons l'exemple de \"Sauvegarder la progression\" que nous avons " "remplacé par \"Sauvegarder la progression\". Est-ce que cela passe notre " "contrôle ?" -#: ../../source/contributor-tutorial-contribute-on-github.rst:269 -msgid "Before: ”How to saving progress” ❌" +#: ../../source/contributor-tutorial-contribute-on-github.rst:268 +#, fuzzy +msgid "Before: \"How to saving progress\" ❌" msgstr "Avant : \"Comment sauvegarder les progrès\" ❌" -#: ../../source/contributor-tutorial-contribute-on-github.rst:271 -msgid "After: ”How to save progress” ✅" +#: ../../source/contributor-tutorial-contribute-on-github.rst:270 +#, fuzzy +msgid "After: \"How to save progress\" ✅" msgstr "Après : \"Comment sauvegarder la progression\" ✅" -#: ../../source/contributor-tutorial-contribute-on-github.rst:274 +#: ../../source/contributor-tutorial-contribute-on-github.rst:273 msgid "Solution" msgstr "Solution" -#: ../../source/contributor-tutorial-contribute-on-github.rst:276 +#: ../../source/contributor-tutorial-contribute-on-github.rst:275 +#, fuzzy msgid "" -"This is a tiny change, but it’ll allow us to test your end-to-end setup. " -"After cloning and setting up the Flower repo, here’s what you should do:" +"This is a tiny change, but it'll allow us to test your end-to-end setup. " +"After cloning and setting up the Flower repo, here's what you should do:" msgstr "" "C'est un tout petit changement, mais il nous permettra de tester ta " "configuration de bout en bout. Après avoir cloné et configuré le repo " "Flower, voici ce que tu dois faire :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:278 +#: ../../source/contributor-tutorial-contribute-on-github.rst:277 #, fuzzy msgid "Find the source file in ``doc/source``" msgstr "Trouve le fichier source dans `doc/source`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#: ../../source/contributor-tutorial-contribute-on-github.rst:278 #, fuzzy msgid "" "Make the change in the ``.rst`` file (beware, the dashes under the title " @@ -2050,20 +2064,20 @@ msgstr "" "Effectue la modification dans le fichier `.rst` (attention, les tirets " "sous le titre doivent être de la même longueur que le titre lui-même)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:280 +#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#, fuzzy msgid "" -"Build the docs and check the result: ``_" msgstr "" -"Construis les documents et vérifie le résultat : " -"``_" +"Construis les documents et vérifie le résultat : ``_" -#: ../../source/contributor-tutorial-contribute-on-github.rst:283 +#: ../../source/contributor-tutorial-contribute-on-github.rst:282 msgid "Rename file" msgstr "Renommer le fichier" -#: ../../source/contributor-tutorial-contribute-on-github.rst:285 +#: ../../source/contributor-tutorial-contribute-on-github.rst:284 msgid "" "You might have noticed that the file name still reflects the old wording." " If we just change the file, then we break all existing links to it - it " @@ -2076,21 +2090,22 @@ msgstr "" "important** d'éviter cela, car briser des liens peut nuire à notre " "classement dans les moteurs de recherche." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:288 -msgid "Here’s how to change the file name:" +#: ../../source/contributor-tutorial-contribute-on-github.rst:287 +#, fuzzy +msgid "Here's how to change the file name:" msgstr "Voici comment changer le nom du fichier :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +#: ../../source/contributor-tutorial-contribute-on-github.rst:289 #, fuzzy msgid "Change the file name to ``save-progress.rst``" msgstr "Change le nom du fichier en `save-progress.rst`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:291 +#: ../../source/contributor-tutorial-contribute-on-github.rst:290 #, fuzzy msgid "Add a redirect rule to ``doc/source/conf.py``" msgstr "Ajouter une règle de redirection à `doc/source/conf.py`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:293 +#: ../../source/contributor-tutorial-contribute-on-github.rst:292 #, fuzzy msgid "" "This will cause a redirect from ``saving-progress.html`` to ``save-" @@ -2099,11 +2114,11 @@ msgstr "" "Cela entraînera une redirection de `saving-progress.html` vers `save-" "progress.html`, les anciens liens continueront à fonctionner." -#: ../../source/contributor-tutorial-contribute-on-github.rst:296 +#: ../../source/contributor-tutorial-contribute-on-github.rst:295 msgid "Apply changes in the index file" msgstr "Applique les changements dans le fichier d'index" -#: ../../source/contributor-tutorial-contribute-on-github.rst:298 +#: ../../source/contributor-tutorial-contribute-on-github.rst:297 #, fuzzy msgid "" "For the lateral navigation bar to work properly, it is very important to " @@ -2114,46 +2129,47 @@ msgstr "" "très important de mettre également à jour le fichier `index.rst`. C'est " "là que nous définissons toute l'arborescence de la barre de navigation." -#: ../../source/contributor-tutorial-contribute-on-github.rst:301 +#: ../../source/contributor-tutorial-contribute-on-github.rst:300 #, fuzzy msgid "Find and modify the file name in ``index.rst``" msgstr "Trouve et modifie le nom du fichier dans `index.rst`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:304 +#: ../../source/contributor-tutorial-contribute-on-github.rst:303 msgid "Open PR" msgstr "Open PR" -#: ../../source/contributor-tutorial-contribute-on-github.rst:306 +#: ../../source/contributor-tutorial-contribute-on-github.rst:305 +#, fuzzy msgid "" -"Commit the changes (commit messages are always imperative: “Do " -"something”, in this case “Change …”)" +"Commit the changes (commit messages are always imperative: \"Do " +"something\", in this case \"Change …\")" msgstr "" "Valide les modifications (les messages de validation sont toujours " "impératifs : \"Fais quelque chose\", dans ce cas \"Modifie...\")" -#: ../../source/contributor-tutorial-contribute-on-github.rst:307 +#: ../../source/contributor-tutorial-contribute-on-github.rst:306 msgid "Push the changes to your fork" msgstr "Transmets les changements à ta fourchette" -#: ../../source/contributor-tutorial-contribute-on-github.rst:308 +#: ../../source/contributor-tutorial-contribute-on-github.rst:307 msgid "Open a PR (as shown above)" msgstr "Ouvre un RP (comme indiqué ci-dessus)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:309 +#: ../../source/contributor-tutorial-contribute-on-github.rst:308 msgid "Wait for it to be approved!" msgstr "Attends qu'elle soit approuvée !" 
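To make the redirect step above concrete: if the project used the ``sphinx-reredirects`` extension (an assumption; this file does not show how ``doc/source/conf.py`` actually implements its redirects), the rule could look roughly like this.

.. code-block:: python

    # Hypothetical excerpt from doc/source/conf.py, assuming sphinx-reredirects.
    extensions = [
        "sphinx_reredirects",
        # ...the project's other extensions...
    ]

    # Requests for the old page keep working and land on the renamed one.
    redirects = {
        "saving-progress": "save-progress.html",
    }

Whatever the exact mechanism, the effect is the one described above: ``saving-progress.html`` keeps resolving to the renamed page, so existing links and search rankings are preserved.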
-#: ../../source/contributor-tutorial-contribute-on-github.rst:310 +#: ../../source/contributor-tutorial-contribute-on-github.rst:309 msgid "Congrats! 🥳 You're now officially a Flower contributor!" msgstr "" "Félicitations 🥳 Tu es désormais officiellement une contributrice de " "Flower !" -#: ../../source/contributor-tutorial-contribute-on-github.rst:314 +#: ../../source/contributor-tutorial-contribute-on-github.rst:313 msgid "How to write a good PR title" msgstr "Comment écrire un bon titre de PR" -#: ../../source/contributor-tutorial-contribute-on-github.rst:316 +#: ../../source/contributor-tutorial-contribute-on-github.rst:315 msgid "" "A well-crafted PR title helps team members quickly understand the purpose" " and scope of the changes being proposed. Here's a guide to help you " @@ -2163,7 +2179,7 @@ msgstr "" "comprendre l'intérêt et le scope des changements proposés. Voici un guide" " pour vous aider à écrire des bons titres de PR :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:318 +#: ../../source/contributor-tutorial-contribute-on-github.rst:317 msgid "" "1. Be Clear and Concise: Provide a clear summary of the changes in a " "concise manner. 1. Use Actionable Verbs: Start with verbs like \"Add,\" " @@ -2181,7 +2197,7 @@ msgstr "" "capitalisation et une ponctuation : Suivre les règles de grammaire pour " "la clarté." -#: ../../source/contributor-tutorial-contribute-on-github.rst:324 +#: ../../source/contributor-tutorial-contribute-on-github.rst:323 msgid "" "Let's start with a few examples for titles that should be avoided because" " they do not provide meaningful information:" @@ -2189,27 +2205,27 @@ msgstr "" "Commençons par quelques exemples de titres qui devraient être évités " "parce qu'ils ne fournissent pas d'information significative :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:326 +#: ../../source/contributor-tutorial-contribute-on-github.rst:325 msgid "Implement Algorithm" msgstr "Implement Algorithm" -#: ../../source/contributor-tutorial-contribute-on-github.rst:327 +#: ../../source/contributor-tutorial-contribute-on-github.rst:326 msgid "Database" msgstr "Database" -#: ../../source/contributor-tutorial-contribute-on-github.rst:328 +#: ../../source/contributor-tutorial-contribute-on-github.rst:327 msgid "Add my_new_file.py to codebase" msgstr "Add my_new_file.py to codebase" -#: ../../source/contributor-tutorial-contribute-on-github.rst:329 +#: ../../source/contributor-tutorial-contribute-on-github.rst:328 msgid "Improve code in module" msgstr "Improve code in module" -#: ../../source/contributor-tutorial-contribute-on-github.rst:330 +#: ../../source/contributor-tutorial-contribute-on-github.rst:329 msgid "Change SomeModule" msgstr "Change SomeModule" -#: ../../source/contributor-tutorial-contribute-on-github.rst:332 +#: ../../source/contributor-tutorial-contribute-on-github.rst:331 msgid "" "Here are a few positive examples which provide helpful information " "without repeating how they do it, as that is already visible in the " @@ -2219,24 +2235,24 @@ msgstr "" "répéter comment ils le font, comme cela est déjà visible dans la section " "\"Files changed\" de la PR :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:334 +#: ../../source/contributor-tutorial-contribute-on-github.rst:333 msgid "Update docs banner to mention Flower Summit 2023" msgstr "Update docs banner to mention Flower Summit 2023" -#: ../../source/contributor-tutorial-contribute-on-github.rst:335 +#: 
../../source/contributor-tutorial-contribute-on-github.rst:334 msgid "Remove unnecessary XGBoost dependency" msgstr "Remove unnecessary XGBoost dependency" -#: ../../source/contributor-tutorial-contribute-on-github.rst:336 +#: ../../source/contributor-tutorial-contribute-on-github.rst:335 msgid "Remove redundant attributes in strategies subclassing FedAvg" msgstr "Remove redundant attributes in strategies subclassing FedAvg" -#: ../../source/contributor-tutorial-contribute-on-github.rst:337 +#: ../../source/contributor-tutorial-contribute-on-github.rst:336 #, fuzzy msgid "Add CI job to deploy the staging system when the ``main`` branch changes" msgstr "Add CI job to deploy the staging system when the `main` branch changes" -#: ../../source/contributor-tutorial-contribute-on-github.rst:338 +#: ../../source/contributor-tutorial-contribute-on-github.rst:337 msgid "" "Add new amazing library which will be used to improve the simulation " "engine" @@ -2244,7 +2260,7 @@ msgstr "" "Add new amazing library which will be used to improve the simulation " "engine" -#: ../../source/contributor-tutorial-contribute-on-github.rst:342 +#: ../../source/contributor-tutorial-contribute-on-github.rst:341 #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:548 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:946 #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:727 @@ -2253,7 +2269,7 @@ msgstr "" msgid "Next steps" msgstr "Prochaines étapes" -#: ../../source/contributor-tutorial-contribute-on-github.rst:344 +#: ../../source/contributor-tutorial-contribute-on-github.rst:343 msgid "" "Once you have made your first PR, and want to contribute more, be sure to" " check out the following :" @@ -2261,148 +2277,149 @@ msgstr "" "Une fois que tu auras fait ton premier RP, et que tu voudras contribuer " "davantage, ne manque pas de consulter les sites suivants :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:346 +#: ../../source/contributor-tutorial-contribute-on-github.rst:345 +#, fuzzy msgid "" -"`Good first contributions `_, where you should particularly look " -"into the :code:`baselines` contributions." +":doc:`Good first contributions `, where you should particularly look into the " +":code:`baselines` contributions." msgstr "" "`Bonnes premières contributions `_, où vous devriez " "particulièrement regarder les contributions :code:`baselines`." -#: ../../source/contributor-tutorial-contribute-on-github.rst:350 +#: ../../source/contributor-tutorial-contribute-on-github.rst:349 #: ../../source/fed/0000-20200102-fed-template.md:60 msgid "Appendix" msgstr "Annexe" -#: ../../source/contributor-tutorial-contribute-on-github.rst:355 +#: ../../source/contributor-tutorial-contribute-on-github.rst:354 #, fuzzy msgid "Changelog entry" msgstr "Changelog" -#: ../../source/contributor-tutorial-contribute-on-github.rst:357 +#: ../../source/contributor-tutorial-contribute-on-github.rst:356 msgid "" "When opening a new PR, inside its description, there should be a " "``Changelog entry`` header." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:359 +#: ../../source/contributor-tutorial-contribute-on-github.rst:358 msgid "" "Above this header you should see the following comment that explains how " "to write your changelog entry:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:361 +#: ../../source/contributor-tutorial-contribute-on-github.rst:360 msgid "" "Inside the following 'Changelog entry' section, you should put the " "description of your changes that will be added to the changelog alongside" " your PR title." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:364 +#: ../../source/contributor-tutorial-contribute-on-github.rst:363 msgid "" -"If the section is completely empty (without any token) or non-existant, " +"If the section is completely empty (without any token) or non-existent, " "the changelog will just contain the title of the PR for the changelog " "entry, without any description." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:367 +#: ../../source/contributor-tutorial-contribute-on-github.rst:366 msgid "" "If the section contains some text other than tokens, it will use it to " "add a description to the change." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:369 +#: ../../source/contributor-tutorial-contribute-on-github.rst:368 msgid "" "If the section contains one of the following tokens it will ignore any " "other text and put the PR under the corresponding section of the " "changelog:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:371 +#: ../../source/contributor-tutorial-contribute-on-github.rst:370 msgid " is for classifying a PR as a general improvement." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:373 +#: ../../source/contributor-tutorial-contribute-on-github.rst:372 msgid " is to not add the PR to the changelog" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:375 +#: ../../source/contributor-tutorial-contribute-on-github.rst:374 msgid " is to add a general baselines change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:377 +#: ../../source/contributor-tutorial-contribute-on-github.rst:376 msgid " is to add a general examples change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:379 +#: ../../source/contributor-tutorial-contribute-on-github.rst:378 msgid " is to add a general sdk change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:381 +#: ../../source/contributor-tutorial-contribute-on-github.rst:380 msgid " is to add a general simulations change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:383 +#: ../../source/contributor-tutorial-contribute-on-github.rst:382 msgid "Note that only one token should be used." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:385 +#: ../../source/contributor-tutorial-contribute-on-github.rst:384 msgid "" "Its content must have a specific format. 
We will break down what each " "possibility does:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:387 +#: ../../source/contributor-tutorial-contribute-on-github.rst:386 msgid "" "If the ``### Changelog entry`` section contains nothing or doesn't exist," " the following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:391 +#: ../../source/contributor-tutorial-contribute-on-github.rst:390 msgid "" "If the ``### Changelog entry`` section contains a description (and no " "token), the following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:397 +#: ../../source/contributor-tutorial-contribute-on-github.rst:396 msgid "" "If the ``### Changelog entry`` section contains ````, nothing will " "change in the changelog." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:399 +#: ../../source/contributor-tutorial-contribute-on-github.rst:398 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:403 +#: ../../source/contributor-tutorial-contribute-on-github.rst:402 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:407 +#: ../../source/contributor-tutorial-contribute-on-github.rst:406 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:411 +#: ../../source/contributor-tutorial-contribute-on-github.rst:410 msgid "" "If the ``### Changelog entry`` section contains ````, the following " "text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:415 +#: ../../source/contributor-tutorial-contribute-on-github.rst:414 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:419 +#: ../../source/contributor-tutorial-contribute-on-github.rst:418 msgid "" "Note that only one token must be provided, otherwise, only the first " "action (in the order listed above), will be performed." @@ -2436,10 +2453,11 @@ msgstr "" "virtualenv>`_" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:12 +#, fuzzy msgid "" "Flower uses :code:`pyproject.toml` to manage dependencies and configure " "development tools (the ones which support it). Poetry is a build tool " -"which supports `PEP 517 `_." +"which supports `PEP 517 `_." msgstr "" "Flower utilise un fichier :code:`pyproject.toml` pour gérer les " "dependences et configurer les outils de développement (du moins ceux qui " @@ -2645,9 +2663,9 @@ msgid "" "`_, a federated training strategy " "designed for non-iid data. We are using PyTorch to train a Convolutional " "Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " -"When applying FedBN, only few changes needed compared to `Example: " -"PyTorch - From Centralized To Federated `_." +"When applying FedBN, only few changes needed compared to :doc:`Example: " +"PyTorch - From Centralized To Federated `." 
msgstr "" "Ce tutoriel te montrera comment utiliser Flower pour construire une " "version fédérée d'une charge de travail d'apprentissage automatique " @@ -2668,10 +2686,10 @@ msgstr "Formation centralisée" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 #, fuzzy msgid "" -"All files are revised based on `Example: PyTorch - From Centralized To " -"Federated `_. The only thing to do is modifying the file called " -":code:`cifar.py`, revised part is shown below:" +"All files are revised based on :doc:`Example: PyTorch - From Centralized " +"To Federated `. The only " +"thing to do is modifying the file called :code:`cifar.py`, revised part " +"is shown below:" msgstr "" "Tous les fichiers sont révisés sur la base de `Exemple : PyTorch - From " "Centralized To Federated `_, the following parts are easy to follow, onyl " -":code:`get_parameters` and :code:`set_parameters` function in " -":code:`client.py` needed to revise. If not, please read the `Example: " -"PyTorch - From Centralized To Federated `_. first." +"If you have read :doc:`Example: PyTorch - From Centralized To Federated " +"`, the following parts are" +" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " +"function in :code:`client.py` needed to revise. If not, please read the " +":doc:`Example: PyTorch - From Centralized To Federated `. first." msgstr "" "Si vous avez lu `Exemple : PyTorch - From Centralized To Federated " "`_. This " -"will allow you see how easy it is to wrap your code with Flower and begin" -" training in a federated way. We provide you with two helper scripts, " -"namely *run-server.sh*, and *run-clients.sh*. Don't be afraid to look " -"inside, they are simple enough =)." -msgstr "" -"Maintenant que nous avons installé toutes nos dépendances, lançons un " -"simple entraînement distribué avec deux clients et un serveur. Notre " -"procédure d'entraînement et l'architecture de notre réseau sont basées " -"sur l'exemple MNIST de base de PyTorch " -"`_. Cela te " -"permettra de voir à quel point il est facile d'envelopper ton code avec " -"Flower et de commencer l'entraînement de manière fédérée. Nous te " -"fournissons deux scripts d'aide, à savoir *run-server.sh*, et *run-" -"clients.sh*. N'aie pas peur de regarder à l'intérieur, ils sont assez " -"simples =)." - -#: ../../source/example-walkthrough-pytorch-mnist.rst:31 -msgid "" -"Go ahead and launch on a terminal the *run-server.sh* script first as " -"follows:" -msgstr "Lance sur un terminal le script *run-server.sh* d'abord comme suit :" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:38 -msgid "Now that the server is up and running, go ahead and launch the clients." -msgstr "Maintenant que le serveur est opérationnel, vas-y et lance les clients." - -#: ../../source/example-walkthrough-pytorch-mnist.rst:45 -msgid "" -"Et voilà! You should be seeing the training procedure and, after a few " -"iterations, the test accuracy for each client." -msgstr "" -"Et voilà ! Tu devrais voir la procédure d'entraînement et, après quelques" -" itérations, la précision du test pour chaque client." - -#: ../../source/example-walkthrough-pytorch-mnist.rst:66 -msgid "Now, let's see what is really happening inside." -msgstr "Maintenant, voyons ce qui se passe réellement à l'intérieur." 
- -#: ../../source/example-walkthrough-pytorch-mnist.rst:69 -#: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-mxnet.rst:226 -#: ../../source/tutorial-quickstart-pytorch.rst:203 -#: ../../source/tutorial-quickstart-scikitlearn.rst:157 -#: ../../source/tutorial-quickstart-tensorflow.rst:98 -#: ../../source/tutorial-quickstart-xgboost.rst:309 -msgid "Flower Server" -msgstr "Serveur de Flower" +#: ../../source/explanation-differential-privacy.rst:2 +#: ../../source/explanation-differential-privacy.rst:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 +#, fuzzy +msgid "Differential Privacy" +msgstr "Confidentialité différentielle" -#: ../../source/example-walkthrough-pytorch-mnist.rst:71 +#: ../../source/explanation-differential-privacy.rst:3 msgid "" -"Inside the server helper script *run-server.sh* you will find the " -"following code that basically runs the :code:`server.py`" +"The information in datasets like healthcare, financial transactions, user" +" preferences, etc., is valuable and has the potential for scientific " +"breakthroughs and provides important business insights. However, such " +"data is also sensitive and there is a risk of compromising individual " +"privacy." msgstr "" -"Dans le script d'aide au serveur *run-server.sh*, tu trouveras le code " -"suivant qui exécute le fichier :code:`server.py`" -#: ../../source/example-walkthrough-pytorch-mnist.rst:78 +#: ../../source/explanation-differential-privacy.rst:6 msgid "" -"We can go a bit deeper and see that :code:`server.py` simply launches a " -"server that will coordinate three rounds of training. Flower Servers are " -"very customizable, but for simple workloads, we can start a server using " -"the :ref:`start_server ` function and " -"leave all the configuration possibilities at their default values, as " -"seen below." +"Traditional methods like anonymization alone would not work because of " +"attacks like Re-identification and Data Linkage. That's where " +"differential privacy comes in. It provides the possibility of analyzing " +"data while ensuring the privacy of individuals." msgstr "" -"Nous pouvons aller un peu plus loin et voir que :code:`server.py` lance " -"simplement un serveur qui coordonnera trois tours de formation. Flower " -"Les serveurs sont très personnalisables, mais pour les charges de travail" -" simples, nous pouvons démarrer un serveur à l'aide de la fonction " -":ref:`start_server ` et laisser toutes " -"les possibilités de configuration à leurs valeurs par défaut, comme on " -"peut le voir ci-dessous." - -#: ../../source/example-walkthrough-pytorch-mnist.rst:89 -#: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-mxnet.rst:36 -#: ../../source/tutorial-quickstart-pytorch.rst:37 -#: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-tensorflow.rst:29 -#: ../../source/tutorial-quickstart-xgboost.rst:55 -msgid "Flower Client" -msgstr "Client de la fleur" -#: ../../source/example-walkthrough-pytorch-mnist.rst:91 +#: ../../source/explanation-differential-privacy.rst:12 msgid "" -"Next, let's take a look at the *run-clients.sh* file. You will see that " -"it contains the main loop that starts a set of *clients*." +"Imagine two datasets that are identical except for a single record (for " +"instance, Alice's data). 
Differential Privacy (DP) guarantees that any " +"analysis (M), like calculating the average income, will produce nearly " +"identical results for both datasets (O and O' would be similar). This " +"preserves group patterns while obscuring individual details, ensuring the" +" individual's information remains hidden in the crowd." msgstr "" -"Ensuite, jetons un coup d'œil au fichier *run-clients.sh*. Tu verras " -"qu'il contient la boucle principale qui démarre un ensemble de *clients*." -#: ../../source/example-walkthrough-pytorch-mnist.rst:100 -msgid "" -"**cid**: is the client ID. It is an integer that uniquely identifies " -"client identifier." +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "DP Intro" msgstr "" -"**cid** : c'est l'identifiant du client. C'est un nombre entier qui " -"identifie de façon unique l'identifiant du client." -#: ../../source/example-walkthrough-pytorch-mnist.rst:101 -msgid "**sever_address**: String that identifies IP and port of the server." -msgstr "**sever_address** : Chaîne qui identifie l'IP et le port du serveur." - -#: ../../source/example-walkthrough-pytorch-mnist.rst:102 +#: ../../source/explanation-differential-privacy.rst:22 msgid "" -"**nb_clients**: This defines the number of clients being created. This " -"piece of information is not required by the client, but it helps us " -"partition the original MNIST dataset to make sure that every client is " -"working on unique subsets of both *training* and *test* sets." +"One of the most commonly used mechanisms to achieve DP is adding enough " +"noise to the output of the analysis to mask the contribution of each " +"individual in the data while preserving the overall accuracy of the " +"analysis." msgstr "" -"**Cette information n'est pas requise par le client, mais elle nous aide " -"à partitionner l'ensemble de données MNIST original pour nous assurer que" -" chaque client travaille sur des sous-ensembles uniques des ensembles " -"*formation* et *test*." -#: ../../source/example-walkthrough-pytorch-mnist.rst:104 +#: ../../source/explanation-differential-privacy.rst:25 #, fuzzy -msgid "" -"Again, we can go deeper and look inside :code:`flwr_example/quickstart-" -"pytorch/client.py`. After going through the argument parsing code at the " -"beginning of our :code:`main` function, you will find a call to " -":code:`mnist.load_data`. This function is responsible for partitioning " -"the original MNIST datasets (*training* and *test*) and returning a " -":code:`torch.utils.data.DataLoader` s for each of them. We then " -"instantiate a :code:`PytorchMNISTClient` object with our client ID, our " -"DataLoaders, the number of epochs in each round, and which device we want" -" to use for training (CPU or GPU)." -msgstr "" -"Encore une fois, nous pouvons aller plus loin et regarder dans " -":code:`flwr_example/quickstart-pytorch/client.py`. Après avoir parcouru " -"le code d'analyse des arguments au début de notre fonction :code:`main`, " -"tu trouveras un appel à :code:`mnist.load_data`. Cette fonction est " -"responsable du partitionnement des ensembles de données MNIST originaux " -"(*training* et *test*) et renvoie un :code:`torch.utils.data.DataLoader` " -"s pour chacun d'entre eux. Nous instancions ensuite un objet " -":code:`PytorchMNISTClient` avec notre ID client, nos DataLoaders, le " -"nombre d'époques dans chaque tour et le périphérique que nous voulons " -"utiliser pour l'entraînement (CPU ou GPU)." 
+msgid "Formal Definition" +msgstr "Compiler les définitions ProtoBuf" -#: ../../source/example-walkthrough-pytorch-mnist.rst:119 +#: ../../source/explanation-differential-privacy.rst:26 msgid "" -"The :code:`PytorchMNISTClient` object when finally passed to " -":code:`fl.client.start_client` along with the server's address as the " -"training process begins." +"Differential Privacy (DP) provides statistical guarantees against the " +"information an adversary can infer through the output of a randomized " +"algorithm. It provides an unconditional upper bound on the influence of a" +" single individual on the output of the algorithm by adding noise [1]. A " +"randomized mechanism M provides (:math:`\\epsilon`, " +":math:`\\delta`)-differential privacy if for any two neighboring " +"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " +"record, and for all possible outputs S ⊆ Range(A):" msgstr "" -"L'objet :code:`PytorchMNISTClient` est finalement transmis à " -":code:`fl.client.start_client` avec l'adresse du serveur lorsque le " -"processus de formation commence." -#: ../../source/example-walkthrough-pytorch-mnist.rst:123 -msgid "A Closer Look" -msgstr "Regarder de plus près" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:125 -#, fuzzy +#: ../../source/explanation-differential-privacy.rst:32 msgid "" -"Now, let's look closely into the :code:`PytorchMNISTClient` inside " -":code:`flwr_example.quickstart-pytorch.mnist` and see what it is doing:" +"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" msgstr "" -"Maintenant, examinons de près le :code:`PytorchMNISTClient` à l'intérieur" -" du :code:`flwr_example.quickstart-pytorch.mnist` et voyons ce qu'il fait" -" :" -#: ../../source/example-walkthrough-pytorch-mnist.rst:226 +#: ../../source/explanation-differential-privacy.rst:38 msgid "" -"The first thing to notice is that :code:`PytorchMNISTClient` instantiates" -" a CNN model inside its constructor" +"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " +"metric of privacy loss. It also controls the privacy-utility trade-off; " +"lower :math:`\\epsilon` values indicate higher levels of privacy but are " +"likely to reduce utility as well. The :math:`\\delta` parameter accounts " +"for a small probability on which the upper bound :math:`\\epsilon` does " +"not hold. The amount of noise needed to achieve differential privacy is " +"proportional to the sensitivity of the output, which measures the maximum" +" change in the output due to the inclusion or removal of a single record." msgstr "" -"La première chose à remarquer est que :code:`PytorchMNISTClient` " -"instancie un modèle CNN dans son constructeur" -#: ../../source/example-walkthrough-pytorch-mnist.rst:244 +#: ../../source/explanation-differential-privacy.rst:45 #, fuzzy -msgid "" -"The code for the CNN is available under :code:`quickstart-pytorch.mnist` " -"and it is reproduced below. It is the same network found in `Basic MNIST " -"Example `_." -msgstr "" -"Le code du CNN est disponible sous :code:`quickstart-pytorch.mnist` et il" -" est reproduit ci-dessous. Il s'agit du même réseau que celui que l'on " -"trouve dans `Exemple basique de MNIST " -"`_." 
- -#: ../../source/example-walkthrough-pytorch-mnist.rst:290 -msgid "" -"The second thing to notice is that :code:`PytorchMNISTClient` class " -"inherits from the :code:`fl.client.Client`, and hence it must implement " -"the following methods:" -msgstr "" -"La deuxième chose à noter est que la classe :code:`PytorchMNISTClient` " -"hérite de :code:`fl.client.Client`, et qu'elle doit donc implémenter les " -"méthodes suivantes :" +msgid "Differential Privacy in Machine Learning" +msgstr "Confidentialité différentielle" -#: ../../source/example-walkthrough-pytorch-mnist.rst:315 +#: ../../source/explanation-differential-privacy.rst:46 msgid "" -"When comparing the abstract class to its derived class " -":code:`PytorchMNISTClient` you will notice that :code:`fit` calls a " -":code:`train` function and that :code:`evaluate` calls a :code:`test`: " -"function." +"DP can be utilized in machine learning to preserve the privacy of the " +"training data. Differentially private machine learning algorithms are " +"designed in a way to prevent the algorithm to learn any specific " +"information about any individual data points and subsequently prevent the" +" model from revealing sensitive information. Depending on the stage at " +"which noise is introduced, various methods exist for applying DP to " +"machine learning algorithms. One approach involves adding noise to the " +"training data (either to the features or labels), while another method " +"entails injecting noise into the gradients of the loss function during " +"model training. Additionally, such noise can be incorporated into the " +"model's output." msgstr "" -"En comparant la classe abstraite à sa classe dérivée " -":code:`PytorchMNISTClient`, tu remarqueras que :code:`fit` appelle une " -"fonction :code:`train` et que :code:`evaluate` appelle une fonction " -":code:`test` :." -#: ../../source/example-walkthrough-pytorch-mnist.rst:317 +#: ../../source/explanation-differential-privacy.rst:53 #, fuzzy -msgid "" -"These functions can both be found inside the same :code:`quickstart-" -"pytorch.mnist` module:" -msgstr "" -"Ces fonctions se trouvent toutes deux dans le même module :code" -":`quickstart-pytorch.mnist` :" +msgid "Differential Privacy in Federated Learning" +msgstr "Mise à l'échelle de l'apprentissage fédéré" -#: ../../source/example-walkthrough-pytorch-mnist.rst:437 +#: ../../source/explanation-differential-privacy.rst:54 msgid "" -"Observe that these functions encapsulate regular training and test loops " -"and provide :code:`fit` and :code:`evaluate` with final statistics for " -"each round. You could substitute them with your custom train and test " -"loops and change the network architecture, and the entire example would " -"still work flawlessly. As a matter of fact, why not try and modify the " -"code to an example of your liking?" +"Federated learning is a data minimization approach that allows multiple " +"parties to collaboratively train a model without sharing their raw data. " +"However, federated learning also introduces new privacy challenges. The " +"model updates between parties and the central server can leak information" +" about the local data. These leaks can be exploited by attacks such as " +"membership inference and property inference attacks, or model inversion " +"attacks." msgstr "" -"Observe que ces fonctions encapsulent les boucles d'entraînement et de " -"test habituelles et fournissent à :code:`fit` et :code:`evaluate` les " -"statistiques finales pour chaque tour. 
Tu pourrais les remplacer par tes " -"boucles d'entraînement et de test personnalisées et changer " -"l'architecture du réseau, et l'ensemble de l'exemple fonctionnerait " -"toujours parfaitement. En fait, pourquoi ne pas essayer de modifier le " -"code pour en faire un exemple qui te plairait ?" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:444 -msgid "Give It a Try" -msgstr "Fais un essai" -#: ../../source/example-walkthrough-pytorch-mnist.rst:445 +#: ../../source/explanation-differential-privacy.rst:58 msgid "" -"Looking through the quickstart code description above will have given a " -"good understanding of how *clients* and *servers* work in Flower, how to " -"run a simple experiment, and the internals of a client wrapper. Here are " -"a few things you could try on your own and get more experience with " -"Flower:" +"DP can play a crucial role in federated learning to provide privacy for " +"the clients' data." msgstr "" -"En parcourant la description du code de démarrage rapide ci-dessus, tu " -"auras acquis une bonne compréhension du fonctionnement des *clients* et " -"des *serveurs* dans Flower, de l'exécution d'une expérience simple et de " -"la structure interne d'un wrapper client. Voici quelques exemples que tu " -"peux essayer par toi-même pour acquérir plus d'expérience avec Flower :" -#: ../../source/example-walkthrough-pytorch-mnist.rst:448 +#: ../../source/explanation-differential-privacy.rst:60 msgid "" -"Try and change :code:`PytorchMNISTClient` so it can accept different " -"architectures." +"Depending on the granularity of privacy provision or the location of " +"noise addition, different forms of DP exist in federated learning. In " +"this explainer, we focus on two approaches of DP utilization in federated" +" learning based on where the noise is added: at the server (also known as" +" the center) or at the client (also known as the local)." msgstr "" -"Essaie de modifier :code:`PytorchMNISTClient` pour qu'il puisse accepter " -"différentes architectures." -#: ../../source/example-walkthrough-pytorch-mnist.rst:449 -msgid "Modify the :code:`train` function so that it accepts different optimizers" -msgstr "" -"Modifie la fonction :code:`train` pour qu'elle accepte différents " -"optimiseurs" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:450 +#: ../../source/explanation-differential-privacy.rst:63 msgid "" -"Modify the :code:`test` function so that it proves not only the top-1 " -"(regular accuracy) but also the top-5 accuracy?" +"**Central Differential Privacy**: DP is applied by the server and the " +"goal is to prevent the aggregated model from leaking information about " +"each client's data." msgstr "" -"Modifie la fonction :code:`test` pour qu'elle prouve non seulement le " -"top-1 (précision normale) mais aussi le top-5 ?" -#: ../../source/example-walkthrough-pytorch-mnist.rst:451 +#: ../../source/explanation-differential-privacy.rst:65 msgid "" -"Go larger! Try to adapt the code to larger images and datasets. Why not " -"try training on ImageNet with a ResNet-50?" +"**Local Differential Privacy**: DP is applied on the client side before " +"sending any information to the server and the goal is to prevent the " +"updates that are sent to the server from leaking any information about " +"the client's data." msgstr "" -"Essaie d'adapter le code à des images et à des ensembles de données plus " -"grands. Pourquoi ne pas essayer de s'entraîner sur ImageNet avec un " -"ResNet-50 ?" 
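One of the exercises above suggests extending the ``test`` function to report top-5 accuracy alongside top-1. A generic PyTorch helper for that (an illustrative sketch, not code from the removed walkthrough) is:

.. code-block:: python

    import torch

    def topk_accuracy(outputs: torch.Tensor, targets: torch.Tensor, k: int = 5) -> float:
        """Fraction of samples whose true label is among the k highest-scoring classes."""
        # outputs: (batch, num_classes) raw scores; targets: (batch,) class indices.
        _, topk_preds = outputs.topk(k, dim=1)
        hits = topk_preds.eq(targets.view(-1, 1)).any(dim=1)
        return hits.float().mean().item()

Calling it with ``k=1`` reproduces the usual top-1 accuracy, so the same helper can report both figures.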
-#: ../../source/example-walkthrough-pytorch-mnist.rst:453 -msgid "You are ready now. Enjoy learning in a federated way!" -msgstr "Tu es prêt maintenant. Profite de l'apprentissage de manière fédérée !" - -#: ../../source/explanation-differential-privacy.rst:2 +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:68 +#: ../../source/how-to-use-differential-privacy.rst:11 #, fuzzy -msgid "Differential privacy" +msgid "Central Differential Privacy" msgstr "Confidentialité différentielle" -#: ../../source/explanation-differential-privacy.rst:4 -msgid "" -"Flower provides differential privacy (DP) wrapper classes for the easy " -"integration of the central DP guarantees provided by DP-FedAvg into " -"training pipelines defined in any of the various ML frameworks that " -"Flower is compatible with." -msgstr "" -"Flower fournit des classes d'enveloppe de confidentialité différentielle " -"(DP) pour l'intégration facile des garanties centrales de DP fournies par" -" DP-FedAvg dans les pipelines de formation définis dans n'importe lequel " -"des divers cadres de ML avec lesquels Flower est compatible." - -#: ../../source/explanation-differential-privacy.rst:7 -#, fuzzy -msgid "" -"Please note that these components are still experimental; the correct " -"configuration of DP for a specific task is still an unsolved problem." -msgstr "" -"Note que ces composants sont encore expérimentaux, la configuration " -"correcte du DP pour une tâche spécifique est encore un problème non " -"résolu." - -#: ../../source/explanation-differential-privacy.rst:10 +#: ../../source/explanation-differential-privacy.rst:69 msgid "" -"The name DP-FedAvg is misleading since it can be applied on top of any FL" -" algorithm that conforms to the general structure prescribed by the " -"FedOpt family of algorithms." +"In this approach, which is also known as user-level DP, the central " +"server is responsible for adding noise to the globally aggregated " +"parameters. It should be noted that trust in the server is required." msgstr "" -"Le nom DP-FedAvg est trompeur car il peut être appliqué à n'importe quel " -"algorithme FL qui se conforme à la structure générale prescrite par la " -"famille d'algorithmes FedOpt." -#: ../../source/explanation-differential-privacy.rst:13 -msgid "DP-FedAvg" -msgstr "DP-FedAvg" - -#: ../../source/explanation-differential-privacy.rst:15 +#: ../../source/explanation-differential-privacy.rst:76 msgid "" -"DP-FedAvg, originally proposed by McMahan et al. [mcmahan]_ and extended " -"by Andrew et al. [andrew]_, is essentially FedAvg with the following " -"modifications." +"While there are various ways to implement central DP in federated " +"learning, we concentrate on the algorithms proposed by [2] and [3]. The " +"overall approach is to clip the model updates sent by the clients and add" +" some amount of noise to the aggregated model. In each iteration, a " +"random set of clients is chosen with a specific probability for training." +" Each client performs local training on its own data. The update of each " +"client is then clipped by some value `S` (sensitivity `S`). This would " +"limit the impact of any individual client which is crucial for privacy " +"and often beneficial for robustness. A common approach to achieve this is" +" by restricting the `L2` norm of the clients' model updates, ensuring " +"that larger updates are scaled down to fit within the norm `S`." msgstr "" -"DP-FedAvg, proposé à l'origine par McMahan et al. 
[mcmahan]_ et étendu " -"par Andrew et al. [andrew]_, est essentiellement FedAvg avec les " -"modifications suivantes." -#: ../../source/explanation-differential-privacy.rst:17 -msgid "" -"**Clipping** : The influence of each client's update is bounded by " -"clipping it. This is achieved by enforcing a cap on the L2 norm of the " -"update, scaling it down if needed." +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "clipping" msgstr "" -"**Clipping** : L'influence de la mise à jour de chaque client est limitée" -" en l'écrêtant. Ceci est réalisé en imposant un plafond à la norme L2 de " -"la mise à jour, en la réduisant si nécessaire." -#: ../../source/explanation-differential-privacy.rst:18 +#: ../../source/explanation-differential-privacy.rst:89 msgid "" -"**Noising** : Gaussian noise, calibrated to the clipping threshold, is " -"added to the average computed at the server." +"Afterwards, the Gaussian mechanism is used to add noise in order to " +"distort the sum of all clients' updates. The amount of noise is scaled to" +" the sensitivity value to obtain a privacy guarantee. The Gaussian " +"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " +"noise_scale * S ) / (number of sampled clients)`." msgstr "" -"**Bruit** : un bruit gaussien, calibré sur le seuil d'écrêtage, est " -"ajouté à la moyenne calculée au niveau du serveur." -#: ../../source/explanation-differential-privacy.rst:20 -#, fuzzy -msgid "" -"The distribution of the update norm has been shown to vary from task-to-" -"task and to evolve as training progresses. This variability is crucial in" -" understanding its impact on differential privacy guarantees, emphasizing" -" the need for an adaptive approach [andrew]_ that continuously adjusts " -"the clipping threshold to track a prespecified quantile of the update " -"norm distribution." +#: ../../source/explanation-differential-privacy.rst:94 +msgid "Clipping" msgstr "" -"Il a été démontré que la distribution de la norme de mise à jour varie " -"d'une tâche à l'autre et évolue au fur et à mesure de la formation. C'est" -" pourquoi nous utilisons une approche adaptative [andrew]_ qui ajuste " -"continuellement le seuil d'écrêtage pour suivre un quantile prédéfini de " -"la distribution de la norme de mise à jour." -#: ../../source/explanation-differential-privacy.rst:23 -msgid "Simplifying Assumptions" -msgstr "Simplifier les hypothèses" - -#: ../../source/explanation-differential-privacy.rst:25 -#, fuzzy +#: ../../source/explanation-differential-privacy.rst:96 msgid "" -"We make (and attempt to enforce) a number of assumptions that must be " -"satisfied to ensure that the training process actually realizes the " -":math:`(\\epsilon, \\delta)` guarantees the user has in mind when " -"configuring the setup." +"There are two forms of clipping commonly used in Central DP: Fixed " +"Clipping and Adaptive Clipping." msgstr "" -"Nous formulons (et tentons d'appliquer) un certain nombre d'hypothèses " -"qui doivent être satisfaites pour que le processus de formation réalise " -"réellement les garanties :math:`(\\epsilon, \\delta)` que l'utilisateur a" -" à l'esprit lorsqu'il configure l'installation." -#: ../../source/explanation-differential-privacy.rst:27 +#: ../../source/explanation-differential-privacy.rst:98 msgid "" -"**Fixed-size subsampling** :Fixed-size subsamples of the clients must be " -"taken at each round, as opposed to variable-sized Poisson subsamples." 
+"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " +"of clients' updates. Any update exceeding this threshold is clipped back " +"to the threshold value." msgstr "" -"**Sous-échantillonnage de taille fixe** :Des sous-échantillons de taille " -"fixe des clients doivent être prélevés à chaque tour, par opposition aux " -"sous-échantillons de Poisson de taille variable." -#: ../../source/explanation-differential-privacy.rst:28 +#: ../../source/explanation-differential-privacy.rst:100 msgid "" -"**Unweighted averaging** : The contributions from all the clients must " -"weighted equally in the aggregate to eliminate the requirement for the " -"server to know in advance the sum of the weights of all clients available" -" for selection." +"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " +"on the observed update distribution [4]. It means that the clipping value" +" is tuned during the rounds with respect to the quantile of the update " +"norm distribution." msgstr "" -"**Moyenne non pondérée** : Les contributions de tous les clients doivent " -"être pondérées de façon égale dans l'ensemble afin que le serveur n'ait " -"pas à connaître à l'avance la somme des poids de tous les clients " -"disponibles pour la sélection." -#: ../../source/explanation-differential-privacy.rst:29 +#: ../../source/explanation-differential-privacy.rst:102 msgid "" -"**No client failures** : The set of available clients must stay constant " -"across all rounds of training. In other words, clients cannot drop out or" -" fail." +"The choice between fixed and adaptive clipping depends on various factors" +" such as privacy requirements, data distribution, model complexity, and " +"others." msgstr "" -"**Aucune défaillance de client** : L'ensemble des clients disponibles " -"doit rester constant pendant toutes les séries de formation. En d'autres " -"termes, les clients ne peuvent pas abandonner ou échouer." -#: ../../source/explanation-differential-privacy.rst:31 +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:105 +#: ../../source/how-to-use-differential-privacy.rst:96 #, fuzzy -msgid "" -"The first two are useful for eliminating a multitude of complications " -"associated with calibrating the noise to the clipping threshold, while " -"the third one is required to comply with the assumptions of the privacy " -"analysis." -msgstr "" -"Les deux premiers sont utiles pour éliminer une multitude de " -"complications liées au calibrage du bruit en fonction du seuil " -"d'écrêtage, tandis que le troisième est nécessaire pour se conformer aux " -"hypothèses de l'analyse de la vie privée." +msgid "Local Differential Privacy" +msgstr "Confidentialité différentielle" -#: ../../source/explanation-differential-privacy.rst:34 +#: ../../source/explanation-differential-privacy.rst:107 msgid "" -"These restrictions are in line with constraints imposed by Andrew et al. " -"[andrew]_." +"In this approach, each client is responsible for performing DP. Local DP " +"avoids the need for a fully trusted aggregator, but it should be noted " +"that local DP leads to a decrease in accuracy but better privacy in " +"comparison to central DP." msgstr "" -"Ces restrictions sont conformes aux contraintes imposées par Andrew et " -"al. [andrew]_." 
- -#: ../../source/explanation-differential-privacy.rst:37 -msgid "Customizable Responsibility for Noise injection" -msgstr "Responsabilité personnalisable pour l'injection de bruit" - -#: ../../source/explanation-differential-privacy.rst:38 -msgid "" -"In contrast to other implementations where the addition of noise is " -"performed at the server, you can configure the site of noise injection to" -" better match your threat model. We provide users with the flexibility to" -" set up the training such that each client independently adds a small " -"amount of noise to the clipped update, with the result that simply " -"aggregating the noisy updates is equivalent to the explicit addition of " -"noise to the non-noisy aggregate at the server." -msgstr "" -"Contrairement à d'autres implémentations où l'ajout de bruit est effectué" -" au niveau du serveur, tu peux configurer le site d'injection de bruit " -"pour qu'il corresponde mieux à ton modèle de menace. Nous offrons aux " -"utilisateurs la possibilité de configurer l'entraînement de telle sorte " -"que chaque client ajoute indépendamment une petite quantité de bruit à la" -" mise à jour écrêtée, ce qui fait que le simple fait d'agréger les mises " -"à jour bruyantes équivaut à l'ajout explicite de bruit à l'agrégat non " -"bruyant au niveau du serveur." - -#: ../../source/explanation-differential-privacy.rst:41 -msgid "" -"To be precise, if we let :math:`m` be the number of clients sampled each " -"round and :math:`\\sigma_\\Delta` be the scale of the total Gaussian " -"noise that needs to be added to the sum of the model updates, we can use " -"simple maths to show that this is equivalent to each client adding noise " -"with scale :math:`\\sigma_\\Delta/\\sqrt{m}`." -msgstr "" -"Pour être précis, si nous laissons :math:`m` être le nombre de clients " -"échantillonnés à chaque tour et :math:\\sigma_\\Delta` être l'échelle du " -"bruit gaussien total qui doit être ajouté à la somme des mises à jour du " -"modèle, nous pouvons utiliser des mathématiques simples pour montrer que " -"cela équivaut à ce que chaque client ajoute du bruit avec l'échelle " -":math:\\sigma_\\Delta/\\sqrt{m}`." - -#: ../../source/explanation-differential-privacy.rst:44 -msgid "Wrapper-based approach" -msgstr "Approche basée sur l'enveloppe" - -#: ../../source/explanation-differential-privacy.rst:46 -msgid "" -"Introducing DP to an existing workload can be thought of as adding an " -"extra layer of security around it. This inspired us to provide the " -"additional server and client-side logic needed to make the training " -"process differentially private as wrappers for instances of the " -":code:`Strategy` and :code:`NumPyClient` abstract classes respectively. " -"This wrapper-based approach has the advantage of being easily composable " -"with other wrappers that someone might contribute to the Flower library " -"in the future, e.g., for secure aggregation. Using Inheritance instead " -"can be tedious because that would require the creation of new sub- " -"classes every time a new class implementing :code:`Strategy` or " -":code:`NumPyClient` is defined." -msgstr "" -"L'introduction du DP dans une charge de travail existante peut être " -"considérée comme l'ajout d'une couche de sécurité supplémentaire autour " -"d'elle. 
Cela nous a incités à fournir la logique supplémentaire côté " -"serveur et côté client nécessaire pour rendre le processus de formation " -"différentiellement privé en tant qu'enveloppes pour les instances des " -"classes abstraites :code:`Strategy` et :code:`NumPyClient` " -"respectivement. Cette approche basée sur l'enveloppe a l'avantage d'être " -"facilement composable avec d'autres enveloppes que quelqu'un pourrait " -"contribuer à la bibliothèque Flower à l'avenir, par exemple, pour " -"l'agrégation sécurisée. L'utilisation de l'héritage à la place peut être " -"fastidieuse car cela nécessiterait la création de nouvelles sous-classes " -"chaque fois qu'une nouvelle classe mettant en œuvre :code:`Strategy` ou " -":code:`NumPyClient` est définie." - -#: ../../source/explanation-differential-privacy.rst:49 -msgid "Server-side logic" -msgstr "Logique côté serveur" -#: ../../source/explanation-differential-privacy.rst:51 -#, fuzzy -msgid "" -"The first version of our solution was to define a decorator whose " -"constructor accepted, among other things, a boolean-valued variable " -"indicating whether adaptive clipping was to be enabled or not. We quickly" -" realized that this would clutter its :code:`__init__()` function with " -"variables corresponding to hyperparameters of adaptive clipping that " -"would remain unused when it was disabled. A cleaner implementation could " -"be achieved by splitting the functionality into two decorators, " -":code:`DPFedAvgFixed` and :code:`DPFedAvgAdaptive`, with the latter sub- " -"classing the former. The constructors for both classes accept a boolean " -"parameter :code:`server_side_noising`, which, as the name suggests, " -"determines where noising is to be performed." -msgstr "" -"La première version de notre solution consistait à définir un décorateur " -"dont le constructeur acceptait, entre autres, une variable à valeur " -"booléenne indiquant si l'écrêtage adaptatif devait être activé ou non. " -"Nous nous sommes rapidement rendu compte que cela encombrerait sa " -"fonction :code:`__init__()` avec des variables correspondant aux " -"hyperparamètres de l'écrêtage adaptatif qui resteraient inutilisées " -"lorsque celui-ci était désactivé. Une implémentation plus propre pourrait" -" être obtenue en divisant la fonctionnalité en deux décorateurs, " -":code:`DPFedAvgFixed` et :code:`DPFedAvgAdaptive`, le second sous-" -"classant le premier. Les constructeurs des deux classes acceptent un " -"paramètre booléen :code:`server_side_noising` qui, comme son nom " -"l'indique, détermine l'endroit où le noising doit être effectué." - -#: ../../source/explanation-differential-privacy.rst:54 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 -msgid "DPFedAvgFixed" -msgstr "DPFedAvgFixed" - -#: ../../source/explanation-differential-privacy.rst:56 -msgid "" -"The server-side capabilities required for the original version of DP-" -"FedAvg, i.e., the one which performed fixed clipping, can be completely " -"captured with the help of wrapper logic for just the following two " -"methods of the :code:`Strategy` abstract class." 
+#: ../../source/explanation-differential-privacy.rst:116 +msgid "In this explainer, we focus on two forms of achieving Local DP:" msgstr "" -"Les capacités côté serveur requises pour la version originale de DP-" -"FedAvg, c'est-à-dire celle qui effectue un écrêtage fixe, peuvent être " -"entièrement capturées à l'aide d'une logique d'enveloppement pour les " -"deux méthodes suivantes de la classe abstraite :code:`Strategy`." - -#: ../../source/explanation-differential-privacy.rst:58 -msgid "" -":code:`configure_fit()` : The config dictionary being sent by the wrapped" -" :code:`Strategy` to each client needs to be augmented with an additional" -" value equal to the clipping threshold (keyed under " -":code:`dpfedavg_clip_norm`) and, if :code:`server_side_noising=true`, " -"another one equal to the scale of the Gaussian noise that needs to be " -"added at the client (keyed under :code:`dpfedavg_noise_stddev`). This " -"entails *post*-processing of the results returned by the wrappee's " -"implementation of :code:`configure_fit()`." -msgstr "" -":code:`configure_fit()` : Le dictionnaire de configuration envoyé par la " -":code:`Strategy` enveloppée à chaque client doit être augmenté d'une " -"valeur supplémentaire égale au seuil d'écrêtage (indiqué sous " -":code:`dpfedavg_clip_norm`) et, si :code:`server_side_noising=true`, " -"d'une autre égale à l'échelle du bruit gaussien qui doit être ajouté au " -"client (indiqué sous :code:`dpfedavg_noise_stddev`)." - -#: ../../source/explanation-differential-privacy.rst:59 -#, fuzzy -msgid "" -":code:`aggregate_fit()`: We check whether any of the sampled clients " -"dropped out or failed to upload an update before the round timed out. In " -"that case, we need to abort the current round, discarding any successful " -"updates that were received, and move on to the next one. On the other " -"hand, if all clients responded successfully, we must force the averaging " -"of the updates to happen in an unweighted manner by intercepting the " -":code:`parameters` field of :code:`FitRes` for each received update and " -"setting it to 1. Furthermore, if :code:`server_side_noising=true`, each " -"update is perturbed with an amount of noise equal to what it would have " -"been subjected to had client-side noising being enabled. This entails " -"*pre*-processing of the arguments to this method before passing them on " -"to the wrappee's implementation of :code:`aggregate_fit()`." -msgstr "" -":code:`aggregate_fit()`: We check whether any of the sampled clients " -"dropped out or failed to upload an update before the round timed out. In " -"that case, we need to abort the current round, discarding any successful " -"updates that were received, and move on to the next one. On the other " -"hand, if all clients responded successfully, we must force the averaging " -"of the updates to happen in an unweighted manner by intercepting the " -":code:`parameters` field of :code:`FitRes` for each received update and " -"setting it to 1. Furthermore, if :code:`server_side_noising=true`, each " -"update is perturbed with an amount of noise equal to what it would have " -"been subjected to had client-side noising being enabled. This entails " -"*pre*-processing of the arguments to this method before passing them on " -"to the wrappee's implementation of :code:`aggregate_fit()`." 
- -#: ../../source/explanation-differential-privacy.rst:62 -msgid "" -"We can't directly change the aggregation function of the wrapped strategy" -" to force it to add noise to the aggregate, hence we simulate client-side" -" noising to implement server-side noising." -msgstr "" -"Nous ne pouvons pas modifier directement la fonction d'agrégation de la " -"stratégie enveloppée pour la forcer à ajouter du bruit à l'agrégat, c'est" -" pourquoi nous simulons le bruit côté client pour mettre en œuvre le " -"bruit côté serveur." - -#: ../../source/explanation-differential-privacy.rst:64 -msgid "" -"These changes have been put together into a class called " -":code:`DPFedAvgFixed`, whose constructor accepts the strategy being " -"decorated, the clipping threshold and the number of clients sampled every" -" round as compulsory arguments. The user is expected to specify the " -"clipping threshold since the order of magnitude of the update norms is " -"highly dependent on the model being trained and providing a default value" -" would be misleading. The number of clients sampled at every round is " -"required to calculate the amount of noise that must be added to each " -"individual update, either by the server or the clients." -msgstr "" -"Ces modifications ont été regroupées dans une classe appelée " -":code:`DPFedAvgFixed`, dont le constructeur accepte la stratégie décorée," -" le seuil d'écrêtage et le nombre de clients échantillonnés à chaque tour" -" comme arguments obligatoires. L'utilisateur est censé spécifier le seuil" -" d'écrêtage car l'ordre de grandeur des normes de mise à jour dépend " -"fortement du modèle formé et fournir une valeur par défaut serait " -"trompeur. Le nombre de clients échantillonnés à chaque tour est " -"nécessaire pour calculer la quantité de bruit qui doit être ajoutée à " -"chaque mise à jour individuelle, que ce soit par le serveur ou par les " -"clients." - -#: ../../source/explanation-differential-privacy.rst:67 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 -msgid "DPFedAvgAdaptive" -msgstr "DPFedAvgAdaptive" -#: ../../source/explanation-differential-privacy.rst:69 +#: ../../source/explanation-differential-privacy.rst:118 msgid "" -"The additional functionality required to facilitate adaptive clipping has" -" been provided in :code:`DPFedAvgAdaptive`, a subclass of " -":code:`DPFedAvgFixed`. It overrides the above-mentioned methods to do the" -" following." -msgstr "" -"La fonctionnalité supplémentaire nécessaire pour faciliter l'écrêtage " -"adaptatif a été fournie dans :code:`DPFedAvgAdaptive`, une sous-classe de" -" :code:`DPFedAvgFixed`. Elle remplace les méthodes mentionnées ci-dessus " -"pour effectuer les opérations suivantes." - -#: ../../source/explanation-differential-privacy.rst:71 -msgid "" -":code:`configure_fit()` : It intercepts the config dict returned by " -":code:`super.configure_fit()` to add the key-value pair " -":code:`dpfedavg_adaptive_clip_enabled:True` to it, which the client " -"interprets as an instruction to include an indicator bit (1 if update " -"norm <= clipping threshold, 0 otherwise) in the results returned by it." -msgstr "" -":code:`configure_fit()` : Il intercepte le dict de configuration renvoyé " -"par :code:`super.configure_fit()` pour y ajouter la paire clé-valeur " -":code:`dpfedavg_adaptive_clip_enabled:True`, que le client interprète " -"comme une instruction d'inclure un bit indicateur (1 si la norme de mise " -"à jour <= seuil d'écrêtage, 0 sinon) dans les résultats qu'il renvoie." 
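[Editor's note] The legacy wrapper-based design described in the entries above (documentation that this patch removes in favour of the new DP wrappers) is easiest to picture with a small sketch. The class names `DPFedAvgFixed`/`FedAvg` and the set of compulsory arguments come from the text; the exact keyword argument names are assumptions and would need to be checked against the corresponding Flower release:

    # Sketch of the legacy wrapper-based DP-FedAvg API described above.
    # Keyword names are assumptions; the text only states that the wrapped
    # strategy, the clipping threshold and the number of sampled clients are
    # compulsory, plus a `server_side_noising` flag.
    from flwr.server.strategy import DPFedAvgFixed, FedAvg

    dp_strategy = DPFedAvgFixed(
        strategy=FedAvg(),         # the strategy being decorated
        num_sampled_clients=20,    # needed to calibrate the per-round noise
        clip_norm=10.0,            # no sensible default: depends on the model
        server_side_noising=True,  # noise at the server instead of the clients
    )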
- -#: ../../source/explanation-differential-privacy.rst:73 -msgid "" -":code:`aggregate_fit()` : It follows a call to " -":code:`super.aggregate_fit()` with one to :code:`__update_clip_norm__()`," -" a procedure which adjusts the clipping threshold on the basis of the " -"indicator bits received from the sampled clients." -msgstr "" -":code:`aggregate_fit()` : Il fait suivre un appel à " -":code:`super.aggregate_fit()` d'un appel à " -":code:`__update_clip_norm__()`, une procédure qui ajuste le seuil " -"d'écrêtage sur la base des bits indicateurs reçus des clients " -"échantillonnés." - -#: ../../source/explanation-differential-privacy.rst:77 -msgid "Client-side logic" -msgstr "Logique côté client" +"Each client adds noise to the local updates before sending them to the " +"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " +"the sensitivity of the local model to be ∆, Gaussian noise is applied " +"with a noise scale of σ where:" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:79 +#: ../../source/explanation-differential-privacy.rst:120 msgid "" -"The client-side capabilities required can be completely captured through " -"wrapper logic for just the :code:`fit()` method of the " -":code:`NumPyClient` abstract class. To be precise, we need to *post-" -"process* the update computed by the wrapped client to clip it, if " -"necessary, to the threshold value supplied by the server as part of the " -"config dictionary. In addition to this, it may need to perform some extra" -" work if either (or both) of the following keys are also present in the " -"dict." +"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times " +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +"\n" msgstr "" -"Les capacités requises côté client peuvent être entièrement capturées par" -" une logique de wrapper pour la seule méthode :code:`fit()` de la classe " -"abstraite :code:`NumPyClient`. Pour être précis, nous devons *post-" -"traiter* la mise à jour calculée par le client wrapped pour l'écrêter, si" -" nécessaire, à la valeur seuil fournie par le serveur dans le cadre du " -"dictionnaire de configuration. En plus de cela, il peut avoir besoin " -"d'effectuer un travail supplémentaire si l'une des clés suivantes (ou les" -" deux) est également présente dans le dict." -#: ../../source/explanation-differential-privacy.rst:81 +#: ../../source/explanation-differential-privacy.rst:125 msgid "" -":code:`dpfedavg_noise_stddev` : Generate and add the specified amount of " -"noise to the clipped update." +"Each client adds noise to the gradients of the model during the local " +"training (DP-SGD). More specifically, in this approach, gradients are " +"clipped and an amount of calibrated noise is injected into the gradients." msgstr "" -":code:`dpfedavg_noise_stddev` : Génère et ajoute la quantité de bruit " -"spécifiée à la mise à jour de l'écrêtage." -#: ../../source/explanation-differential-privacy.rst:82 +#: ../../source/explanation-differential-privacy.rst:128 msgid "" -":code:`dpfedavg_adaptive_clip_enabled` : Augment the metrics dict in the " -":code:`FitRes` object being returned to the server with an indicator bit," -" calculated as described earlier." +"Please note that these two approaches are providing privacy at different " +"levels." msgstr "" -":code:`dpfedavg_adaptive_clip_enabled` : Complète les métriques dict dans" -" l'objet :code:`FitRes` renvoyé au serveur avec un bit indicateur, " -"calculé comme décrit précédemment." 
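[Editor's note] For the first form of local DP above, the noise scale follows directly from the stated formula `σ = ∆ · sqrt(2 · log(1.25/δ)) / ε`. A minimal sketch with example values (not Flower's implementation):

    # Minimal sketch of the first form of local DP described above: compute
    # sigma = sensitivity * sqrt(2 * log(1.25 / delta)) / epsilon and noise
    # the (already clipped) local update before it is sent to the server.
    # The sensitivity/epsilon/delta values are example assumptions.
    import math

    import numpy as np


    def local_dp_sigma(sensitivity: float, epsilon: float, delta: float) -> float:
        return sensitivity * math.sqrt(2.0 * math.log(1.25 / delta)) / epsilon


    local_update = np.random.randn(10)  # stands in for a clipped model update
    sigma = local_dp_sigma(sensitivity=1.0, epsilon=1.0, delta=1e-5)
    noisy_local_update = local_update + np.random.normal(0.0, sigma, size=local_update.shape)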
-#: ../../source/explanation-differential-privacy.rst:86 -msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" -msgstr "Effectuer l'analyse :math:`(\\epsilon, \\delta)`" +#: ../../source/explanation-differential-privacy.rst:131 +#, fuzzy +msgid "**References:**" +msgstr "Référence" -#: ../../source/explanation-differential-privacy.rst:88 -msgid "" -"Assume you have trained for :math:`n` rounds with sampling fraction " -":math:`q` and noise multiplier :math:`z`. In order to calculate the " -":math:`\\epsilon` value this would result in for a particular " -":math:`\\delta`, the following script may be used." +#: ../../source/explanation-differential-privacy.rst:133 +msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." msgstr "" -"Supposons que tu te sois entraîné pendant :math:`n` tours avec la " -"fraction d'échantillonnage :math:`q` et le multiplicateur de bruit " -":math:`z`. Afin de calculer la valeur :math:`epsilon` qui en résulterait " -"pour un :math:`\\delta` particulier, le script suivant peut être utilisé." -#: ../../source/explanation-differential-privacy.rst:98 +#: ../../source/explanation-differential-privacy.rst:135 #, fuzzy msgid "" -"McMahan et al. \"Learning Differentially Private Recurrent Language " -"Models.\" International Conference on Learning Representations (ICLR), " -"2017." +"[2] McMahan et al. Learning Differentially Private Recurrent Language " +"Models." msgstr "" "McMahan, H. Brendan, et al. \"Learning differentially private recurrent " "language models\", arXiv preprint arXiv:1710.06963 (2017)." -#: ../../source/explanation-differential-privacy.rst:100 -#, fuzzy +#: ../../source/explanation-differential-privacy.rst:137 msgid "" -"Andrew, Galen, et al. \"Differentially Private Learning with Adaptive " -"Clipping.\" Advances in Neural Information Processing Systems (NeurIPS), " -"2021." +"[3] Geyer et al. Differentially Private Federated Learning: A Client " +"Level Perspective." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:139 +#, fuzzy +msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." msgstr "" "Andrew, Galen, et al. \"Differentially private learning with adaptive " "clipping\" Advances in Neural Information Processing Systems 34 (2021) : " @@ -5161,6 +4653,7 @@ msgid "As a reference, this document follows the above structure." msgstr "À titre de référence, ce document suit la structure ci-dessus." #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 +#: ../../source/ref-api/flwr.common.Metadata.rst:2 msgid "Metadata" msgstr "Métadonnées" @@ -5598,13 +5091,12 @@ msgstr "" #, fuzzy msgid "" "This can be achieved by customizing an existing strategy or by " -"`implementing a custom strategy from scratch " -"`_. " -"Here's a nonsensical example that customizes :code:`FedAvg` by adding a " -"custom ``\"hello\": \"world\"`` configuration key/value pair to the " -"config dict of a *single client* (only the first client in the list, the " -"other clients in this round to not receive this \"special\" config " -"value):" +":doc:`implementing a custom strategy from scratch `. 
Here's a nonsensical example that customizes :code:`FedAvg`" +" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" +" to the config dict of a *single client* (only the first client in the " +"list, the other clients in this round to not receive this \"special\" " +"config value):" msgstr "" "Ceci peut être réalisé en personnalisant une stratégie existante ou en " "`mettant en œuvre une stratégie personnalisée à partir de zéro " @@ -6048,11 +5540,12 @@ msgstr "" "modèle global actuel :code:`parameters` et :code:`config` dict" #: ../../source/how-to-implement-strategies.rst:236 +#, fuzzy msgid "" "More sophisticated implementations can use :code:`configure_fit` to " "implement custom client selection logic. A client will only participate " "in a round if the corresponding :code:`ClientProxy` is included in the " -"the list returned from :code:`configure_fit`." +"list returned from :code:`configure_fit`." msgstr "" "Les implémentations plus sophistiquées peuvent utiliser " ":code:`configure_fit` pour mettre en œuvre une logique de sélection des " @@ -6154,11 +5647,12 @@ msgstr "" "le modèle global actuel :code:`parameters` et :code:`config` dict" #: ../../source/how-to-implement-strategies.rst:283 +#, fuzzy msgid "" "More sophisticated implementations can use :code:`configure_evaluate` to " "implement custom client selection logic. A client will only participate " "in a round if the corresponding :code:`ClientProxy` is included in the " -"the list returned from :code:`configure_evaluate`." +"list returned from :code:`configure_evaluate`." msgstr "" "Les implémentations plus sophistiquées peuvent utiliser " ":code:`configure_evaluate` pour mettre en œuvre une logique de sélection " @@ -6334,9 +5828,7 @@ msgid "Install via Docker" msgstr "Installer Flower" #: ../../source/how-to-install-flower.rst:60 -msgid "" -"`How to run Flower using Docker `_" +msgid ":doc:`How to run Flower using Docker `" msgstr "" #: ../../source/how-to-install-flower.rst:63 @@ -6689,17 +6181,17 @@ msgid "Resources" msgstr "Ressources" #: ../../source/how-to-monitor-simulation.rst:234 +#, fuzzy msgid "" -"Ray Dashboard: ``_" +"Ray Dashboard: ``_" msgstr "" "Tableau de bord Ray : ``_" #: ../../source/how-to-monitor-simulation.rst:236 -msgid "" -"Ray Metrics: ``_" +#, fuzzy +msgid "Ray Metrics: ``_" msgstr "" "Ray Metrics : ``_" @@ -7695,7 +7187,8 @@ msgstr "" msgid "" "Remove \"placeholder\" methods from subclasses of ``Client`` or " "``NumPyClient``. If you, for example, use server-side evaluation, then " -"empty placeholder implementations of ``evaluate`` are no longer necessary." +"empty placeholder implementations of ``evaluate`` are no longer " +"necessary." msgstr "" "Supprime les méthodes \"placeholder\" des sous-classes de ``Client`` ou " "de ``NumPyClient``. Si tu utilises, par exemple, l'évaluation côté " @@ -7848,23 +7341,173 @@ msgid "" msgstr "" #: ../../source/how-to-use-built-in-mods.rst:89 -msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" 
msgstr "" -#: ../../source/how-to-use-strategies.rst:2 +#: ../../source/how-to-use-differential-privacy.rst:2 #, fuzzy -msgid "Use strategies" -msgstr "Stratégies personnalisées" +msgid "Use Differential Privacy" +msgstr "Confidentialité différentielle" -#: ../../source/how-to-use-strategies.rst:4 +#: ../../source/how-to-use-differential-privacy.rst:3 msgid "" -"Flower allows full customization of the learning process through the " -":code:`Strategy` abstraction. A number of built-in strategies are " -"provided in the core framework." +"This guide explains how you can utilize differential privacy in the " +"Flower framework. If you are not yet familiar with differential privacy, " +"you can refer to :doc:`explanation-differential-privacy`." msgstr "" -"Flower permet une personnalisation complète du processus d'apprentissage " -"grâce à l'abstraction :code:`Stratégie`. Un certain nombre de stratégies " -"intégrées sont fournies dans le cadre principal." + +#: ../../source/how-to-use-differential-privacy.rst:7 +msgid "" +"Differential Privacy in Flower is in a preview phase. If you plan to use " +"these features in a production environment with sensitive data, feel free" +" contact us to discuss your requirements and to receive guidance on how " +"to best use these features." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:12 +msgid "" +"This approach consists of two seprate phases: clipping of the updates and" +" adding noise to the aggregated model. For the clipping phase, Flower " +"framework has made it possible to decide whether to perform clipping on " +"the server side or the client side." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:15 +msgid "" +"**Server-side Clipping**: This approach has the advantage of the server " +"enforcing uniform clipping across all clients' updates and reducing the " +"communication overhead for clipping values. However, it also has the " +"disadvantage of increasing the computational load on the server due to " +"the need to perform the clipping operation for all clients." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:16 +msgid "" +"**Client-side Clipping**: This approach has the advantage of reducing the" +" computational overhead on the server. However, it also has the " +"disadvantage of lacking centralized control, as the server has less " +"control over the clipping process." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:21 +#, fuzzy +msgid "Server-side Clipping" +msgstr "Logique côté serveur" + +#: ../../source/how-to-use-differential-privacy.rst:22 +msgid "" +"For central DP with server-side clipping, there are two :code:`Strategy` " +"classes that act as wrappers around the actual :code:`Strategy` instance " +"(for example, :code:`FedAvg`). The two wrapper classes are " +":code:`DifferentialPrivacyServerSideFixedClipping` and " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " +"adaptive clipping." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:-1 +#, fuzzy +msgid "server side clipping" +msgstr "Logique côté serveur" + +#: ../../source/how-to-use-differential-privacy.rst:31 +msgid "" +"The code sample below enables the :code:`FedAvg` strategy to use server-" +"side fixed clipping using the " +":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " +"same approach can be used with " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " +"corresponding input parameters." 
+msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:52 +#, fuzzy +msgid "Client-side Clipping" +msgstr "Logique côté client" + +#: ../../source/how-to-use-differential-privacy.rst:53 +msgid "" +"For central DP with client-side clipping, the server sends the clipping " +"value to selected clients on each round. Clients can use existing Flower " +":code:`Mods` to perform the clipping. Two mods are available for fixed " +"and adaptive client-side clipping: :code:`fixedclipping_mod` and " +":code:`adaptiveclipping_mod` with corresponding server-side wrappers " +":code:`DifferentialPrivacyClientSideFixedClipping` and " +":code:`DifferentialPrivacyClientSideAdaptiveClipping`." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:-1 +#, fuzzy +msgid "client side clipping" +msgstr "Logique côté client" + +#: ../../source/how-to-use-differential-privacy.rst:63 +msgid "" +"The code sample below enables the :code:`FedAvg` strategy to use " +"differential privacy with client-side fixed clipping using both the " +":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " +"the client, :code:`fixedclipping_mod`:" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:80 +msgid "" +"In addition to the server-side strategy wrapper, the :code:`ClientApp` " +"needs to configure the matching :code:`fixedclipping_mod` to perform the " +"client-side clipping:" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:97 +msgid "" +"To utilize local differential privacy (DP) and add noise to the client " +"model parameters before transmitting them to the server in Flower, you " +"can use the `LocalDpMod`. The following hyperparameters need to be set: " +"clipping norm value, sensitivity, epsilon, and delta." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "local DP mod" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:104 +msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:122 +msgid "" +"Please note that the order of mods, especially those that modify " +"parameters, is important when using multiple modifiers. Typically, " +"differential privacy (DP) modifiers should be the last to operate on " +"parameters." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:125 +msgid "Local Training using Privacy Engines" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:126 +msgid "" +"For ensuring data instance-level privacy during local model training on " +"the client side, consider leveraging privacy engines such as Opacus and " +"TensorFlow Privacy. For examples of using Flower with these engines, " +"please refer to the Flower examples directory (`Opacus " +"`_, `Tensorflow" +" Privacy `_)." +msgstr "" + +#: ../../source/how-to-use-strategies.rst:2 +#, fuzzy +msgid "Use strategies" +msgstr "Stratégies personnalisées" + +#: ../../source/how-to-use-strategies.rst:4 +msgid "" +"Flower allows full customization of the learning process through the " +":code:`Strategy` abstraction. A number of built-in strategies are " +"provided in the core framework." +msgstr "" +"Flower permet une personnalisation complète du processus d'apprentissage " +"grâce à l'abstraction :code:`Stratégie`. Un certain nombre de stratégies " +"intégrées sont fournies dans le cadre principal." 
#: ../../source/how-to-use-strategies.rst:6 msgid "" @@ -8004,11 +7647,11 @@ msgstr "Quickstart tutorials" msgid "How-to guides" msgstr "Guides" -#: ../../source/index.rst:97 +#: ../../source/index.rst:98 msgid "Legacy example guides" msgstr "" -#: ../../source/index.rst:108 ../../source/index.rst:112 +#: ../../source/index.rst:109 ../../source/index.rst:113 msgid "Explanations" msgstr "Explications" @@ -8016,26 +7659,26 @@ msgstr "Explications" msgid "API reference" msgstr "Référence pour l'API" -#: ../../source/index.rst:137 +#: ../../source/index.rst:138 msgid "Reference docs" msgstr "Référence pour la documentation" -#: ../../source/index.rst:153 +#: ../../source/index.rst:154 #, fuzzy msgid "Contributor tutorials" msgstr "Configuration du contributeur" -#: ../../source/index.rst:160 +#: ../../source/index.rst:161 #, fuzzy msgid "Contributor how-to guides" msgstr "Guide pour les contributeurs" -#: ../../source/index.rst:173 +#: ../../source/index.rst:174 #, fuzzy msgid "Contributor explanations" msgstr "Explications" -#: ../../source/index.rst:179 +#: ../../source/index.rst:180 #, fuzzy msgid "Contributor references" msgstr "Configuration du contributeur" @@ -8144,7 +7787,7 @@ msgstr "" "Guides orientés sur la résolutions étapes par étapes de problèmes ou " "objectifs specifiques." -#: ../../source/index.rst:110 +#: ../../source/index.rst:111 msgid "" "Understanding-oriented concept guides explain and discuss key topics and " "underlying ideas behind Flower and collaborative AI." @@ -8152,29 +7795,29 @@ msgstr "" "Guides orientés sur la compréhension et l'explication des sujets et idées" " de fonds sur lesquels sont construits Flower et l'IA collaborative." -#: ../../source/index.rst:120 +#: ../../source/index.rst:121 #, fuzzy msgid "References" msgstr "Référence" -#: ../../source/index.rst:122 +#: ../../source/index.rst:123 msgid "Information-oriented API reference and other reference material." msgstr "Référence de l'API orientée sur l'information pure." -#: ../../source/index.rst:131::1 +#: ../../source/index.rst:132::1 msgid ":py:obj:`flwr `\\" msgstr "" -#: ../../source/index.rst:131::1 flwr:1 of +#: ../../source/index.rst:132::1 flwr:1 of msgid "Flower main package." msgstr "" -#: ../../source/index.rst:148 +#: ../../source/index.rst:149 #, fuzzy msgid "Contributor docs" msgstr "Configuration du contributeur" -#: ../../source/index.rst:150 +#: ../../source/index.rst:151 #, fuzzy msgid "" "The Flower community welcomes contributions. The following docs are " @@ -8201,12 +7844,22 @@ msgstr "flower-driver-api" msgid "flower-fleet-api" msgstr "flower-fleet-api" +#: ../../source/ref-api-cli.rst:37 +#, fuzzy +msgid "flower-client-app" +msgstr "Flower ClientApp." + +#: ../../source/ref-api-cli.rst:47 +#, fuzzy +msgid "flower-server-app" +msgstr "flower-driver-api" + #: ../../source/ref-api/flwr.rst:2 #, fuzzy msgid "flwr" msgstr "Fleur" -#: ../../source/ref-api/flwr.rst:25 ../../source/ref-api/flwr.server.rst:48 +#: ../../source/ref-api/flwr.rst:25 ../../source/ref-api/flwr.server.rst:52 msgid "Modules" msgstr "" @@ -8232,7 +7885,7 @@ msgid ":py:obj:`flwr.server `\\" msgstr "" #: ../../source/ref-api/flwr.rst:35::1 -#: ../../source/ref-api/flwr.server.rst:37::1 flwr.server:1 +#: ../../source/ref-api/flwr.server.rst:41::1 flwr.server:1 #: flwr.server.server.Server:1 of #, fuzzy msgid "Flower server." 
@@ -8253,7 +7906,6 @@ msgstr "client" #: ../../source/ref-api/flwr.client.rst:13 #: ../../source/ref-api/flwr.common.rst:13 -#: ../../source/ref-api/flwr.server.driver.rst:13 #: ../../source/ref-api/flwr.server.rst:13 #: ../../source/ref-api/flwr.simulation.rst:13 #, fuzzy @@ -8293,10 +7945,10 @@ msgid "Start a Flower NumPyClient which connects to a gRPC server." msgstr "" #: ../../source/ref-api/flwr.client.rst:26 -#: ../../source/ref-api/flwr.common.rst:31 -#: ../../source/ref-api/flwr.server.driver.rst:24 -#: ../../source/ref-api/flwr.server.rst:28 +#: ../../source/ref-api/flwr.common.rst:32 +#: ../../source/ref-api/flwr.server.rst:29 #: ../../source/ref-api/flwr.server.strategy.rst:17 +#: ../../source/ref-api/flwr.server.workflow.rst:17 msgid "Classes" msgstr "" @@ -8311,7 +7963,7 @@ msgstr "" #: ../../source/ref-api/flwr.client.rst:33::1 msgid "" -":py:obj:`ClientApp `\\ \\(client\\_fn\\[\\, " +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " "mods\\]\\)" msgstr "" @@ -8339,8 +7991,12 @@ msgstr "" #: ../../source/ref-api/flwr.client.Client.rst:15 #: ../../source/ref-api/flwr.client.ClientApp.rst:15 #: ../../source/ref-api/flwr.client.NumPyClient.rst:15 +#: ../../source/ref-api/flwr.common.Array.rst:15 #: ../../source/ref-api/flwr.common.ClientMessage.rst:15 +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Context.rst:15 #: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 +#: ../../source/ref-api/flwr.common.Error.rst:15 #: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 #: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 #: ../../source/ref-api/flwr.common.FitIns.rst:15 @@ -8349,20 +8005,32 @@ msgstr "" #: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 #: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 #: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 +#: ../../source/ref-api/flwr.common.Message.rst:15 +#: ../../source/ref-api/flwr.common.MessageType.rst:15 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 +#: ../../source/ref-api/flwr.common.Metadata.rst:15 +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 #: ../../source/ref-api/flwr.common.Parameters.rst:15 +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 #: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 +#: ../../source/ref-api/flwr.common.RecordSet.rst:15 #: ../../source/ref-api/flwr.common.ServerMessage.rst:15 #: ../../source/ref-api/flwr.common.Status.rst:15 #: ../../source/ref-api/flwr.server.ClientManager.rst:15 +#: ../../source/ref-api/flwr.server.Driver.rst:15 #: ../../source/ref-api/flwr.server.History.rst:15 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 #: ../../source/ref-api/flwr.server.Server.rst:15 +#: ../../source/ref-api/flwr.server.ServerApp.rst:15 #: ../../source/ref-api/flwr.server.ServerConfig.rst:15 #: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 -#: ../../source/ref-api/flwr.server.driver.Driver.rst:15 -#: ../../source/ref-api/flwr.server.driver.GrpcDriver.rst:15 #: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 #: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 #: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 +#: 
../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 #: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 #: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 #: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 @@ -8380,6 +8048,9 @@ msgstr "" #: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 #: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 #: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 msgid "Methods" msgstr "" @@ -8459,9 +8130,12 @@ msgstr "" #: ../../source/ref-api/flwr.client.Client.rst:46 #: ../../source/ref-api/flwr.client.NumPyClient.rst:46 +#: ../../source/ref-api/flwr.common.Array.rst:28 #: ../../source/ref-api/flwr.common.ClientMessage.rst:25 #: ../../source/ref-api/flwr.common.Code.rst:19 +#: ../../source/ref-api/flwr.common.Context.rst:25 #: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 +#: ../../source/ref-api/flwr.common.Error.rst:25 #: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 #: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 #: ../../source/ref-api/flwr.common.EventType.rst:19 @@ -8471,10 +8145,16 @@ msgstr "" #: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 #: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 #: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 +#: ../../source/ref-api/flwr.common.Message.rst:37 +#: ../../source/ref-api/flwr.common.MessageType.rst:25 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 +#: ../../source/ref-api/flwr.common.Metadata.rst:25 #: ../../source/ref-api/flwr.common.Parameters.rst:25 #: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 +#: ../../source/ref-api/flwr.common.RecordSet.rst:25 #: ../../source/ref-api/flwr.common.ServerMessage.rst:25 #: ../../source/ref-api/flwr.common.Status.rst:25 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 #: ../../source/ref-api/flwr.server.ServerConfig.rst:25 msgid "Attributes" msgstr "" @@ -8492,14 +8172,25 @@ msgstr "" #: flwr.client.numpy_client.NumPyClient.fit #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.server.app.start_server +#: flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.parametersrecord.Array flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.register #: flwr.server.client_manager.ClientManager.unregister #: flwr.server.client_manager.SimpleClientManager.register #: flwr.server.client_manager.SimpleClientManager.unregister #: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.app.start_driver flwr.server.driver.driver.Driver +#: flwr.server.compat.app.start_driver flwr.server.driver.driver.Driver +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive #: flwr.server.strategy.bulyan.Bulyan +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: 
flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit #: flwr.server.strategy.fedadagrad.FedAdagrad @@ -8515,7 +8206,10 @@ msgstr "" #: flwr.server.strategy.strategy.Strategy.configure_fit #: flwr.server.strategy.strategy.Strategy.evaluate #: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow +#: flwr.simulation.app.start_simulation +#: flwr.simulation.run_simulation.run_simulation of #, fuzzy msgid "Parameters" msgstr "Paramètres du modèle." @@ -8534,13 +8228,17 @@ msgstr "" #: flwr.client.numpy_client.NumPyClient.fit #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.server.app.start_server +#: flwr.common.message.Message.create_reply flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.num_available #: flwr.server.client_manager.ClientManager.register #: flwr.server.client_manager.SimpleClientManager.num_available #: flwr.server.client_manager.SimpleClientManager.register #: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.app.start_driver +#: flwr.server.compat.app.start_driver +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate @@ -8565,13 +8263,17 @@ msgstr "" #: flwr.client.client.Client.get_properties #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.server.app.start_server +#: flwr.common.message.Message.create_reply flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.num_available #: flwr.server.client_manager.ClientManager.register #: flwr.server.client_manager.SimpleClientManager.num_available #: flwr.server.client_manager.SimpleClientManager.register #: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.app.start_driver +#: flwr.server.compat.app.start_driver +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate @@ -8623,23 +8325,38 @@ msgstr "" msgid "ClientApp" msgstr "client" -#: flwr.client.client_app.ClientApp:1 flwr.common.typing.ClientMessage:1 +#: flwr.client.client_app.ClientApp:1 flwr.common.constant.MessageType:1 +#: flwr.common.constant.MessageTypeLegacy:1 flwr.common.context.Context:1 +#: flwr.common.message.Error:1 flwr.common.message.Message:1 +#: flwr.common.message.Metadata:1 
flwr.common.record.parametersrecord.Array:1 +#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 #: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 #: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 #: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 #: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 #: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 #: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 -#: flwr.common.typing.Status:1 flwr.server.app.ServerConfig:1 -#: flwr.server.driver.driver.Driver:1 -#: flwr.server.driver.grpc_driver.GrpcDriver:1 flwr.server.history.History:1 -#: flwr.server.server.Server:1 of +#: flwr.common.typing.Status:1 flwr.server.driver.driver.Driver:1 +#: flwr.server.history.History:1 flwr.server.server.Server:1 +#: flwr.server.server_app.ServerApp:1 flwr.server.server_config.ServerConfig:1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of msgid "Bases: :py:class:`object`" msgstr "" -#: flwr.client.app.start_client:33 flwr.client.app.start_numpy_client:36 -#: flwr.client.client_app.ClientApp:4 flwr.server.app.start_server:41 -#: flwr.server.driver.app.start_driver:30 of +#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 +#: flwr.client.client_app.ClientApp:4 +#: flwr.client.client_app.ClientApp.evaluate:4 +#: flwr.client.client_app.ClientApp.query:4 +#: flwr.client.client_app.ClientApp.train:4 flwr.server.app.start_server:41 +#: flwr.server.compat.app.start_driver:32 flwr.server.server_app.ServerApp:4 +#: flwr.server.server_app.ServerApp.main:4 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 +#: of #, fuzzy msgid "Examples" msgstr "Exemples de PyTorch" @@ -8663,6 +8380,34 @@ msgid "" "global attribute `app` that points to an object of type `ClientApp`." msgstr "" +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`evaluate `\\ \\(\\)" +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid "Return a decorator that registers the evaluate fn with the client app." +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`query `\\ \\(\\)" +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.query:1 of +msgid "Return a decorator that registers the query fn with the client app." +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +#, fuzzy +msgid ":py:obj:`train `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.train:1 of +msgid "Return a decorator that registers the train fn with the client app." 
+msgstr "" + #: ../../source/ref-api/flwr.client.NumPyClient.rst:2 msgid "NumPyClient" msgstr "NumPyClient" @@ -8866,7 +8611,7 @@ msgid "" msgstr "" #: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 -#: flwr.server.driver.app.start_driver:21 of +#: flwr.server.compat.app.start_driver:21 of msgid "" "The PEM-encoded root certificates as a byte string or a path string. If " "provided, a secure connection using the certificates will be established " @@ -8886,15 +8631,29 @@ msgid "" "(experimental) - 'rest': HTTP (experimental)" msgstr "" -#: flwr.client.app.start_client:34 flwr.client.app.start_numpy_client:37 of +#: flwr.client.app.start_client:31 of +msgid "" +"The maximum number of times the client will try to connect to the server " +"before giving up in case of a connection error. If set to None, there is " +"no limit to the number of tries." +msgstr "" + +#: flwr.client.app.start_client:35 of +msgid "" +"The maximum duration before the client stops trying to connect to the " +"server in case of connection error. If set to None, there is no limit to " +"the total time." +msgstr "" + +#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of msgid "Starting a gRPC client with an insecure server connection:" msgstr "" -#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:44 of +#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of msgid "Starting an SSL-enabled gRPC client using system certificates:" msgstr "" -#: flwr.client.app.start_client:52 flwr.client.app.start_numpy_client:52 of +#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of msgid "Starting an SSL-enabled gRPC client using provided certificates:" msgstr "" @@ -8919,77 +8678,87 @@ msgstr "" msgid "common" msgstr "commun" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.record.conversion_utils.array_from_numpy:1 of +#, fuzzy +msgid "Create Array from NumPy ndarray." +msgstr "Convertit l'objet des paramètres en ndarrays NumPy." + +#: ../../source/ref-api/flwr.common.rst:30::1 msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.bytes_to_ndarray:1 of msgid "Deserialize NumPy ndarray from bytes." msgstr "Désérialise le tableau numérique NumPy à partir d'octets." -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`configure `\\ \\(identifier\\[\\, " "filename\\, host\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.logger.configure:1 of msgid "Configure logging to file and/or remote log server." msgstr "" "Configure la journalisation vers un fichier et/ou un serveur de " "journalisation distant." -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`event `\\ \\(event\\_type\\[\\, " "event\\_details\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.telemetry.event:1 of msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " "\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 logging.Logger.log:1 +#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 #: of msgid "Log 'msg % args' with the integer severity 'level'." msgstr "Enregistre 'msg % args' avec le niveau de sévérité entier 'level'." -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.ndarray_to_bytes:1 of msgid "Serialize NumPy ndarray to bytes." msgstr "Sérialise le tableau numérique NumPy en octets." -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid ":py:obj:`now `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.date.now:1 of msgid "Construct a datetime from time.time() with time zone set to UTC." msgstr "" "Construit une date à partir de time.time() avec le fuseau horaire réglé " "sur UTC." -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`ndarrays_to_parameters `\\ " "\\(ndarrays\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.ndarrays_to_parameters:1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 @@ -8997,191 +8766,372 @@ msgstr "" msgid "Convert NumPy ndarrays to parameters object." msgstr "Convertit les ndarrays NumPy en objets de paramètres." -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`parameters_to_ndarrays `\\ " "\\(parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.parameters_to_ndarrays:1 of msgid "Convert parameters object to NumPy ndarrays." msgstr "Convertit l'objet des paramètres en ndarrays NumPy." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.parametersrecord.Array:1 of +msgid "Array type." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`ClientMessage `\\ " "\\(\\[get\\_properties\\_res\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ClientMessage:1 of msgid "ClientMessage is a container used to hold one result message." msgstr "" "ClientMessage est un conteneur utilisé pour contenir un message de " "résultat." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`Code `\\ \\(value\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.Code:1 of msgid "Client status codes." msgstr "Codes d'état du client." 
-#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy +msgid "" +":py:obj:`ConfigsRecord `\\ " +"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" +msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +#, fuzzy +msgid "Configs record." +msgstr "Configurer les clients" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`Context `\\ \\(state\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.context.Context:1 of +msgid "State of your run." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.DisconnectRes:1 of msgid "DisconnectRes message from client to server." msgstr "Message DisconnectRes envoyé par le client au serveur." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`EvaluateIns `\\ \\(parameters\\, " "config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.EvaluateIns:1 of msgid "Evaluate instructions for a client." msgstr "Évaluer les instructions pour un client." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " "num\\_examples\\, metrics\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.EvaluateRes:1 of msgid "Evaluate response from a client." msgstr "Évaluer la réponse d'un client." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`EventType `\\ \\(value\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.telemetry.EventType:1 of msgid "Types of telemetry events." msgstr "Types d'événements télémétriques." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.FitIns:1 of msgid "Fit instructions for a client." msgstr "Instructions d'ajustement pour un client." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`FitRes `\\ \\(status\\, parameters\\, " "num\\_examples\\, metrics\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.FitRes:1 of msgid "Fit response from a client." msgstr "Réponse adaptée d'un client." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.message.Error:1 of +msgid "A dataclass that stores information about an error that occurred." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetParametersIns:1 of msgid "Parameters request for a client." msgstr "Demande de paramètres pour un client." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`GetParametersRes `\\ \\(status\\, " "parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetParametersRes:1 of msgid "Response when asked to return parameters." msgstr "Réponse lorsqu'on te demande de renvoyer des paramètres." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetPropertiesIns:1 of msgid "Properties request for a client." msgstr "Demande de propriétés pour un client." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`GetPropertiesRes `\\ \\(status\\, " "properties\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetPropertiesRes:1 of msgid "Properties response from a client." msgstr "Réponse des propriétés d'un client." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.message.Message:1 of +msgid "State of your application from the viewpoint of the entity using it." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`MessageType `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.constant.MessageType:1 of +msgid "Message type." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.constant.MessageTypeLegacy:1 of +msgid "Legacy message type." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`Metadata `\\ \\(run\\_id\\, " +"message\\_id\\, src\\_node\\_id\\, ...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.message.Metadata:1 of +msgid "A dataclass holding metadata associated with the current message." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +msgid "Metrics record." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`NDArray `\\" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" "alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " ":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`Parameters `\\ \\(tensors\\, " "tensor\\_type\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.Parameters:1 of msgid "Model parameters." msgstr "Paramètres du modèle." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.parametersrecord.ParametersRecord:1 of +#, fuzzy +msgid "Parameters record." +msgstr "Paramètres du modèle." + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ReconnectIns:1 of msgid "ReconnectIns message from server to client." msgstr "Message de reconnexion du serveur au client." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`RecordSet `\\ " +"\\(\\[parameters\\_records\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.recordset.RecordSet:1 of +msgid "RecordSet stores groups of parameters, metrics and configs." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`ServerMessage `\\ " "\\(\\[get\\_properties\\_ins\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ServerMessage:1 of msgid "ServerMessage is a container used to hold one instruction message." msgstr "" "ServerMessage est un conteneur utilisé pour contenir un message " "d'instruction." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`Status `\\ \\(code\\, message\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.Status:1 of msgid "Client status." msgstr "Statut du client." +#: ../../source/ref-api/flwr.common.Array.rst:2 +msgid "Array" +msgstr "" + +#: flwr.common.record.parametersrecord.Array:3 of +msgid "" +"A dataclass containing serialized data from an array-like or tensor-like " +"object along with some metadata about it." +msgstr "" + +#: flwr.common.record.parametersrecord.Array:6 of +msgid "" +"A string representing the data type of the serialised object (e.g. " +"`np.float32`)" +msgstr "" + +#: flwr.common.record.parametersrecord.Array:8 of +msgid "" +"A list representing the shape of the unserialized array-like object. This" +" is used to deserialize the data (depending on the serialization method) " +"or simply as a metadata field." +msgstr "" + +#: flwr.common.record.parametersrecord.Array:12 of +msgid "" +"A string indicating the type of serialisation mechanism used to generate " +"the bytes in `data` from an array-like or tensor-like object." 
+msgstr "" + +#: flwr.common.record.parametersrecord.Array:15 of +msgid "A buffer of bytes containing the data." +msgstr "" + +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#, fuzzy +msgid ":py:obj:`numpy `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#: flwr.common.record.parametersrecord.Array.numpy:1 of +#, fuzzy +msgid "Return the array as a NumPy array." +msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`dtype `\\" +msgstr "" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#, fuzzy +msgid ":py:obj:`shape `\\" +msgstr "serveur.stratégie.Stratégie" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#, fuzzy +msgid ":py:obj:`stype `\\" +msgstr "serveur.stratégie.Stratégie" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`data `\\" +msgstr "" + #: ../../source/ref-api/flwr.common.ClientMessage.rst:2 #, fuzzy msgid "ClientMessage" @@ -9241,6 +9191,106 @@ msgid "" "`\\" msgstr "" +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 +#, fuzzy +msgid "ConfigsRecord" +msgstr "Configurer les clients" + +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " +":py:class:`float`, :py:class:`str`, :py:class:`bytes`, :py:class:`bool`, " +":py:class:`~typing.List`\\ [:py:class:`int`], :py:class:`~typing.List`\\ " +"[:py:class:`float`], :py:class:`~typing.List`\\ [:py:class:`str`], " +":py:class:`~typing.List`\\ [:py:class:`bytes`], " +":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1 +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "Remove all items from R." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 +#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "Return number of Bytes stored in this object." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 +#: flwr.common.record.typeddict.TypedDict.get:1 of +msgid "d defaults to None." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 +#: flwr.common.record.typeddict.TypedDict.pop:1 of +msgid "If key is not found, d is returned if given, otherwise KeyError is raised." 
+msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 +#: flwr.common.record.typeddict.TypedDict.update:1 of +msgid "Update R from dict/iterable E and F." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of +msgid "This function counts booleans as occupying 1 Byte." +msgstr "" + +#: ../../source/ref-api/flwr.common.Context.rst:2 +msgid "Context" +msgstr "" + +#: flwr.common.context.Context:3 of +msgid "" +"Holds records added by the entity in a given run and that will stay " +"local. This means that the data it holds will never leave the system it's" +" running from. This can be used as an intermediate storage or scratchpad " +"when executing mods. It can also be used as a memory to access at " +"different points during the lifecycle of this entity (e.g. across " +"multiple rounds)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Context.rst:28::1 +#, fuzzy +msgid ":py:obj:`state `\\" +msgstr "serveur.stratégie.Stratégie" + #: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 msgid "DisconnectRes" msgstr "" @@ -9249,6 +9299,34 @@ msgstr "" msgid ":py:obj:`reason `\\" msgstr "" +#: ../../source/ref-api/flwr.common.Error.rst:2 +msgid "Error" +msgstr "" + +#: flwr.common.message.Error:3 of +msgid "An identifier for the error." +msgstr "" + +#: flwr.common.message.Error:5 of +msgid "A reason for why the error arose (e.g. an exception stack-trace)" +msgstr "" + +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`code `\\" +msgstr "" + +#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of +msgid "Error code." +msgstr "" + +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`reason `\\" +msgstr "" + +#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of +msgid "Reason reported about the error." +msgstr "" + #: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 #, fuzzy msgid "EvaluateIns" @@ -9472,18 +9550,352 @@ msgstr "" msgid ":py:obj:`properties `\\" msgstr "" -#: ../../source/ref-api/flwr.common.NDArray.rst:2 -msgid "NDArray" -msgstr "" +#: ../../source/ref-api/flwr.common.Message.rst:2 +#, fuzzy +msgid "Message" +msgstr "Côté serveur" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 -msgid ":py:obj:`tensors `\\" +#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 +#: flwr.common.message.Message:3 of +msgid "A dataclass including information about the message to be executed." +msgstr "" + +#: flwr.common.message.Message:5 of +msgid "" +"Holds records either sent by another entity (e.g. sent by the server-side" +" logic to a client, or vice-versa) or that will be sent to it." +msgstr "" + +#: flwr.common.message.Message:8 of +msgid "" +"A dataclass that captures information about an error that took place when" +" processing another message." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid "" +":py:obj:`create_error_reply `\\ " +"\\(error\\, ttl\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_error_reply:1 of +msgid "Construct a reply message indicating an error happened." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid "" +":py:obj:`create_reply `\\ \\(content\\," +" ttl\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_reply:1 of +msgid "Create a reply to this message with specified content and TTL." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_content `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_content:1 of +msgid "Return True if message has content, else False." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_error `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_error:1 of +msgid "Return True if message has an error, else False." +msgstr "" + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`content `\\" +msgstr "" + +#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 +#: of +#, fuzzy +msgid "The content of this message." +msgstr "Évaluer la réponse d'un client." + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`error `\\" +msgstr "" + +#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of +msgid "Error captured by this message." +msgstr "" + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`metadata `\\" +msgstr "" + +#: flwr.common.message.Message.create_error_reply:3 of +msgid "The error that was encountered." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.ttl:1 flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 flwr.common.message.Metadata:16 +#: of +msgid "Time-to-live for this message." +msgstr "" + +#: flwr.common.message.Message.create_reply:3 of +msgid "" +"The method generates a new `Message` as a reply to this message. It " +"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " +"this message and sets 'reply_to_message' to the ID of this message." +msgstr "" + +#: flwr.common.message.Message.create_reply:7 of +msgid "The content for the reply message." +msgstr "" + +#: flwr.common.message.Message.create_reply:12 of +msgid "A new `Message` instance representing the reply." +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:2 +msgid "MessageType" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`EVALUATE `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`QUERY `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`TRAIN `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 +msgid "MessageTypeLegacy" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PARAMETERS `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PROPERTIES `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of +msgid "An identifier for the current run." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of +msgid "An identifier for the current message." 
+msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of +msgid "An identifier for the node sending this message." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1 +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.message.Metadata:9 of +msgid "An identifier for the node receiving this message." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of +msgid "An identifier for the message this message replies to." +msgstr "" + +#: flwr.common.message.Metadata:13 of +msgid "" +"An identifier for grouping messages. In some settings, this is used as " +"the FL round." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of +msgid "A string that encodes the action to be executed on the receiving end." +msgstr "" + +#: flwr.common.message.Metadata:21 of +msgid "" +"An identifier that can be used when loading a particular data partition " +"for a ClientApp. Making use of this identifier is more relevant when " +"conducting simulations." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`dst_node_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`group_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.group_id:1 of +msgid "An identifier for grouping messages." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`message_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`message_type `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`partition_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.partition_id:1 of +msgid "An identifier telling which data partition a ClientApp should use." 
+msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`reply_to_message `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`run_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`src_node_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`ttl `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 +msgid "MetricsRecord" +msgstr "" + +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " +":py:class:`float`, :py:class:`~typing.List`\\ [:py:class:`int`], " +":py:class:`~typing.List`\\ [:py:class:`float`]]]" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.NDArray.rst:2 +msgid "NDArray" +msgstr "" + +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensors `\\" msgstr "" #: ../../source/ref-api/flwr.common.Parameters.rst:29::1 msgid ":py:obj:`tensor_type `\\" msgstr "" +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 +#, fuzzy +msgid "ParametersRecord" +msgstr "Paramètres du modèle." + +#: flwr.common.record.parametersrecord.ParametersRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord:3 of +msgid "" +"A dataclass storing named Arrays in order. This means that it holds " +"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " +"viewed as an equivalent to PyTorch's state_dict, but holding serialised " +"tensors instead." 
+msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of +msgid "" +"Note that a small amount of Bytes might also be included in this counting" +" that correspond to metadata of the serialized object (e.g. of NumPy " +"array) needed for deseralization." +msgstr "" + #: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 #, fuzzy msgid "ReconnectIns" @@ -9493,6 +9905,37 @@ msgstr "Collecte centralisée des données" msgid ":py:obj:`seconds `\\" msgstr "" +#: ../../source/ref-api/flwr.common.RecordSet.rst:2 +msgid "RecordSet" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`configs_records `\\" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1 +#: flwr.common.RecordSet.configs_records:1::1 of +msgid "Dictionary holding ConfigsRecord instances." +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`metrics_records `\\" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.metrics_records:1 of +msgid "Dictionary holding MetricsRecord instances." +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`parameters_records `\\" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.parameters_records:1 of +msgid "Dictionary holding ParametersRecord instances." +msgstr "" + #: ../../source/ref-api/flwr.common.ServerMessage.rst:2 #, fuzzy msgid "ServerMessage" @@ -9531,6 +9974,10 @@ msgstr "" msgid ":py:obj:`message `\\" msgstr "" +#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 +msgid "array\\_from\\_numpy" +msgstr "" + #: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 msgid "bytes\\_to\\_ndarray" msgstr "" @@ -9581,81 +10028,132 @@ msgstr "" msgid "server" msgstr "serveur" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_driver_api `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.run_driver_api:1 of #, fuzzy msgid "Run Flower server (Driver API)." msgstr "flower-driver-api" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_fleet_api `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.run_fleet_api:1 of #, fuzzy msgid "Run Flower server (Fleet API)." 
msgstr "flower-fleet-api" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_server_app `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.app.run_server_app:1 of +#: ../../source/ref-api/flwr.server.rst:27::1 +#: flwr.server.run_serverapp.run_server_app:1 of #, fuzzy msgid "Run Flower server app." msgstr "Serveur de Flower" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_superlink `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.run_superlink:1 of msgid "Run Flower server (Driver API and Fleet API)." msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 +msgid "" +":py:obj:`start_driver `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:27::1 +#: flwr.server.compat.app.start_driver:1 of +#, fuzzy +msgid "Start a Flower Driver API server." +msgstr "Tout d'abord, démarre un serveur Flower :" + +#: ../../source/ref-api/flwr.server.rst:27::1 msgid "" ":py:obj:`start_server `\\ \\(\\*\\[\\, " "server\\_address\\, server\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.start_server:1 of msgid "Start a Flower server using the gRPC transport layer." msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 msgid ":py:obj:`ClientManager `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.client_manager.ClientManager:1 of msgid "Abstract base class for managing Flower clients." msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 +#, fuzzy +msgid "" +":py:obj:`Driver `\\ " +"\\(\\[driver\\_service\\_address\\, ...\\]\\)" +msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" + +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.driver.driver.Driver:1 of +msgid "`Driver` class provides an interface to the Driver API." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 msgid ":py:obj:`History `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.history.History:1 of msgid "History class for training and/or evaluation metrics collection." msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 +msgid "" +":py:obj:`LegacyContext `\\ \\(state\\[\\, " +"config\\, strategy\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Legacy Context." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 msgid "" ":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " "strategy\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 +#, fuzzy +msgid "" +":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " +"strategy\\, ...\\]\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.server_app.ServerApp:1 of +#, fuzzy +msgid "Flower ServerApp." 
+msgstr "Serveur de Flower" + +#: ../../source/ref-api/flwr.server.rst:41::1 #, fuzzy msgid "" ":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," @@ -9665,41 +10163,42 @@ msgstr "" "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" -#: ../../source/ref-api/flwr.server.rst:37::1 -#: flwr.server.app.ServerConfig:1 of +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.server_config.ServerConfig:1 of #, fuzzy msgid "Flower server config." msgstr "Serveur de Flower" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.client_manager.SimpleClientManager:1 of msgid "Provides a pool of available clients." msgstr "" -#: ../../source/ref-api/flwr.server.rst:56::1 -msgid ":py:obj:`flwr.server.driver `\\" -msgstr "" - -#: ../../source/ref-api/flwr.server.rst:56::1 flwr.server.driver:1 -#: of -#, fuzzy -msgid "Flower driver SDK." -msgstr "Serveur de Flower" - -#: ../../source/ref-api/flwr.server.rst:56::1 +#: ../../source/ref-api/flwr.server.rst:60::1 #, fuzzy msgid ":py:obj:`flwr.server.strategy `\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.rst:56::1 +#: ../../source/ref-api/flwr.server.rst:60::1 #: flwr.server.strategy:1 of msgid "Contains the strategy abstraction and different implementations." msgstr "" +#: ../../source/ref-api/flwr.server.rst:60::1 +#, fuzzy +msgid ":py:obj:`flwr.server.workflow `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.rst:60::1 +#: flwr.server.workflow:1 of +#, fuzzy +msgid "Workflows." +msgstr "Flux de travail" + #: ../../source/ref-api/flwr.server.ClientManager.rst:2 #, fuzzy msgid "ClientManager" @@ -9793,36 +10292,250 @@ msgstr "" msgid "This method is idempotent." msgstr "" -#: ../../source/ref-api/flwr.server.History.rst:2 -msgid "History" -msgstr "" - -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_loss_centralized " -"`\\ \\(server\\_round\\, " -"loss\\)" -msgstr "" - -#: flwr.server.history.History.add_loss_centralized:1 -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:2 #, fuzzy -msgid "Add one loss entry (from centralized evaluation)." -msgstr "Évaluation centralisée" +msgid "Driver" +msgstr "serveur" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.driver.driver.Driver:3 of msgid "" -":py:obj:`add_loss_distributed " -"`\\ \\(server\\_round\\, " -"loss\\)" +"The IPv4 or IPv6 address of the Driver API server. Defaults to " +"`\"[::]:9091\"`." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_loss_distributed:1 of -msgid "Add one loss entry (from distributed evaluation)." +#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +msgid "" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order: * CA certificate. * " +"server certificate. * server private key." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +msgid "" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. 
The tuple is expected to have three " +"bytes elements in the following order:" +msgstr "" + +#: flwr.server.app.start_server:32 flwr.server.driver.driver.Driver:10 of +#, fuzzy +msgid "CA certificate." +msgstr "Certificats" + +#: flwr.server.app.start_server:33 flwr.server.driver.driver.Driver:11 of +#, fuzzy +msgid "server certificate." +msgstr "Certificats" + +#: flwr.server.app.start_server:34 flwr.server.driver.driver.Driver:12 of +#, fuzzy +msgid "server private key." +msgstr "stratégie.du.serveur" + +#: flwr.server.driver.driver.Driver.close:1::1 of +#, fuzzy +msgid ":py:obj:`close `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: flwr.server.driver.driver.Driver.close:1 +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "Disconnect from the SuperLink if connected." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "" +":py:obj:`create_message `\\ " +"\\(content\\, message\\_type\\, ...\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.create_message:1 of +msgid "Create a new message with specified parameters." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid ":py:obj:`get_node_ids `\\ \\(\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.get_node_ids:1 of +msgid "Get node IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.pull_messages:1 of +msgid "Pull messages based on message IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "" +":py:obj:`push_messages `\\ " +"\\(messages\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.push_messages:1 of +msgid "Push messages to specified node IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +#, fuzzy +msgid "" +":py:obj:`send_and_receive `\\ " +"\\(messages\\, \\*\\[\\, timeout\\]\\)" +msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.send_and_receive:1 of +msgid "Push messages to specified node IDs and pull the reply messages." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:3 of +msgid "" +"This method constructs a new `Message` with given content and metadata. " +"The `run_id` and `src_node_id` will be set automatically." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:6 of +msgid "" +"The content for the new message. This holds records that are to be sent " +"to the destination node." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:9 of +msgid "" +"The type of the message, defining the action to be executed on the " +"receiving end." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:12 of +msgid "The ID of the destination node to which the message is being sent." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:14 of +msgid "" +"The ID of the group to which this message is associated. In some " +"settings, this is used as the FL round." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:17 of +msgid "" +"Time-to-live for the round trip of this message, i.e., the time from " +"sending this message to receiving a reply. 
It specifies the duration for " +"which the message and its potential reply are considered valid." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:22 of +msgid "" +"**message** -- A new `Message` instance with the specified content and " +"metadata." +msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:3 of +msgid "" +"This method is used to collect messages from the SuperLink that " +"correspond to a set of given message IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:6 of +msgid "An iterable of message IDs for which reply messages are to be retrieved." +msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:9 of +msgid "**messages** -- An iterable of messages received." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:3 of +msgid "" +"This method takes an iterable of messages and sends each message to the " +"node specified in `dst_node_id`." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:6 +#: flwr.server.driver.driver.Driver.send_and_receive:7 of +msgid "An iterable of messages to be sent." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:9 of +msgid "" +"**message_ids** -- An iterable of IDs for the messages that were sent, " +"which can be used to pull replies." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:3 of +msgid "" +"This method sends a list of messages to their destination node IDs and " +"then waits for the replies. It continues to pull replies until either all" +" replies are received or the specified timeout duration is exceeded." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:9 of +msgid "" +"The timeout duration in seconds. If specified, the method will wait for " +"replies for this duration. If `None`, there is no time limit and the " +"method will wait until replies for all messages are received." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:14 of +msgid "**replies** -- An iterable of reply messages received from the SuperLink." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:18 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:53 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:60 +#: of +#, fuzzy +msgid "Notes" +msgstr "Aucun" + +#: flwr.server.driver.driver.Driver.send_and_receive:19 of +msgid "" +"This method uses `push_messages` to send the messages and `pull_messages`" +" to collect the replies. If `timeout` is set, the method may not return " +"replies for all sent messages. A message remains valid until its TTL, " +"which is not affected by `timeout`." +msgstr "" + +#: ../../source/ref-api/flwr.server.History.rst:2 +msgid "History" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1 +#: flwr.server.history.History.add_loss_centralized:1::1 of +#, fuzzy +msgid "Add one loss entry (from centralized evaluation)." +msgstr "Évaluation centralisée" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_loss_distributed:1 of +msgid "Add one loss entry (from distributed evaluation)." 
+msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" ":py:obj:`add_metrics_centralized " "`\\ \\(server\\_round\\, " @@ -9859,6 +10572,38 @@ msgstr "" msgid "Add metrics entries (from distributed fit)." msgstr "" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 +msgid "LegacyContext" +msgstr "" + +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Bases: :py:class:`~flwr.common.context.Context`" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#, fuzzy +msgid ":py:obj:`config `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#, fuzzy +msgid ":py:obj:`strategy `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +msgid ":py:obj:`client_manager `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#, fuzzy +msgid ":py:obj:`history `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#, fuzzy +msgid ":py:obj:`state `\\" +msgstr "serveur.stratégie.Stratégie" + #: flwr.server.server.Server.client_manager:1::1 of msgid ":py:obj:`client_manager `\\ \\(\\)" msgstr "" @@ -9931,12 +10676,36 @@ msgstr "" msgid "Replace server strategy." msgstr "stratégie.du.serveur" +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 +#, fuzzy +msgid "ServerApp" +msgstr "serveur" + +#: flwr.server.server_app.ServerApp:5 of +#, fuzzy +msgid "Use the `ServerApp` with an existing `Strategy`:" +msgstr "Utilise une stratégie existante" + +#: flwr.server.server_app.ServerApp:15 of +msgid "Use the `ServerApp` with a custom main function:" +msgstr "" + +#: flwr.server.server_app.ServerApp.main:1::1 of +#, fuzzy +msgid ":py:obj:`main `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: flwr.server.server_app.ServerApp.main:1 +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid "Return a decorator that registers the main fn with the server app." +msgstr "" + #: ../../source/ref-api/flwr.server.ServerConfig.rst:2 #, fuzzy msgid "ServerConfig" msgstr "serveur" -#: flwr.server.app.ServerConfig:3 of +#: flwr.server.server_config.ServerConfig:3 of msgid "" "All attributes have default values which allows users to configure just " "the ones they care about." @@ -10010,311 +10779,218 @@ msgstr "" msgid "**success**" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:2 -#, fuzzy -msgid "driver" -msgstr "serveur" - -#: ../../source/ref-api/flwr.server.driver.rst:22::1 -msgid "" -":py:obj:`start_driver `\\ \\(\\*\\[\\, " -"server\\_address\\, server\\, ...\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.driver.rst:22::1 -#: flwr.server.driver.app.start_driver:1 of +#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 #, fuzzy -msgid "Start a Flower Driver API server." -msgstr "Tout d'abord, démarre un serveur Flower :" - -#: ../../source/ref-api/flwr.server.driver.rst:30::1 -msgid "" -":py:obj:`Driver `\\ " -"\\(\\[driver\\_service\\_address\\, ...\\]\\)" -msgstr "" +msgid "run\\_driver\\_api" +msgstr "flower-driver-api" -#: ../../source/ref-api/flwr.server.driver.rst:30::1 -#: flwr.server.driver.driver.Driver:1 of -msgid "`Driver` class provides an interface to the Driver API." 
+#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 +msgid "run\\_fleet\\_api" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:30::1 -msgid "" -":py:obj:`GrpcDriver `\\ " -"\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#: ../../source/ref-api/flwr.server.run_server_app.rst:2 +msgid "run\\_server\\_app" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:30::1 -#: flwr.server.driver.grpc_driver.GrpcDriver:1 of -msgid "`GrpcDriver` provides access to the gRPC Driver API/service." -msgstr "" +#: ../../source/ref-api/flwr.server.run_superlink.rst:2 +#, fuzzy +msgid "run\\_superlink" +msgstr "flower-superlink" -#: ../../source/ref-api/flwr.server.driver.Driver.rst:2 +#: ../../source/ref-api/flwr.server.start_driver.rst:2 #, fuzzy -msgid "Driver" -msgstr "serveur" +msgid "start\\_driver" +msgstr "start_client" -#: flwr.server.driver.driver.Driver:3 of +#: flwr.server.compat.app.start_driver:3 of msgid "" "The IPv4 or IPv6 address of the Driver API server. Defaults to " -"`\"[::]:9091\"`." +"`\"[::]:8080\"`." msgstr "" -#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +#: flwr.server.compat.app.start_driver:6 of msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order: * CA certificate. * " -"server certificate. * server private key." +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_driver` will create one." msgstr "" -#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +#: flwr.server.app.start_server:9 flwr.server.compat.app.start_driver:10 +#: flwr.simulation.app.start_simulation:28 of msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order:" -msgstr "" - -#: flwr.server.app.start_server:32 flwr.server.driver.driver.Driver:10 of -#, fuzzy -msgid "CA certificate." -msgstr "Certificats" - -#: flwr.server.app.start_server:33 flwr.server.driver.driver.Driver:11 of -#, fuzzy -msgid "server certificate." -msgstr "Certificats" - -#: flwr.server.app.start_server:34 flwr.server.driver.driver.Driver:12 of -#, fuzzy -msgid "server private key." -msgstr "stratégie.du.serveur" - -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of -msgid ":py:obj:`get_nodes `\\ \\(\\)" +"Currently supported values are `num_rounds` (int, default: 1) and " +"`round_timeout` in seconds (float, default: None)." msgstr "" -#: flwr.server.driver.driver.Driver.get_nodes:1 -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of -msgid "Get node IDs." +#: flwr.server.app.start_server:12 flwr.server.compat.app.start_driver:13 of +msgid "" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`start_server` will use `flwr.server.strategy.FedAvg`." msgstr "" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of +#: flwr.server.compat.app.start_driver:17 of msgid "" -":py:obj:`pull_task_res `\\ " -"\\(task\\_ids\\)" +"An implementation of the class `flwr.server.ClientManager`. If no " +"implementation is provided, then `start_driver` will use " +"`flwr.server.SimpleClientManager`." 
msgstr "" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 -#: flwr.server.driver.driver.Driver.pull_task_res:1 -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.pull_task_res:1 of -msgid "Get task results." +#: flwr.server.compat.app.start_driver:25 of +msgid "The Driver object to use." msgstr "" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of -msgid "" -":py:obj:`push_task_ins `\\ " -"\\(task\\_ins\\_list\\)" +#: flwr.server.app.start_server:37 flwr.server.compat.app.start_driver:28 of +msgid "**hist** -- Object containing training and evaluation metrics." msgstr "" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 -#: flwr.server.driver.driver.Driver.push_task_ins:1 -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.push_task_ins:1 of -msgid "Schedule tasks." +#: flwr.server.compat.app.start_driver:33 of +msgid "Starting a driver that connects to an insecure server:" msgstr "" -#: ../../source/ref-api/flwr.server.driver.GrpcDriver.rst:2 -msgid "GrpcDriver" +#: flwr.server.compat.app.start_driver:37 of +msgid "Starting a driver that connects to an SSL-enabled server:" msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid ":py:obj:`connect `\\ \\(\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.start_server.rst:2 +#, fuzzy +msgid "start\\_server" +msgstr "serveur.start_server" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1 -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid "Connect to the Driver API." +#: flwr.server.app.start_server:3 of +msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +#: flwr.server.app.start_server:5 of msgid "" -":py:obj:`create_run `\\ " -"\\(req\\)" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_server` will create one." msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.create_run:1 of -#, fuzzy -msgid "Request for run ID." -msgstr "Demande pour une nouvelle Flower Baseline" - -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid ":py:obj:`disconnect `\\ \\(\\)" +#: flwr.server.app.start_server:16 of +msgid "" +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_server` will use " +"`flwr.server.client_manager.SimpleClientManager`." msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.disconnect:1 of -msgid "Disconnect from the Driver API." +#: flwr.server.app.start_server:21 of +msgid "" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" clients. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower clients need to be started with the same value (see " +"`flwr.client.start_client`), otherwise clients will not know about the " +"increased limit and block larger messages." 
msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid ":py:obj:`get_nodes `\\ \\(req\\)" -msgstr "" +#: flwr.server.app.start_server:42 of +#, fuzzy +msgid "Starting an insecure server:" +msgstr "Démarrer le serveur" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.get_nodes:1 of +#: flwr.server.app.start_server:46 of #, fuzzy -msgid "Get client IDs." -msgstr "Moteur client Edge" +msgid "Starting an SSL-enabled server:" +msgstr "Démarrer le serveur" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid "" -":py:obj:`pull_task_res `\\ " -"\\(req\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.strategy.rst:2 +#, fuzzy +msgid "strategy" +msgstr "stratégie.du.serveur" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`push_task_ins `\\ " -"\\(req\\)" +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.driver.start_driver.rst:2 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.bulyan.Bulyan:1 of #, fuzzy -msgid "start\\_driver" -msgstr "start_client" +msgid "Bulyan strategy." +msgstr "Stratégies intégrées" -#: flwr.server.driver.app.start_driver:3 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"The IPv4 or IPv6 address of the Driver API server. Defaults to " -"`\"[::]:8080\"`." +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" msgstr "" -#: flwr.server.driver.app.start_driver:6 of -msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. If no instance is provided, then `start_driver` will create one." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." msgstr "" -#: flwr.server.app.start_server:9 flwr.server.driver.app.start_driver:10 -#: flwr.simulation.app.start_simulation:28 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Currently supported values are `num_rounds` (int, default: 1) and " -"`round_timeout` in seconds (float, default: None)." +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" msgstr "" -#: flwr.server.app.start_server:12 flwr.server.driver.app.start_driver:13 of -msgid "" -"An implementation of the abstract base class " -"`flwr.server.strategy.Strategy`. If no strategy is provided, then " -"`start_server` will use `flwr.server.strategy.FedAvg`." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." msgstr "" -#: flwr.server.driver.app.start_driver:17 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"An implementation of the class `flwr.server.ClientManager`. If no " -"implementation is provided, then `start_driver` will use " -"`flwr.server.SimpleClientManager`." -msgstr "" - -#: flwr.server.app.start_server:37 flwr.server.driver.app.start_driver:26 of -msgid "**hist** -- Object containing training and evaluation metrics." 
-msgstr "" - -#: flwr.server.driver.app.start_driver:31 of -msgid "Starting a driver that connects to an insecure server:" -msgstr "" - -#: flwr.server.driver.app.start_driver:35 of -msgid "Starting a driver that connects to an SSL-enabled server:" -msgstr "" - -#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 -#, fuzzy -msgid "run\\_driver\\_api" -msgstr "flower-driver-api" - -#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 -msgid "run\\_fleet\\_api" -msgstr "" - -#: ../../source/ref-api/flwr.server.run_server_app.rst:2 -msgid "run\\_server\\_app" +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.run_superlink.rst:2 -#, fuzzy -msgid "run\\_superlink" -msgstr "flower-superlink" - -#: ../../source/ref-api/flwr.server.start_server.rst:2 -#, fuzzy -msgid "start\\_server" -msgstr "serveur.start_server" - -#: flwr.server.app.start_server:3 of -msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side adaptive clipping." msgstr "" -#: flwr.server.app.start_server:5 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. If no instance is provided, then `start_server` will create one." +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.app.start_server:16 of -msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_server` will use " -"`flwr.server.client_manager.SimpleClientManager`." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: of +msgid "Strategy wrapper for central DP with server-side adaptive clipping." msgstr "" -#: flwr.server.app.start_server:21 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" clients. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower clients need to be started with the same value (see " -"`flwr.client.start_client`), otherwise clients will not know about the " -"increased limit and block larger messages." +":py:obj:`DifferentialPrivacyClientSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.app.start_server:42 of -#, fuzzy -msgid "Starting an insecure server:" -msgstr "Démarrer le serveur" - -#: flwr.server.app.start_server:46 of -#, fuzzy -msgid "Starting an SSL-enabled server:" -msgstr "Démarrer le serveur" - -#: ../../source/ref-api/flwr.server.strategy.rst:2 -#, fuzzy -msgid "strategy" -msgstr "stratégie.du.serveur" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side fixed clipping." 
+msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FaultTolerantFedAvg " -"`\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`DifferentialPrivacyServerSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of -msgid "Configurable fault-tolerant FedAvg strategy implementation." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: of +msgid "Strategy wrapper for central DP with server-side fixed clipping." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedadagrad.FedAdagrad:1 of #, fuzzy msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." @@ -10322,201 +10998,179 @@ msgstr "" "`FedAdam` et `FedAdam` correspondent à la dernière version de l'article " "sur l'optimisation fédérée adaptative." -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAdam `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedadam.FedAdam:1 of msgid "FedAdam - Adaptive Federated Optimization using Adam." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAvg `\\ \\(\\*\\[\\, " "fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedavg.FedAvg:1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of #, fuzzy msgid "Federated Averaging strategy." msgstr "Stratégie de moyenne fédérée." -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -msgid "" -":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of -msgid "Configurable FedXgbNnAvg strategy implementation." -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -msgid "" -":py:obj:`FedXgbBagging `\\ " -"\\(\\[evaluate\\_function\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of -msgid "Configurable FedXgbBagging strategy implementation." -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -msgid "" -":py:obj:`FedXgbCyclic `\\ " -"\\(\\*\\*kwargs\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of -msgid "Configurable FedXgbCyclic strategy implementation." 
-msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAvgAndroid `\\ " "\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedavgm.FedAvgM:1 of #, fuzzy msgid "Federated Averaging with Momentum strategy." msgstr "Stratégie de moyenne fédérée." -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedmedian.FedMedian:1 of +#, fuzzy +msgid "Configurable FedMedian strategy implementation." +msgstr "Configuration de l'évaluation fédérée" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedOpt `\\ \\(\\*\\[\\, " "fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedopt.FedOpt:1 of #, fuzzy msgid "Federated Optim strategy." msgstr "Stratégie de moyenne fédérée." -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedProx `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedprox.FedProx:1 of #, fuzzy msgid "Federated Optimization strategy." msgstr "Stratégie de moyenne fédérée." -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FedYogi `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "FedYogi [Reddi et al., 2020] strategy." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of +msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " -"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Configurable QFedAvg strategy implementation." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +msgid "Configurable FedXgbBagging strategy implementation." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FedMedian `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedmedian.FedMedian:1 of -#, fuzzy -msgid "Configurable FedMedian strategy implementation." -msgstr "Configuration de l'évaluation fédérée" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of +msgid "Configurable FedXgbCyclic strategy implementation." +msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FedTrimmedAvg `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of -msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`Krum `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.krum.Krum:1 of -msgid "Krum [Blanchard et al., 2017] strategy." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`Bulyan `\\ \\(\\*\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.bulyan.Bulyan:1 of -#, fuzzy -msgid "Bulyan strategy." -msgstr "Stratégies intégrées" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of +msgid "Configurable fault-tolerant FedAvg strategy implementation." +msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`DPFedAvgAdaptive `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\)" +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.krum.Krum:1 of +msgid "Krum [Blanchard et al., 2017] strategy." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`DPFedAvgFixed `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of -msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Configurable QFedAvg strategy implementation." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid ":py:obj:`Strategy `\\ \\(\\)" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.strategy.Strategy:1 of msgid "Abstract base class for server strategy implementations." msgstr "" @@ -10719,6 +11373,14 @@ msgid "" "parameters\\, ...\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 @@ -10741,6 +11403,14 @@ msgid "" "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 #: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 @@ -10835,6 +11505,10 @@ msgstr "" msgid "Return the sample size and the required 
number of available clients." msgstr "" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 +msgid "DPFedAvgAdaptive" +msgstr "DPFedAvgAdaptive" + #: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" msgstr "" @@ -10852,6 +11526,14 @@ msgid "" "\\(server\\_round\\, results\\, ...\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of @@ -10901,6 +11583,14 @@ msgid "" "\\(server\\_round\\, parameters\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of msgid "Evaluate model parameters using an evaluation function from the strategy." 
@@ -10914,6 +11604,14 @@ msgid "" "\\(client\\_manager\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of msgid "Initialize global model parameters using given strategy." @@ -10948,6 +11646,14 @@ msgid "" "round of federated evaluation." msgstr "" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 +msgid "DPFedAvgFixed" +msgstr "DPFedAvgFixed" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 #: flwr.server.strategy.fedavg.FedAvg:1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of @@ -11029,14 +11735,400 @@ msgid "" "round of federated learning." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 -#, fuzzy -msgid "FaultTolerantFedAvg" -msgstr "server.strategy.FaultTolerantFedAvg" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyClientSideAdaptiveClipping" +msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 #: of -msgid "" +msgid "Use `adaptiveclipping_mod` modifier at the client side." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 +#: of +msgid "" +"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " +"happen on the client-side, usually by using the built-in " +"`adaptiveclipping_mod`." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 +#: of +msgid "The strategy to which DP functionalities will be added by this wrapper." 
+msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 +#: of +msgid "The noise multiplier for the Gaussian mechanism for model updates." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 +#: of +msgid "The number of clients that are sampled on each round." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 +#: of +msgid "" +"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " +"recommends to set to 0.1." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 +#: of +msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 +#: of +msgid "" +"The learning rate for the clipping norm adaptation. Defaults to 0.2. " +"Andrew et al. recommends to set to 0.2." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 +#: of +msgid "" +"The stddev of the noise added to the count of updates currently below the" +" estimate. Andrew et al. 
recommends to set to `expected_num_records/20`" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 +#: of +#, fuzzy +msgid "Create a strategy:" +msgstr "stratégie.du.serveur" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 +#: of +msgid "" +"Wrap the strategy with the " +"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 +#: of +msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 +#: of +#, fuzzy +msgid "Aggregate training results and update clip norms." +msgstr "Résultats globaux de l'évaluation." + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 +#, fuzzy +msgid "DifferentialPrivacyClientSideFixedClipping" +msgstr "Confidentialité différentielle" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 +#: of +msgid "Use `fixedclipping_mod` modifier at the client side." 
+msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 +#: of +msgid "" +"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " +"on the client-side, usually by using the built-in `fixedclipping_mod`." +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 +#: of +msgid "" +"The noise multiplier for the Gaussian mechanism for model updates. A " +"value of 1.0 or higher is recommended for strong privacy." +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 +#: of +msgid "The value of the clipping norm." +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 +#: of +msgid "" +"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " +"wrapper:" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 +#: of +msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 +#: of +#, fuzzy +msgid "Add noise to the aggregated parameters." +msgstr "Puis sérialise le résultat agrégé :" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyServerSideAdaptiveClipping" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 +#: of +msgid "" +"The standard deviation of the noise added to the count of updates below " +"the estimate. Andrew et al. 
recommends to set to " +"`expected_num_records/20`" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 +#: of +msgid "" +"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " +"wrapper" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 +#, fuzzy +msgid "DifferentialPrivacyServerSideFixedClipping" +msgstr "Confidentialité différentielle" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 +#: of +msgid "" +"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " +"wrapper" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 +#: of +msgid "Compute the updates, clip, and pass them for aggregation." 
+msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 +#: of +msgid "Afterward, add noise to the aggregated parameters." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 +#, fuzzy +msgid "FaultTolerantFedAvg" +msgstr "server.strategy.FaultTolerantFedAvg" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" ":py:obj:`aggregate_evaluate " "`\\ " "\\(server\\_round\\, results\\, ...\\)" @@ -11312,6 +12404,10 @@ msgid "" "Defaults to 1.0." msgstr "" +#: flwr.server.strategy.fedavg.FedAvg:33 of +msgid "Enable (True) or disable (False) in-place aggregation of model updates." +msgstr "" + #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`aggregate_evaluate " @@ -12410,31 +13506,477 @@ msgid "" "these as the initial global model parameters." msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:2 +#: ../../source/ref-api/flwr.server.workflow.rst:2 #, fuzzy -msgid "simulation" -msgstr "Simulation de moniteur" +msgid "workflow" +msgstr "Flux de travail" -#: ../../source/ref-api/flwr.simulation.rst:17::1 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\[\\, ...\\]\\)" +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:17::1 -#: flwr.simulation.app.start_simulation:1 of -#, fuzzy -msgid "Start a Ray-based Flower simulation server." -msgstr "Simulation de moniteur" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of +msgid "Default workflow in Flower." +msgstr "" -#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 -#, fuzzy -msgid "start\\_simulation" -msgstr "démarrer_simulation" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +msgid "" +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" +msgstr "" -#: flwr.simulation.app.start_simulation:3 of +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "The workflow for the SecAgg+ protocol." +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"A function creating client instances. 
The function must take a single " +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "The workflow for the SecAgg protocol." +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 +#, fuzzy +msgid "DefaultWorkflow" +msgstr "Flux de travail" + +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 +#, fuzzy +msgid "SecAggPlusWorkflow" +msgstr "Flux de travail" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 +#: of +msgid "" +"The SecAgg+ protocol ensures the secure summation of integer vectors " +"owned by multiple parties, without accessing any individual integer " +"vector. This workflow allows the server to compute the weighted average " +"of model parameters across all clients, ensuring individual contributions" +" remain private. This is achieved by clients sending both, a weighting " +"factor and a weighted version of the locally updated parameters, both of " +"which are masked for privacy. Specifically, each client uploads \"[w, w *" +" params]\" with masks, where weighting factor 'w' is the number of " +"examples ('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 +#: of +msgid "" +"The protocol involves four main stages: - 'setup': Send SecAgg+ " +"configuration to clients and collect their public keys. - 'share keys': " +"Broadcast public keys among clients and collect encrypted secret" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 +#: of +msgid "key shares." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:18 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:18 +#: of +msgid "" +"'collect masked vectors': Forward encrypted secret key shares to target " +"clients and collect masked model parameters." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:20 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:20 +#: of +msgid "" +"'unmask': Collect secret key shares to decrypt and aggregate the model " +"parameters." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:22 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:22 +#: of +msgid "" +"Only the aggregated model parameters are exposed and passed to " +"`Strategy.aggregate_fit`, ensuring individual data privacy." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:25 +#: of +msgid "" +"The number of shares into which each client's private key is split under " +"the SecAgg+ protocol. If specified as a float, it represents the " +"proportion of all selected clients, and the number of shares will be set " +"dynamically in the run time. A private key can be reconstructed from " +"these shares, allowing for the secure aggregation of model updates. Each " +"client sends one share to each of its neighbors while retaining one." 
+msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:25 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:32 +#: of +msgid "" +"The minimum number of shares required to reconstruct a client's private " +"key, or, if specified as a float, it represents the proportion of the " +"total number of shares needed for reconstruction. This threshold ensures " +"privacy by allowing for the recovery of contributions from dropped " +"clients during aggregation, without compromising individual client data." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:31 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:38 +#: of +msgid "" +"The maximum value of the weight that can be assigned to any single " +"client's update during the weighted average calculation on the server " +"side, e.g., in the FedAvg algorithm." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:35 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:42 +#: of +msgid "" +"The range within which model parameters are clipped before quantization. " +"This parameter ensures each model parameter is bounded within " +"[-clipping_range, clipping_range], facilitating quantization." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:39 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:46 +#: of +msgid "" +"The size of the range into which floating-point model parameters are " +"quantized, mapping each parameter to an integer in [0, " +"quantization_range-1]. This facilitates cryptographic operations on the " +"model updates." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:43 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:50 +#: of +msgid "" +"The range of values from which random mask entries are uniformly sampled " +"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " +"Please use 2**n values for `modulus_range` to prevent overflow issues." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:47 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:54 +#: of +msgid "" +"The timeout duration in seconds. If specified, the workflow will wait for" +" replies for this duration each time. If `None`, there is no time limit " +"and the workflow will wait until replies for all messages are received." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 +#: of +msgid "" +"Generally, higher `num_shares` means more robust to dropouts while " +"increasing the computational costs; higher `reconstruction_threshold` " +"means better privacy guarantees but less tolerance to dropouts." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:58 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:64 +#: of +msgid "Too large `max_weight` may compromise the precision of the quantization." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 +#: of +msgid "`modulus_range` must be 2**n and larger than `quantization_range`." 
+msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 +#: of +msgid "" +"When `num_shares` is a float, it is interpreted as the proportion of all " +"selected clients, and hence the number of shares will be determined in " +"the runtime. This allows for dynamic adjustment based on the total number" +" of participating clients." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:69 +#: of +msgid "" +"Similarly, when `reconstruction_threshold` is a float, it is interpreted " +"as the proportion of the number of shares needed for the reconstruction " +"of a private key. This feature enables flexibility in setting the " +"security threshold relative to the number of distributed shares." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:73 +#: of +msgid "" +"`num_shares`, `reconstruction_threshold`, and the quantization parameters" +" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg+" +" protocol." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`collect_masked_vectors_stage " +"`\\" +" \\(driver\\, ...\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "Execute the 'collect masked vectors' stage." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`setup_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 +#: of +msgid "Execute the 'setup' stage." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`share_keys_stage " +"`\\ " +"\\(driver\\, context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 +#: of +msgid "Execute the 'share keys' stage." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: of +msgid "Execute the 'unmask' stage." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 +#, fuzzy +msgid "SecAggWorkflow" +msgstr "Flux de travail" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "" +"Bases: " +":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of +msgid "" +"The SecAgg protocol ensures the secure summation of integer vectors owned" +" by multiple parties, without accessing any individual integer vector. " +"This workflow allows the server to compute the weighted average of model " +"parameters across all clients, ensuring individual contributions remain " +"private. This is achieved by clients sending both, a weighting factor and" +" a weighted version of the locally updated parameters, both of which are " +"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" +" with masks, where weighting factor 'w' is the number of examples " +"('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 of +msgid "" +"The protocol involves four main stages: - 'setup': Send SecAgg " +"configuration to clients and collect their public keys. - 'share keys': " +"Broadcast public keys among clients and collect encrypted secret" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 of +msgid "" +"Each client's private key is split into N shares under the SecAgg " +"protocol, where N is the number of selected clients." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:56 of +msgid "" +"Generally, higher `reconstruction_threshold` means better privacy " +"guarantees but less tolerance to dropouts." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 of +msgid "" +"When `reconstruction_threshold` is a float, it is interpreted as the " +"proportion of the number of all selected clients needed for the " +"reconstruction of a private key. This feature enables flexibility in " +"setting the security threshold relative to the number of selected " +"clients." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:64 of +msgid "" +"`reconstruction_threshold`, and the quantization parameters " +"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg " +"protocol." 
+msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`collect_masked_vectors_stage " +"`\\ " +"\\(driver\\, ...\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`setup_stage `\\" +" \\(driver\\, context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`share_keys_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:2 +#, fuzzy +msgid "simulation" +msgstr "Simulation de moniteur" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +msgid "" +":py:obj:`start_simulation `\\ \\(\\*\\," +" client\\_fn\\[\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +#: flwr.simulation.app.start_simulation:1 of +#, fuzzy +msgid "Start a Ray-based Flower simulation server." +msgstr "Simulation de moniteur" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +msgid "" +":py:obj:`run_simulation_from_cli " +"`\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +#: flwr.simulation.run_simulation.run_simulation_from_cli:1 of +msgid "Run Simulation Engine from the CLI." +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +msgid "" +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +#: flwr.simulation.run_simulation.run_simulation:1 of +msgid "Run a Flower App using the Simulation Engine." +msgstr "" + +#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 +#, fuzzy +msgid "run\\_simulation" +msgstr "Simulation de moniteur" + +#: flwr.simulation.run_simulation.run_simulation:3 of +msgid "" +"The `ServerApp` to be executed. It will send messages to different " +"`ClientApp` instances running on different (virtual) SuperNodes." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:6 of +msgid "" +"The `ClientApp` to be executed by each of the SuperNodes. It will receive" +" messages sent by the `ServerApp`." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:9 of +msgid "" +"Number of nodes that run a ClientApp. They can be sampled by a Driver in " +"the ServerApp and receive a Message describing what the ClientApp should " +"perform." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:13 of +msgid "A simulation backend that runs `ClientApp`s." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:15 of +msgid "" +"'A dictionary, e.g {\"\": , \"\": } to " +"configure a backend. Values supported in are those included by " +"`flwr.common.typing.ConfigsRecordValues`." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:19 of +msgid "" +"A boolean to indicate whether to enable GPU growth on the main thread. " +"This is desirable if you make use of a TensorFlow model on your " +"`ServerApp` while having your `ClientApp` running on the same GPU. " +"Without enabling this, you might encounter an out-of-memory error because" +" TensorFlow, by default, allocates all GPU memory. 
Read more about how " +"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " +"documentation: https://www.tensorflow.org/api/stable." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:26 of +msgid "" +"When diabled, only INFO, WARNING and ERROR log messages will be shown. If" +" enabled, DEBUG-level logs will be displayed." +msgstr "" + +#: ../../source/ref-api/flwr.simulation.run_simulation_from_cli.rst:2 +#, fuzzy +msgid "run\\_simulation\\_from\\_cli" +msgstr "Simulation de moniteur" + +#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +#, fuzzy +msgid "start\\_simulation" +msgstr "démarrer_simulation" + +#: flwr.simulation.app.start_simulation:3 of +msgid "" +"A function creating client instances. The function must take a single " "`str` argument called `cid`. It should return a single client instance of" " type Client. Note that the created client instances are ephemeral and " "will often be destroyed after a single method invocation. Since client " @@ -12522,7 +14064,7 @@ msgstr "" msgid "" "Optionally specify the type of actor to use. The actor object, which " "persists throughout the simulation, will be the process in charge of " -"running the clients' jobs (i.e. their `fit()` method)." +"executing a ClientApp wrapping input argument `client_fn`." msgstr "" #: flwr.simulation.app.start_simulation:54 of @@ -13635,9 +15177,9 @@ msgstr "" #: ../../source/ref-changelog.md:220 msgid "" "Much effort went into a completely restructured Flower docs experience. " -"The documentation on [flower.ai/docs](flower.ai/docs) is now divided " -"into Flower Framework, Flower Baselines, Flower Android SDK, Flower iOS " -"SDK, and code example projects." +"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " +"divided into Flower Framework, Flower Baselines, Flower Android SDK, " +"Flower iOS SDK, and code example projects." msgstr "" #: ../../source/ref-changelog.md:222 @@ -13975,15 +15517,15 @@ msgid "" "gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" " " "[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" and a [code " -"example](https://github.com/adap/flower/tree/main/examples/xgboost-quickstart)" -" that demonstrates the usage of this new strategy in an XGBoost project." +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." msgstr "" "Nous avons ajouté une nouvelle [stratégie] `FedXgbNnAvg` " "(https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," " et un [exemple de code] " -"(https://github.com/adap/flower/tree/main/examples/xgboost-quickstart)" -" qui démontre l'utilisation de cette nouvelle stratégie dans un projet " +"(https://github.com/adap/flower/tree/main/examples/xgboost-quickstart) " +"qui démontre l'utilisation de cette nouvelle stratégie dans un projet " "XGBoost." #: ../../source/ref-changelog.md:300 @@ -14199,12 +15741,14 @@ msgstr "" msgid "" "TabNet is a powerful and flexible framework for training machine learning" " models on tabular data. We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-tabnet)." +"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" +"/quickstart-tabnet)." 
msgstr "" "TabNet est un cadre puissant et flexible pour former des modèles " "d'apprentissage automatique sur des données tabulaires. Nous avons " -"maintenant un exemple fédéré utilisant Flower : " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-tabnet)." +"maintenant un exemple fédéré utilisant Flower : [quickstart-" +"tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-" +"tabnet)." #: ../../source/ref-changelog.md:334 msgid "" @@ -14396,12 +15940,14 @@ msgstr "" msgid "" "A new code example (`quickstart-fastai`) demonstrates federated learning " "with [fastai](https://www.fast.ai/) and Flower. You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples/quickstart-fastai)." +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." msgstr "" "Un nouvel exemple de code (`quickstart-fastai`) démontre l'apprentissage " "fédéré avec [fastai](https://www.fast.ai/) et Flower. Tu peux le trouver " -"ici : " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples/quickstart-fastai)." +"ici : [quickstart-" +"fastai](https://github.com/adap/flower/tree/main/examples/quickstart-" +"fastai)." #: ../../source/ref-changelog.md:376 msgid "" @@ -14723,8 +16269,8 @@ msgid "" "[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" "customize-the-client-pytorch.html)" msgstr "" -"[Client et NumPyClient] (https://flower.ai/docs/tutorial/Flower-4" -"-Client-and-NumPyClient-PyTorch.html)" +"[Client et NumPyClient] (https://flower.ai/docs/tutorial/Flower-4-Client-" +"and-NumPyClient-PyTorch.html)" #: ../../source/ref-changelog.md:435 msgid "" @@ -14845,12 +16391,14 @@ msgstr "" #: ../../source/ref-changelog.md:453 msgid "" "A new code example (`quickstart-pandas`) demonstrates federated analytics" -" with Pandas and Flower. You can find it here: " -"[quickstart-pandas](https://github.com/adap/flower/tree/main/examples/quickstart-pandas)." +" with Pandas and Flower. You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." msgstr "" "Un nouvel exemple de code (`quickstart-pandas`) démontre l'analyse " -"fédérée avec Pandas et Flower. Tu peux le trouver ici : " -"[quickstart-pandas](https://github.com/adap/flower/tree/main/examples/quickstart-pandas)." +"fédérée avec Pandas et Flower. Tu peux le trouver ici : [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." #: ../../source/ref-changelog.md:455 msgid "" @@ -14949,9 +16497,8 @@ msgid "" "never contributed on GitHub before, this is the perfect place to start!" msgstr "" "L'un des points forts est le nouveau [guide du premier contributeur] " -"(https://flower.ai/docs/first-time-contributors.html) : si tu n'as " -"jamais contribué sur GitHub auparavant, c'est l'endroit idéal pour " -"commencer !" +"(https://flower.ai/docs/first-time-contributors.html) : si tu n'as jamais" +" contribué sur GitHub auparavant, c'est l'endroit idéal pour commencer !" #: ../../source/ref-changelog.md:477 msgid "v1.1.0 (2022-10-31)" @@ -15847,14 +17394,15 @@ msgstr "" "[#914](https://github.com/adap/flower/pull/914))" #: ../../source/ref-changelog.md:660 +#, fuzzy msgid "" "The first preview release of Flower Baselines has arrived! We're " "kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " "FedAdam, FedAdagrad), FedBN, and FedAvgM. 
Check the documentation on how " "to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " "With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.ai/docs/contributing-" -"baselines.html)." +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." msgstr "" "La première version préliminaire de Flower Baselines est arrivée ! Nous " "démarrons Flower Baselines avec des implémentations de FedOpt (FedYogi, " @@ -16743,10 +18291,11 @@ msgstr "" "métriques spécifiques à une tâche sur le serveur." #: ../../source/ref-changelog.md:845 +#, fuzzy msgid "" "Custom metric dictionaries are now used in two user-facing APIs: they are" " returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " -"they enable evaluation functions passed to build-in strategies (via " +"they enable evaluation functions passed to built-in strategies (via " "`eval_fn`) to return more than two evaluation metrics. Strategies can " "even return *aggregated* metrics dictionaries for the server to keep " "track of." @@ -16760,8 +18309,9 @@ msgstr "" "*agrégées* pour que le serveur puisse en garder la trace." #: ../../source/ref-changelog.md:847 +#, fuzzy msgid "" -"Stratey implementations should migrate their `aggregate_fit` and " +"Strategy implementations should migrate their `aggregate_fit` and " "`aggregate_evaluate` methods to the new return type (e.g., by simply " "returning an empty `{}`), server-side evaluation functions should migrate" " from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." @@ -17252,28 +18802,14 @@ msgstr "" "tels que `PyTorch `_ ou `TensorFlow " "`_." -#: ../../source/ref-example-projects.rst:11 +#: ../../source/ref-example-projects.rst:10 +#, fuzzy msgid "" -"Flower usage examples used to be bundled with Flower in a package called " -"``flwr_example``. We are migrating those examples to standalone projects " -"to make them easier to use. All new examples are based in the directory " -"`examples `_." -msgstr "" -"Les exemples d'utilisation de Flower étaient auparavant regroupés avec " -"Flower dans un paquet appelé ``flwr_example``. Nous migrons ces exemples " -"vers des projets autonomes pour les rendre plus faciles à utiliser. Tous " -"les nouveaux exemples sont basés dans le répertoire ``examples " -"`_." - -#: ../../source/ref-example-projects.rst:16 -msgid "The following examples are available as standalone projects." +"The following examples are available as standalone projects. Quickstart " +"TensorFlow/Keras ---------------------------" msgstr "Les exemples suivants sont disponibles sous forme de projets autonomes." 
-#: ../../source/ref-example-projects.rst:20 -msgid "Quickstart TensorFlow/Keras" -msgstr "Démarrage rapide de TensorFlow/Keras" - -#: ../../source/ref-example-projects.rst:22 +#: ../../source/ref-example-projects.rst:14 msgid "" "The TensorFlow/Keras quickstart example shows CIFAR-10 image " "classification with MobileNetV2:" @@ -17281,7 +18817,7 @@ msgstr "" "L'exemple de démarrage rapide TensorFlow/Keras montre la classification " "d'images CIFAR-10 avec MobileNetV2 :" -#: ../../source/ref-example-projects.rst:25 +#: ../../source/ref-example-projects.rst:17 #, fuzzy msgid "" "`Quickstart TensorFlow (Code) " @@ -17292,16 +18828,14 @@ msgstr "" "`_" -#: ../../source/ref-example-projects.rst:26 +#: ../../source/ref-example-projects.rst:18 #, fuzzy -msgid "" -"`Quickstart TensorFlow (Tutorial) `_" +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" msgstr "" "`Quickstart TensorFlow (Tutorial) `_" -#: ../../source/ref-example-projects.rst:27 +#: ../../source/ref-example-projects.rst:19 msgid "" "`Quickstart TensorFlow (Blog Post) `_" @@ -17309,12 +18843,12 @@ msgstr "" "`Quickstart TensorFlow (Blog Post) `_" -#: ../../source/ref-example-projects.rst:31 +#: ../../source/ref-example-projects.rst:23 #: ../../source/tutorial-quickstart-pytorch.rst:5 msgid "Quickstart PyTorch" msgstr "Démarrage rapide de PyTorch" -#: ../../source/ref-example-projects.rst:33 +#: ../../source/ref-example-projects.rst:25 msgid "" "The PyTorch quickstart example shows CIFAR-10 image classification with a" " simple Convolutional Neural Network:" @@ -17322,7 +18856,7 @@ msgstr "" "L'exemple de démarrage rapide PyTorch montre la classification d'images " "CIFAR-10 avec un simple réseau neuronal convolutif :" -#: ../../source/ref-example-projects.rst:36 +#: ../../source/ref-example-projects.rst:28 #, fuzzy msgid "" "`Quickstart PyTorch (Code) " @@ -17331,20 +18865,18 @@ msgstr "" "`Quickstart PyTorch (Code) " "`_" -#: ../../source/ref-example-projects.rst:37 +#: ../../source/ref-example-projects.rst:29 #, fuzzy -msgid "" -"`Quickstart PyTorch (Tutorial) `_" +msgid ":doc:`Quickstart PyTorch (Tutorial) `" msgstr "" "`Quickstart PyTorch (Tutorial) `_" -#: ../../source/ref-example-projects.rst:41 +#: ../../source/ref-example-projects.rst:33 msgid "PyTorch: From Centralized To Federated" msgstr "PyTorch : De la centralisation à la fédération" -#: ../../source/ref-example-projects.rst:43 +#: ../../source/ref-example-projects.rst:35 msgid "" "This example shows how a regular PyTorch project can be federated using " "Flower:" @@ -17352,7 +18884,7 @@ msgstr "" "Cet exemple montre comment un projet PyTorch ordinaire peut être fédéré à" " l'aide de Flower :" -#: ../../source/ref-example-projects.rst:45 +#: ../../source/ref-example-projects.rst:37 #, fuzzy msgid "" "`PyTorch: From Centralized To Federated (Code) " @@ -17363,22 +18895,21 @@ msgstr "" "`_" -#: ../../source/ref-example-projects.rst:46 +#: ../../source/ref-example-projects.rst:38 #, fuzzy msgid "" -"`PyTorch: From Centralized To Federated (Tutorial) " -"`_" +":doc:`PyTorch: From Centralized To Federated (Tutorial) `" msgstr "" "`PyTorch : De la centralisation à la fédération (Tutoriel) " "`_" -#: ../../source/ref-example-projects.rst:50 +#: ../../source/ref-example-projects.rst:42 msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" msgstr "Apprentissage fédéré sur Raspberry Pi et Nvidia Jetson" -#: ../../source/ref-example-projects.rst:52 +#: ../../source/ref-example-projects.rst:44 msgid "" "This example shows how Flower can be used to build a federated learning 
" "system that run across Raspberry Pi and Nvidia Jetson:" @@ -17387,7 +18918,7 @@ msgstr "" "système d'apprentissage fédéré qui fonctionne sur Raspberry Pi et Nvidia " "Jetson :" -#: ../../source/ref-example-projects.rst:54 +#: ../../source/ref-example-projects.rst:46 #, fuzzy msgid "" "`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " @@ -17396,7 +18927,7 @@ msgstr "" "`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Code) " "`_" -#: ../../source/ref-example-projects.rst:55 +#: ../../source/ref-example-projects.rst:47 msgid "" "`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " "`_" @@ -17404,214 +18935,39 @@ msgstr "" "`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Blog Post) " "`_" -#: ../../source/ref-example-projects.rst:60 -msgid "Legacy Examples (`flwr_example`)" -msgstr "Exemples hérités (`flwr_example`)" - -#: ../../source/ref-example-projects.rst:63 +#: ../../source/ref-faq.rst:4 msgid "" -"The useage examples in `flwr_example` are deprecated and will be removed " -"in the future. New examples are provided as standalone projects in " -"`examples `_." +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." msgstr "" -"Les exemples d'utilisation dans `flwr_example` sont obsolètes et seront " -"supprimés à l'avenir. De nouveaux exemples sont fournis en tant que " -"projets autonomes dans `examples " -"`_." +"Cette page rassemble les réponses aux questions les plus fréquemment " +"posées sur l'apprentissage fédéré avec Flower." -#: ../../source/ref-example-projects.rst:69 -msgid "Extra Dependencies" -msgstr "Dépendances supplémentaires" +#: ../../source/ref-faq.rst +#, fuzzy +msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +msgstr "" +":fa:`eye,mr-1` Flower peut-il fonctionner sur les ordinateurs portables " +"Juptyter / Google Colab ?" -#: ../../source/ref-example-projects.rst:71 +#: ../../source/ref-faq.rst:8 msgid "" -"The core Flower framework keeps a minimal set of dependencies. The " -"examples demonstrate Flower in the context of different machine learning " -"frameworks, so additional dependencies need to be installed before an " -"example can be run." +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. Here's a quickstart example:" msgstr "" -"Le noyau du framework Flower conserve un ensemble minimal de dépendances." -" Les exemples démontrent Flower dans le contexte de différents frameworks" -" d'apprentissage automatique, de sorte que des dépendances " -"supplémentaires doivent être installées avant qu'un exemple puisse être " -"exécuté." - -#: ../../source/ref-example-projects.rst:75 -msgid "For PyTorch examples::" -msgstr "Pour les exemples de PyTorch: :" - -#: ../../source/ref-example-projects.rst:79 -msgid "For TensorFlow examples::" -msgstr "Pour les exemples de TensorFlow : :" - -#: ../../source/ref-example-projects.rst:83 -msgid "For both PyTorch and TensorFlow examples::" -msgstr "Pour les exemples PyTorch et TensorFlow: :" +"Oui, c'est possible ! Flower est même livré avec quelques optimisations " +"pour qu'il fonctionne encore mieux sur Colab. Voici un exemple de " +"démarrage rapide :" -#: ../../source/ref-example-projects.rst:87 +#: ../../source/ref-faq.rst:10 +#, fuzzy msgid "" -"Please consult :code:`pyproject.toml` for a full list of possible extras " -"(section :code:`[tool.poetry.extras]`)." 
+"`Flower simulation PyTorch " +"`_" msgstr "" -"Tu peux consulter :code:`pyproject.toml` pour une liste complète des " -"extras possibles (section :code:`[tool.poetry.extras]`)." - -#: ../../source/ref-example-projects.rst:92 -msgid "PyTorch Examples" -msgstr "Exemples de PyTorch" - -#: ../../source/ref-example-projects.rst:94 -msgid "" -"Our PyTorch examples are based on PyTorch 1.7. They should work with " -"other releases as well. So far, we provide the following examples." -msgstr "" -"Nos exemples PyTorch sont basés sur PyTorch 1.7. Ils devraient " -"fonctionner avec d'autres versions également. Jusqu'à présent, nous " -"fournissons les exemples suivants." - -#: ../../source/ref-example-projects.rst:98 -msgid "CIFAR-10 Image Classification" -msgstr "Classification d'images CIFAR-10" - -#: ../../source/ref-example-projects.rst:100 -msgid "" -"`CIFAR-10 and CIFAR-100 `_ " -"are popular RGB image datasets. The Flower CIFAR-10 example uses PyTorch " -"to train a simple CNN classifier in a federated learning setup with two " -"clients." -msgstr "" -"`CIFAR-10 et CIFAR-100 `_ " -"sont des ensembles de données d'images RVB populaires. L'exemple Flower " -"CIFAR-10 utilise PyTorch pour former un classificateur CNN simple dans " -"une configuration d'apprentissage fédéré avec deux clients." - -#: ../../source/ref-example-projects.rst:104 -#: ../../source/ref-example-projects.rst:121 -#: ../../source/ref-example-projects.rst:146 -msgid "First, start a Flower server:" -msgstr "Tout d'abord, démarre un serveur Flower :" - -#: ../../source/ref-example-projects.rst:106 -msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" -msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" - -#: ../../source/ref-example-projects.rst:108 -#: ../../source/ref-example-projects.rst:125 -#: ../../source/ref-example-projects.rst:150 -msgid "Then, start the two clients in a new terminal window:" -msgstr "Ensuite, démarre les deux clients dans une nouvelle fenêtre de terminal :" - -#: ../../source/ref-example-projects.rst:110 -msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" -msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" - -#: ../../source/ref-example-projects.rst:112 -msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." -msgstr "Pour plus de détails, voir :code:`src/py/flwr_example/pytorch_cifar`." - -#: ../../source/ref-example-projects.rst:115 -msgid "ImageNet-2012 Image Classification" -msgstr "ImageNet-2012 Classification des images" - -#: ../../source/ref-example-projects.rst:117 -msgid "" -"`ImageNet-2012 `_ is one of the major computer" -" vision datasets. The Flower ImageNet example uses PyTorch to train a " -"ResNet-18 classifier in a federated learning setup with ten clients." -msgstr "" -"`ImageNet-2012 `_ est l'un des principaux " -"ensembles de données de vision par ordinateur. L'exemple Flower ImageNet " -"utilise PyTorch pour entraîner un classificateur ResNet-18 dans une " -"configuration d'apprentissage fédéré avec dix clients." - -#: ../../source/ref-example-projects.rst:123 -msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" -msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" - -#: ../../source/ref-example-projects.rst:127 -msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" -msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" - -#: ../../source/ref-example-projects.rst:129 -msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." 
-msgstr "Pour plus de détails, voir :code:`src/py/flwr_example/pytorch_imagenet`." - -#: ../../source/ref-example-projects.rst:133 -msgid "TensorFlow Examples" -msgstr "Exemples de TensorFlow" - -#: ../../source/ref-example-projects.rst:135 -msgid "" -"Our TensorFlow examples are based on TensorFlow 2.0 or newer. So far, we " -"provide the following examples." -msgstr "" -"Nos exemples TensorFlow sont basés sur TensorFlow 2.0 ou une version plus" -" récente. Jusqu'à présent, nous te proposons les exemples suivants." - -#: ../../source/ref-example-projects.rst:139 -msgid "Fashion-MNIST Image Classification" -msgstr "Classification d'images Fashion-MNIST" - -#: ../../source/ref-example-projects.rst:141 -msgid "" -"`Fashion-MNIST `_ is " -"often used as the \"Hello, world!\" of machine learning. We follow this " -"tradition and provide an example which samples random local datasets from" -" Fashion-MNIST and trains a simple image classification model over those " -"partitions." -msgstr "" -"nous suivons cette tradition et fournissons un exemple qui échantillonne " -"des ensembles de données locales aléatoires de Fashion-MNIST et entraîne " -"un modèle simple de classification d'images sur ces partitions." - -#: ../../source/ref-example-projects.rst:148 -msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" -msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" - -#: ../../source/ref-example-projects.rst:152 -msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" -msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" - -#: ../../source/ref-example-projects.rst:154 -msgid "" -"For more details, see " -":code:`src/py/flwr_example/tensorflow_fashion_mnist`." -msgstr "" -"Pour plus de détails, voir " -":code:`src/py/flwr_example/tensorflow_fashion_mnist`." - -#: ../../source/ref-faq.rst:4 -msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." -msgstr "" -"Cette page rassemble les réponses aux questions les plus fréquemment " -"posées sur l'apprentissage fédéré avec Flower." - -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can Flower run on Juptyter Notebooks / Google Colab?" -msgstr "" -":fa:`eye,mr-1` Flower peut-il fonctionner sur les ordinateurs portables " -"Juptyter / Google Colab ?" - -#: ../../source/ref-faq.rst:8 -msgid "" -"Yes, it can! Flower even comes with a few under-the-hood optimizations to" -" make it work even better on Colab. Here's a quickstart example:" -msgstr "" -"Oui, c'est possible ! Flower est même livré avec quelques optimisations " -"pour qu'il fonctionne encore mieux sur Colab. Voici un exemple de " -"démarrage rapide :" - -#: ../../source/ref-faq.rst:10 -#, fuzzy -msgid "" -"`Flower simulation PyTorch " -"`_" -msgstr "" -"`Flower Quickstart (TensorFlow/Keras) " -"`_" +"`Flower Quickstart (TensorFlow/Keras) " +"`_" #: ../../source/ref-faq.rst:11 #, fuzzy @@ -17652,13 +19008,13 @@ msgstr "" #, fuzzy msgid "" "Yes, it does. Please take a look at our `blog post " -"`_ or check out the code examples:" +"`_ or check out the code examples:" msgstr "" "Oui. Jetez un coup d'œil à notre `blog post " -"`_ ou consultez l'`exemple de code Android sur GitHub" -" `_." +"`_ ou consultez l'`exemple de code Android sur GitHub " +"`_." #: ../../source/ref-faq.rst:21 msgid "" @@ -17701,8 +19057,9 @@ msgstr "" "`_." #: ../../source/ref-faq.rst:30 +#, fuzzy msgid "" -"`Flower meets KOSMoS `_." msgstr "" "`Flower rencontre KOSMoS `_ ." 
msgstr "" "Si tu veux voir tout ce qui est mis ensemble, tu devrais consulter " "l'exemple de code complet : " @@ -18261,7 +19616,7 @@ msgstr "" "huggingface](https://github.com/adap/flower/tree/main/examples" "/quickstart-huggingface)." -#: ../../source/tutorial-quickstart-huggingface.rst:227 +#: ../../source/tutorial-quickstart-huggingface.rst:226 msgid "" "Of course, this is a very basic example, and a lot can be added or " "modified, it was just to showcase how simply we could federate a Hugging " @@ -18272,7 +19627,7 @@ msgstr "" "simplicité on pouvait fédérer un flux de travail Hugging Face à l'aide de" " Flower." -#: ../../source/tutorial-quickstart-huggingface.rst:230 +#: ../../source/tutorial-quickstart-huggingface.rst:229 msgid "" "Note that in this example we used :code:`PyTorch`, but we could have very" " well used :code:`TensorFlow`." @@ -18304,9 +19659,9 @@ msgstr "" #, fuzzy msgid "" "First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a `virtualenv " -"`_. For the Flower " -"client implementation in iOS, it is recommended to use Xcode as our IDE." +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." msgstr "" "Tout d'abord, il est recommandé de créer un environnement virtuel et de " "tout exécuter au sein d'un `virtualenv `_. As a result, we would " -"encourage you to use other ML frameworks alongise Flower, for example, " +"encourage you to use other ML frameworks alongside Flower, for example, " "PyTorch. This tutorial might be removed in future versions of Flower." msgstr "" @@ -18517,15 +19890,25 @@ msgstr "" #: ../../source/tutorial-quickstart-mxnet.rst:14 #: ../../source/tutorial-quickstart-scikitlearn.rst:12 +#, fuzzy msgid "" "It is recommended to create a virtual environment and run everything " -"within this `virtualenv `_." +"within this :doc:`virtualenv `." msgstr "" "Il est recommandé de créer un environnement virtuel et de tout exécuter " "dans ce `virtualenv `_." +#: ../../source/tutorial-quickstart-mxnet.rst:16 +#: ../../source/tutorial-quickstart-pytorch.rst:17 +#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +msgid "" +"Our example consists of one *server* and two *clients* all having the " +"same model." +msgstr "" +"Notre exemple consiste en un *serveur* et deux *clients* ayant tous le " +"même modèle." + #: ../../source/tutorial-quickstart-mxnet.rst:18 #: ../../source/tutorial-quickstart-scikitlearn.rst:16 msgid "" @@ -18849,15 +20232,40 @@ msgstr "" #: ../../source/tutorial-quickstart-pytorch.rst:15 #: ../../source/tutorial-quickstart-xgboost.rst:39 +#, fuzzy msgid "" "First of all, it is recommended to create a virtual environment and run " -"everything within a `virtualenv `_." +"everything within a :doc:`virtualenv `." msgstr "" "Tout d'abord, il est recommandé de créer un environnement virtuel et de " "tout exécuter au sein d'un `virtualenv `_." +#: ../../source/tutorial-quickstart-pytorch.rst:19 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." 
+msgstr "" +"*Les clients* sont chargés de générer des mises à jour de poids " +"individuelles pour le modèle en fonction de leurs ensembles de données " +"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " +"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " +"cette version améliorée du modèle à chaque *client*. Un cycle complet de " +"mises à jour de poids s'appelle un *round*." + +#: ../../source/tutorial-quickstart-pytorch.rst:23 +msgid "" +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. You can do this by running :" +msgstr "" +"Maintenant que nous avons une idée générale de ce qui se passe, " +"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " +"exécutant :" + #: ../../source/tutorial-quickstart-pytorch.rst:29 msgid "" "Since we want to use PyTorch to solve a computer vision task, let's go " @@ -19064,7 +20472,8 @@ msgstr "" "régression logistique` sur MNIST en utilisant Flower et scikit-learn." #: ../../source/tutorial-quickstart-scikitlearn.rst:26 -msgid "Since we want to use scikt-learn, let's go ahead and install it:" +#, fuzzy +msgid "Since we want to use scikit-learn, let's go ahead and install it:" msgstr "Puisque nous voulons utiliser scikt-learn, allons-y et installons-le :" #: ../../source/tutorial-quickstart-scikitlearn.rst:32 @@ -19154,12 +20563,14 @@ msgstr "" "scikit-learn :" #: ../../source/tutorial-quickstart-scikitlearn.rst:73 +#, fuzzy msgid "" -"We load the MNIST dataset from `OpenML `_, " -"a popular image classification dataset of handwritten digits for machine " -"learning. The utility :code:`utils.load_mnist()` downloads the training " -"and test data. The training set is split afterwards into 10 partitions " -"with :code:`utils.partition()`." +"We load the MNIST dataset from `OpenML " +"`_, a popular " +"image classification dataset of handwritten digits for machine learning. " +"The utility :code:`utils.load_mnist()` downloads the training and test " +"data. The training set is split afterwards into 10 partitions with " +":code:`utils.partition()`." msgstr "" "Nous chargeons l'ensemble de données MNIST de `OpenML " "`_, un ensemble de données de " @@ -19757,10 +21168,9 @@ msgid "" "`_), we provide more options to define various experimental" " setups, including aggregation strategies, data partitioning and " -"centralised/distributed evaluation. We also support `Flower simulation " -"`_ making " -"it easy to simulate large client cohorts in a resource-aware manner. " -"Let's take a look!" +"centralised/distributed evaluation. We also support :doc:`Flower " +"simulation ` making it easy to simulate large " +"client cohorts in a resource-aware manner. Let's take a look!" msgstr "" #: ../../source/tutorial-quickstart-xgboost.rst:603 @@ -20256,8 +21666,8 @@ msgstr "" "Bienvenue dans la quatrième partie du tutoriel sur l'apprentissage fédéré" " Flower. 
Dans les parties précédentes de ce tutoriel, nous avons présenté" " l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " -"`__)," -" nous avons appris comment les stratégies peuvent être utilisées pour " +"`__), " +"nous avons appris comment les stratégies peuvent être utilisées pour " "personnaliser l'exécution à la fois sur le serveur et les clients " "(`partie 2 `__), et nous avons construit notre propre stratégie " @@ -20567,8 +21977,8 @@ msgstr "Côté client" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 msgid "" -"To be able to serialize our ``ndarray``\\ s into sparse " -"parameters, we will just have to call our custom functions in our " +"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " +"will just have to call our custom functions in our " "``flwr.client.Client``." msgstr "" "Pour pouvoir sérialiser nos ``ndarray`` en paramètres sparse, il nous " @@ -21419,8 +22829,8 @@ msgid "" msgstr "" "Dans ce carnet, nous allons commencer à personnaliser le système " "d'apprentissage fédéré que nous avons construit dans le carnet " -"d'introduction (toujours en utilisant `Flower `__ et" -" `PyTorch `__)." +"d'introduction (toujours en utilisant `Flower `__ et " +"`PyTorch `__)." #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 #, fuzzy @@ -21725,9 +23135,9 @@ msgstr "" #, fuzzy msgid "" "The `Flower Federated Learning Tutorial - Part 3 " -"`__ shows how to build a fully custom ``Strategy`` " -"from scratch." +"`__ shows how to build a fully custom ``Strategy`` from " +"scratch." msgstr "" "Le `Tutoriel d'apprentissage fédéré Flower - Partie 3 [WIP] " "`__ browser or the `Signal `__ " "messenger shows that users care about privacy. In fact, they choose the " -"privacy-enhancing version over other alternatives, if such an alternative " -"exists. But what can we do to apply machine learning and data science to " -"these cases to utilize private data? After all, these are all areas that " -"would benefit significantly from recent advances in AI." +"privacy-enhancing version over other alternatives, if such an alternative" +" exists. But what can we do to apply machine learning and data science to" +" these cases to utilize private data? After all, these are all areas that" +" would benefit significantly from recent advances in AI." msgstr "" "La popularité des systèmes améliorant la confidentialité comme le " "navigateur `Brave `__ ou le messager `Signal " @@ -22186,7 +23596,7 @@ msgstr "" "partir d'un point de contrôle précédemment sauvegardé." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|ba47ffb421814b0f8f9fa5719093d839|" +msgid "|5b1408eec0d746cdb91162a9107b6089|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 @@ -22221,7 +23631,7 @@ msgstr "" "rendements décroissants." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|aeac5bf79cbf497082e979834717e01b|" +msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 @@ -22254,7 +23664,7 @@ msgstr "" "données locales, ou même de quelques étapes (mini-batchs)." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|ce27ed4bbe95459dba016afc42486ba2|" +msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 @@ -22285,7 +23695,7 @@ msgstr "" " l'entraînement local." 
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|ae94a7f71dda443cbec2385751427d41|" +msgid "|ec1fe880237247e0975f52766775ab84|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 @@ -22344,7 +23754,7 @@ msgstr "" "times as much as each of the 100 examples." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" +msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 @@ -22451,11 +23861,6 @@ msgstr "" "empêcher le serveur de voir les résultats soumis par les nœuds clients " "individuels." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 -#, fuzzy -msgid "Differential Privacy" -msgstr "Confidentialité différentielle" - #: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 msgid "" "Differential privacy (DP) is often mentioned in the context of Federated " @@ -22492,7 +23897,7 @@ msgstr "" "quel cadre de ML et n'importe quel langage de programmation." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|08cb60859b07461588fe44e55810b050|" +msgid "|ff726bc5505e432388ee2fdd6ef420b9|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 @@ -24629,10 +26034,10 @@ msgstr "" #~ "Flower Python server, it is recommended" #~ " to create a virtual environment and" #~ " run everything within a `virtualenv " -#~ "`_." -#~ " For the Flower client implementation " -#~ "in iOS, it is recommended to use" -#~ " Xcode as our IDE." +#~ "`_. " +#~ "For the Flower client implementation in" +#~ " iOS, it is recommended to use " +#~ "Xcode as our IDE." #~ msgstr "" #~ "Tout d'abord, pour l'exécution du " #~ "serveur Flower Python, il est recommandé" @@ -24795,27 +26200,12 @@ msgstr "" #~ "The implementation can be seen in " #~ ":code:`MLModelInspect`." #~ msgstr "" -#~ "Comme CoreML ne permet pas de voir" -#~ " les paramètres du modèle avant la" -#~ " formation, et que l'accès aux " -#~ "paramètres du modèle pendant ou après" -#~ " la formation ne peut se faire " -#~ "qu'en spécifiant le nom de la " -#~ "couche, nous devons connaître ces " -#~ "informations à l'avance, en regardant " -#~ "les spécifications du modèle, qui sont" -#~ " écrites sous forme de fichiers " -#~ "proto. La mise en œuvre peut être" -#~ " vue dans :code:`MLModelInspect`." #~ msgid "" #~ "After we have all of the necessary" #~ " informations, let's create our Flower " #~ "client." #~ msgstr "" -#~ "Après avoir obtenu toutes les " -#~ "informations nécessaires, créons notre client" -#~ " Flower." #~ msgid "" #~ "Then start the Flower gRPC client " @@ -25474,8 +26864,8 @@ msgstr "" #~ " papers. If you want to add a" #~ " new baseline or experiment, please " #~ "check the `Contributing Baselines " -#~ "`_ " -#~ "section." +#~ "`_ section." #~ msgstr "" #~ msgid "Paper" @@ -25798,3 +27188,1467 @@ msgstr "" #~ msgid "|c76452ae1ed84965be7ef23c72b95845|" #~ msgstr "" +#~ msgid "" +#~ "Please follow the first section on " +#~ "`Run Flower using Docker " +#~ "`_ which covers this" +#~ " step in more detail." +#~ msgstr "" + +#~ msgid "" +#~ "Since `Flower 1.5 `_ we have " +#~ "introduced translations to our doc " +#~ "pages, but, as you might have " +#~ "noticed, the translations are often " +#~ "imperfect. 
If you speak languages other" +#~ " than English, you might be able " +#~ "to help us in our effort to " +#~ "make Federated Learning accessible to as" +#~ " many people as possible by " +#~ "contributing to those translations! This " +#~ "might also be a great opportunity " +#~ "for those wanting to become open " +#~ "source contributors with little prerequistes." +#~ msgstr "" + +#~ msgid "" +#~ "You input your translation in the " +#~ "textbox at the top and then, once" +#~ " you are happy with it, you " +#~ "either press ``Save and continue`` (to" +#~ " save the translation and go to " +#~ "the next untranslated string), ``Save " +#~ "and stay`` (to save the translation " +#~ "and stay on the same page), " +#~ "``Suggest`` (to add your translation to" +#~ " suggestions for other users to " +#~ "view), or ``Skip`` (to go to the" +#~ " next untranslated string without saving" +#~ " anything)." +#~ msgstr "" + +#~ msgid "" +#~ "If the section is completely empty " +#~ "(without any token) or non-existant, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." +#~ msgstr "" + +#~ msgid "Example: Walk-Through PyTorch & MNIST" +#~ msgstr "Exemple : PyTorch et MNIST" + +#~ msgid "" +#~ "In this tutorial we will learn, " +#~ "how to train a Convolutional Neural " +#~ "Network on MNIST using Flower and " +#~ "PyTorch." +#~ msgstr "" +#~ "Dans ce tutoriel, nous allons apprendre," +#~ " comment former un réseau neuronal " +#~ "convolutif sur MNIST en utilisant Flower" +#~ " et PyTorch." + +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead an install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "" +#~ "Puisque nous voulons utiliser PyTorch " +#~ "pour résoudre une tâche de vision " +#~ "par ordinateur, installons PyTorch et la" +#~ " bibliothèque **torchvision** :" + +#~ msgid "Ready... Set... Train!" +#~ msgstr "Prêts... prêts... entraînez-vous !" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Basic MNIST Example" +#~ " `_. " +#~ "This will allow you see how easy" +#~ " it is to wrap your code with" +#~ " Flower and begin training in a " +#~ "federated way. We provide you with " +#~ "two helper scripts, namely *run-" +#~ "server.sh*, and *run-clients.sh*. Don't " +#~ "be afraid to look inside, they are" +#~ " simple enough =)." +#~ msgstr "" +#~ "Maintenant que nous avons installé " +#~ "toutes nos dépendances, lançons un " +#~ "simple entraînement distribué avec deux " +#~ "clients et un serveur. Notre procédure" +#~ " d'entraînement et l'architecture de notre" +#~ " réseau sont basées sur l'exemple " +#~ "MNIST de base de PyTorch " +#~ "`_. Cela" +#~ " te permettra de voir à quel " +#~ "point il est facile d'envelopper ton " +#~ "code avec Flower et de commencer " +#~ "l'entraînement de manière fédérée. Nous " +#~ "te fournissons deux scripts d'aide, à" +#~ " savoir *run-server.sh*, et *run-" +#~ "clients.sh*. N'aie pas peur de regarder" +#~ " à l'intérieur, ils sont assez " +#~ "simples =)." 
+ +#~ msgid "" +#~ "Go ahead and launch on a terminal" +#~ " the *run-server.sh* script first as" +#~ " follows:" +#~ msgstr "Lance sur un terminal le script *run-server.sh* d'abord comme suit :" + +#~ msgid "Now that the server is up and running, go ahead and launch the clients." +#~ msgstr "Maintenant que le serveur est opérationnel, vas-y et lance les clients." + +#~ msgid "" +#~ "Et voilà! You should be seeing the" +#~ " training procedure and, after a few" +#~ " iterations, the test accuracy for " +#~ "each client." +#~ msgstr "" +#~ "Et voilà ! Tu devrais voir la " +#~ "procédure d'entraînement et, après quelques" +#~ " itérations, la précision du test " +#~ "pour chaque client." + +#~ msgid "Now, let's see what is really happening inside." +#~ msgstr "Maintenant, voyons ce qui se passe réellement à l'intérieur." + +#~ msgid "" +#~ "Inside the server helper script *run-" +#~ "server.sh* you will find the following" +#~ " code that basically runs the " +#~ ":code:`server.py`" +#~ msgstr "" +#~ "Dans le script d'aide au serveur " +#~ "*run-server.sh*, tu trouveras le code " +#~ "suivant qui exécute le fichier " +#~ ":code:`server.py`" + +#~ msgid "" +#~ "We can go a bit deeper and " +#~ "see that :code:`server.py` simply launches " +#~ "a server that will coordinate three " +#~ "rounds of training. Flower Servers are" +#~ " very customizable, but for simple " +#~ "workloads, we can start a server " +#~ "using the :ref:`start_server ` function and leave " +#~ "all the configuration possibilities at " +#~ "their default values, as seen below." +#~ msgstr "" +#~ "Nous pouvons aller un peu plus " +#~ "loin et voir que :code:`server.py` lance" +#~ " simplement un serveur qui coordonnera " +#~ "trois tours de formation. Flower Les " +#~ "serveurs sont très personnalisables, mais " +#~ "pour les charges de travail simples, " +#~ "nous pouvons démarrer un serveur à " +#~ "l'aide de la fonction :ref:`start_server " +#~ "` et " +#~ "laisser toutes les possibilités de " +#~ "configuration à leurs valeurs par " +#~ "défaut, comme on peut le voir " +#~ "ci-dessous." + +#~ msgid "" +#~ "Next, let's take a look at the " +#~ "*run-clients.sh* file. You will see " +#~ "that it contains the main loop " +#~ "that starts a set of *clients*." +#~ msgstr "" +#~ "Ensuite, jetons un coup d'œil au " +#~ "fichier *run-clients.sh*. Tu verras " +#~ "qu'il contient la boucle principale qui" +#~ " démarre un ensemble de *clients*." + +#~ msgid "" +#~ "**cid**: is the client ID. It is" +#~ " an integer that uniquely identifies " +#~ "client identifier." +#~ msgstr "" +#~ "**cid** : c'est l'identifiant du client." +#~ " C'est un nombre entier qui identifie" +#~ " de façon unique l'identifiant du " +#~ "client." + +#~ msgid "**sever_address**: String that identifies IP and port of the server." +#~ msgstr "**sever_address** : Chaîne qui identifie l'IP et le port du serveur." + +#~ msgid "" +#~ "**nb_clients**: This defines the number " +#~ "of clients being created. This piece " +#~ "of information is not required by " +#~ "the client, but it helps us " +#~ "partition the original MNIST dataset to" +#~ " make sure that every client is " +#~ "working on unique subsets of both " +#~ "*training* and *test* sets." +#~ msgstr "" +#~ "**Cette information n'est pas requise " +#~ "par le client, mais elle nous aide" +#~ " à partitionner l'ensemble de données " +#~ "MNIST original pour nous assurer que " +#~ "chaque client travaille sur des sous-" +#~ "ensembles uniques des ensembles *formation*" +#~ " et *test*." 
+ +#~ msgid "" +#~ "Again, we can go deeper and look" +#~ " inside :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`. After going through the" +#~ " argument parsing code at the " +#~ "beginning of our :code:`main` function, " +#~ "you will find a call to " +#~ ":code:`mnist.load_data`. This function is " +#~ "responsible for partitioning the original " +#~ "MNIST datasets (*training* and *test*) " +#~ "and returning a :code:`torch.utils.data.DataLoader`" +#~ " s for each of them. We then" +#~ " instantiate a :code:`PytorchMNISTClient` object" +#~ " with our client ID, our DataLoaders," +#~ " the number of epochs in each " +#~ "round, and which device we want to" +#~ " use for training (CPU or GPU)." +#~ msgstr "" +#~ "Encore une fois, nous pouvons aller " +#~ "plus loin et regarder dans " +#~ ":code:`flwr_example/quickstart-pytorch/client.py`. Après" +#~ " avoir parcouru le code d'analyse des" +#~ " arguments au début de notre fonction" +#~ " :code:`main`, tu trouveras un appel " +#~ "à :code:`mnist.load_data`. Cette fonction est" +#~ " responsable du partitionnement des " +#~ "ensembles de données MNIST originaux " +#~ "(*training* et *test*) et renvoie un " +#~ ":code:`torch.utils.data.DataLoader` s pour chacun" +#~ " d'entre eux. Nous instancions ensuite " +#~ "un objet :code:`PytorchMNISTClient` avec notre" +#~ " ID client, nos DataLoaders, le " +#~ "nombre d'époques dans chaque tour et " +#~ "le périphérique que nous voulons " +#~ "utiliser pour l'entraînement (CPU ou " +#~ "GPU)." + +#~ msgid "" +#~ "The :code:`PytorchMNISTClient` object when " +#~ "finally passed to :code:`fl.client.start_client` " +#~ "along with the server's address as " +#~ "the training process begins." +#~ msgstr "" +#~ "L'objet :code:`PytorchMNISTClient` est finalement" +#~ " transmis à :code:`fl.client.start_client` avec" +#~ " l'adresse du serveur lorsque le " +#~ "processus de formation commence." + +#~ msgid "A Closer Look" +#~ msgstr "Regarder de plus près" + +#~ msgid "" +#~ "Now, let's look closely into the " +#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" +#~ ".quickstart-pytorch.mnist` and see what it" +#~ " is doing:" +#~ msgstr "" +#~ "Maintenant, examinons de près le " +#~ ":code:`PytorchMNISTClient` à l'intérieur du " +#~ ":code:`flwr_example.quickstart-pytorch.mnist` et " +#~ "voyons ce qu'il fait :" + +#~ msgid "" +#~ "The first thing to notice is that" +#~ " :code:`PytorchMNISTClient` instantiates a CNN" +#~ " model inside its constructor" +#~ msgstr "" +#~ "La première chose à remarquer est " +#~ "que :code:`PytorchMNISTClient` instancie un " +#~ "modèle CNN dans son constructeur" + +#~ msgid "" +#~ "The code for the CNN is available" +#~ " under :code:`quickstart-pytorch.mnist` and " +#~ "it is reproduced below. It is the" +#~ " same network found in `Basic MNIST" +#~ " Example " +#~ "`_." +#~ msgstr "" +#~ "Le code du CNN est disponible sous" +#~ " :code:`quickstart-pytorch.mnist` et il est" +#~ " reproduit ci-dessous. Il s'agit du" +#~ " même réseau que celui que l'on " +#~ "trouve dans `Exemple basique de MNIST" +#~ " `_." 
+ +#~ msgid "" +#~ "The second thing to notice is that" +#~ " :code:`PytorchMNISTClient` class inherits from" +#~ " the :code:`fl.client.Client`, and hence it" +#~ " must implement the following methods:" +#~ msgstr "" +#~ "La deuxième chose à noter est que" +#~ " la classe :code:`PytorchMNISTClient` hérite " +#~ "de :code:`fl.client.Client`, et qu'elle doit" +#~ " donc implémenter les méthodes suivantes" +#~ " :" + +#~ msgid "" +#~ "When comparing the abstract class to " +#~ "its derived class :code:`PytorchMNISTClient` " +#~ "you will notice that :code:`fit` calls" +#~ " a :code:`train` function and that " +#~ ":code:`evaluate` calls a :code:`test`: " +#~ "function." +#~ msgstr "" +#~ "En comparant la classe abstraite à " +#~ "sa classe dérivée :code:`PytorchMNISTClient`, " +#~ "tu remarqueras que :code:`fit` appelle " +#~ "une fonction :code:`train` et que " +#~ ":code:`evaluate` appelle une fonction " +#~ ":code:`test` :." + +#~ msgid "" +#~ "These functions can both be found " +#~ "inside the same :code:`quickstart-" +#~ "pytorch.mnist` module:" +#~ msgstr "" +#~ "Ces fonctions se trouvent toutes deux" +#~ " dans le même module :code:`quickstart-" +#~ "pytorch.mnist` :" + +#~ msgid "" +#~ "Observe that these functions encapsulate " +#~ "regular training and test loops and " +#~ "provide :code:`fit` and :code:`evaluate` with" +#~ " final statistics for each round. You" +#~ " could substitute them with your " +#~ "custom train and test loops and " +#~ "change the network architecture, and the" +#~ " entire example would still work " +#~ "flawlessly. As a matter of fact, " +#~ "why not try and modify the code" +#~ " to an example of your liking?" +#~ msgstr "" +#~ "Observe que ces fonctions encapsulent " +#~ "les boucles d'entraînement et de test" +#~ " habituelles et fournissent à :code:`fit`" +#~ " et :code:`evaluate` les statistiques " +#~ "finales pour chaque tour. Tu pourrais" +#~ " les remplacer par tes boucles " +#~ "d'entraînement et de test personnalisées " +#~ "et changer l'architecture du réseau, et" +#~ " l'ensemble de l'exemple fonctionnerait " +#~ "toujours parfaitement. En fait, pourquoi " +#~ "ne pas essayer de modifier le code" +#~ " pour en faire un exemple qui " +#~ "te plairait ?" + +#~ msgid "Give It a Try" +#~ msgstr "Fais un essai" + +#~ msgid "" +#~ "Looking through the quickstart code " +#~ "description above will have given a " +#~ "good understanding of how *clients* and" +#~ " *servers* work in Flower, how to " +#~ "run a simple experiment, and the " +#~ "internals of a client wrapper. Here " +#~ "are a few things you could try " +#~ "on your own and get more " +#~ "experience with Flower:" +#~ msgstr "" +#~ "En parcourant la description du code " +#~ "de démarrage rapide ci-dessus, tu " +#~ "auras acquis une bonne compréhension du" +#~ " fonctionnement des *clients* et des " +#~ "*serveurs* dans Flower, de l'exécution " +#~ "d'une expérience simple et de la " +#~ "structure interne d'un wrapper client. " +#~ "Voici quelques exemples que tu peux " +#~ "essayer par toi-même pour acquérir " +#~ "plus d'expérience avec Flower :" + +#~ msgid "" +#~ "Try and change :code:`PytorchMNISTClient` so" +#~ " it can accept different architectures." +#~ msgstr "" +#~ "Essaie de modifier :code:`PytorchMNISTClient` " +#~ "pour qu'il puisse accepter différentes " +#~ "architectures." 
+ +#~ msgid "" +#~ "Modify the :code:`train` function so " +#~ "that it accepts different optimizers" +#~ msgstr "" +#~ "Modifie la fonction :code:`train` pour " +#~ "qu'elle accepte différents optimiseurs" + +#~ msgid "" +#~ "Modify the :code:`test` function so that" +#~ " it proves not only the top-1 " +#~ "(regular accuracy) but also the top-5" +#~ " accuracy?" +#~ msgstr "" +#~ "Modifie la fonction :code:`test` pour " +#~ "qu'elle prouve non seulement le top-1" +#~ " (précision normale) mais aussi le " +#~ "top-5 ?" + +#~ msgid "" +#~ "Go larger! Try to adapt the code" +#~ " to larger images and datasets. Why" +#~ " not try training on ImageNet with" +#~ " a ResNet-50?" +#~ msgstr "" +#~ "Essaie d'adapter le code à des " +#~ "images et à des ensembles de " +#~ "données plus grands. Pourquoi ne pas " +#~ "essayer de s'entraîner sur ImageNet avec" +#~ " un ResNet-50 ?" + +#~ msgid "You are ready now. Enjoy learning in a federated way!" +#~ msgstr "Tu es prêt maintenant. Profite de l'apprentissage de manière fédérée !" + +#~ msgid "" +#~ "Flower provides differential privacy (DP) " +#~ "wrapper classes for the easy integration" +#~ " of the central DP guarantees " +#~ "provided by DP-FedAvg into training " +#~ "pipelines defined in any of the " +#~ "various ML frameworks that Flower is " +#~ "compatible with." +#~ msgstr "" +#~ "Flower fournit des classes d'enveloppe " +#~ "de confidentialité différentielle (DP) pour" +#~ " l'intégration facile des garanties " +#~ "centrales de DP fournies par DP-" +#~ "FedAvg dans les pipelines de formation" +#~ " définis dans n'importe lequel des " +#~ "divers cadres de ML avec lesquels " +#~ "Flower est compatible." + +#~ msgid "" +#~ "Please note that these components are" +#~ " still experimental; the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." +#~ msgstr "" +#~ "Note que ces composants sont encore " +#~ "expérimentaux, la configuration correcte du" +#~ " DP pour une tâche spécifique est " +#~ "encore un problème non résolu." + +#~ msgid "" +#~ "The name DP-FedAvg is misleading " +#~ "since it can be applied on top " +#~ "of any FL algorithm that conforms " +#~ "to the general structure prescribed by" +#~ " the FedOpt family of algorithms." +#~ msgstr "" +#~ "Le nom DP-FedAvg est trompeur car" +#~ " il peut être appliqué à n'importe" +#~ " quel algorithme FL qui se conforme" +#~ " à la structure générale prescrite " +#~ "par la famille d'algorithmes FedOpt." + +#~ msgid "DP-FedAvg" +#~ msgstr "DP-FedAvg" + +#~ msgid "" +#~ "DP-FedAvg, originally proposed by " +#~ "McMahan et al. [mcmahan]_ and extended" +#~ " by Andrew et al. [andrew]_, is " +#~ "essentially FedAvg with the following " +#~ "modifications." +#~ msgstr "" +#~ "DP-FedAvg, proposé à l'origine par " +#~ "McMahan et al. [mcmahan]_ et étendu " +#~ "par Andrew et al. [andrew]_, est " +#~ "essentiellement FedAvg avec les modifications" +#~ " suivantes." + +#~ msgid "" +#~ "**Clipping** : The influence of each " +#~ "client's update is bounded by clipping" +#~ " it. This is achieved by enforcing" +#~ " a cap on the L2 norm of " +#~ "the update, scaling it down if " +#~ "needed." +#~ msgstr "" +#~ "**Clipping** : L'influence de la mise" +#~ " à jour de chaque client est " +#~ "limitée en l'écrêtant. Ceci est réalisé" +#~ " en imposant un plafond à la " +#~ "norme L2 de la mise à jour, " +#~ "en la réduisant si nécessaire." 
+ +#~ msgid "" +#~ "**Noising** : Gaussian noise, calibrated " +#~ "to the clipping threshold, is added " +#~ "to the average computed at the " +#~ "server." +#~ msgstr "" +#~ "**Bruit** : un bruit gaussien, calibré" +#~ " sur le seuil d'écrêtage, est ajouté" +#~ " à la moyenne calculée au niveau " +#~ "du serveur." + +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. This variability is " +#~ "crucial in understanding its impact on" +#~ " differential privacy guarantees, emphasizing " +#~ "the need for an adaptive approach " +#~ "[andrew]_ that continuously adjusts the " +#~ "clipping threshold to track a " +#~ "prespecified quantile of the update norm" +#~ " distribution." +#~ msgstr "" +#~ "Il a été démontré que la " +#~ "distribution de la norme de mise à" +#~ " jour varie d'une tâche à l'autre " +#~ "et évolue au fur et à mesure " +#~ "de la formation. C'est pourquoi nous " +#~ "utilisons une approche adaptative [andrew]_" +#~ " qui ajuste continuellement le seuil " +#~ "d'écrêtage pour suivre un quantile " +#~ "prédéfini de la distribution de la " +#~ "norme de mise à jour." + +#~ msgid "Simplifying Assumptions" +#~ msgstr "Simplifier les hypothèses" + +#~ msgid "" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realizes the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." +#~ msgstr "" +#~ "Nous formulons (et tentons d'appliquer) " +#~ "un certain nombre d'hypothèses qui " +#~ "doivent être satisfaites pour que le " +#~ "processus de formation réalise réellement " +#~ "les garanties :math:`(\\epsilon, \\delta)` que" +#~ " l'utilisateur a à l'esprit lorsqu'il " +#~ "configure l'installation." + +#~ msgid "" +#~ "**Fixed-size subsampling** :Fixed-size " +#~ "subsamples of the clients must be " +#~ "taken at each round, as opposed to" +#~ " variable-sized Poisson subsamples." +#~ msgstr "" +#~ "**Sous-échantillonnage de taille fixe** " +#~ ":Des sous-échantillons de taille fixe" +#~ " des clients doivent être prélevés à" +#~ " chaque tour, par opposition aux " +#~ "sous-échantillons de Poisson de taille " +#~ "variable." + +#~ msgid "" +#~ "**Unweighted averaging** : The contributions" +#~ " from all the clients must weighted" +#~ " equally in the aggregate to " +#~ "eliminate the requirement for the server" +#~ " to know in advance the sum of" +#~ " the weights of all clients available" +#~ " for selection." +#~ msgstr "" +#~ "**Moyenne non pondérée** : Les " +#~ "contributions de tous les clients " +#~ "doivent être pondérées de façon égale" +#~ " dans l'ensemble afin que le serveur" +#~ " n'ait pas à connaître à l'avance " +#~ "la somme des poids de tous les " +#~ "clients disponibles pour la sélection." + +#~ msgid "" +#~ "**No client failures** : The set " +#~ "of available clients must stay constant" +#~ " across all rounds of training. In" +#~ " other words, clients cannot drop out" +#~ " or fail." +#~ msgstr "" +#~ "**Aucune défaillance de client** : " +#~ "L'ensemble des clients disponibles doit " +#~ "rester constant pendant toutes les " +#~ "séries de formation. En d'autres termes," +#~ " les clients ne peuvent pas " +#~ "abandonner ou échouer." 
+ +#~ msgid "" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold, while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." +#~ msgstr "" +#~ "Les deux premiers sont utiles pour " +#~ "éliminer une multitude de complications " +#~ "liées au calibrage du bruit en " +#~ "fonction du seuil d'écrêtage, tandis que" +#~ " le troisième est nécessaire pour se" +#~ " conformer aux hypothèses de l'analyse " +#~ "de la vie privée." + +#~ msgid "" +#~ "These restrictions are in line with " +#~ "constraints imposed by Andrew et al. " +#~ "[andrew]_." +#~ msgstr "" +#~ "Ces restrictions sont conformes aux " +#~ "contraintes imposées par Andrew et al." +#~ " [andrew]_." + +#~ msgid "Customizable Responsibility for Noise injection" +#~ msgstr "Responsabilité personnalisable pour l'injection de bruit" + +#~ msgid "" +#~ "In contrast to other implementations " +#~ "where the addition of noise is " +#~ "performed at the server, you can " +#~ "configure the site of noise injection" +#~ " to better match your threat model." +#~ " We provide users with the " +#~ "flexibility to set up the training " +#~ "such that each client independently adds" +#~ " a small amount of noise to the" +#~ " clipped update, with the result that" +#~ " simply aggregating the noisy updates " +#~ "is equivalent to the explicit addition" +#~ " of noise to the non-noisy " +#~ "aggregate at the server." +#~ msgstr "" +#~ "Contrairement à d'autres implémentations où" +#~ " l'ajout de bruit est effectué au " +#~ "niveau du serveur, tu peux configurer" +#~ " le site d'injection de bruit pour" +#~ " qu'il corresponde mieux à ton modèle" +#~ " de menace. Nous offrons aux " +#~ "utilisateurs la possibilité de configurer " +#~ "l'entraînement de telle sorte que chaque" +#~ " client ajoute indépendamment une petite" +#~ " quantité de bruit à la mise à" +#~ " jour écrêtée, ce qui fait que " +#~ "le simple fait d'agréger les mises " +#~ "à jour bruyantes équivaut à l'ajout " +#~ "explicite de bruit à l'agrégat non " +#~ "bruyant au niveau du serveur." + +#~ msgid "" +#~ "To be precise, if we let :math:`m`" +#~ " be the number of clients sampled " +#~ "each round and :math:`\\sigma_\\Delta` be " +#~ "the scale of the total Gaussian " +#~ "noise that needs to be added to" +#~ " the sum of the model updates, " +#~ "we can use simple maths to show" +#~ " that this is equivalent to each " +#~ "client adding noise with scale " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ msgstr "" +#~ "Pour être précis, si nous laissons " +#~ ":math:`m` être le nombre de clients " +#~ "échantillonnés à chaque tour et " +#~ ":math:\\sigma_\\Delta` être l'échelle du bruit" +#~ " gaussien total qui doit être ajouté" +#~ " à la somme des mises à jour" +#~ " du modèle, nous pouvons utiliser des" +#~ " mathématiques simples pour montrer que " +#~ "cela équivaut à ce que chaque " +#~ "client ajoute du bruit avec l'échelle" +#~ " :math:\\sigma_\\Delta/\\sqrt{m}`." + +#~ msgid "Wrapper-based approach" +#~ msgstr "Approche basée sur l'enveloppe" + +#~ msgid "" +#~ "Introducing DP to an existing workload" +#~ " can be thought of as adding an" +#~ " extra layer of security around it." 
+#~ " This inspired us to provide the " +#~ "additional server and client-side logic" +#~ " needed to make the training process" +#~ " differentially private as wrappers for " +#~ "instances of the :code:`Strategy` and " +#~ ":code:`NumPyClient` abstract classes respectively." +#~ " This wrapper-based approach has the" +#~ " advantage of being easily composable " +#~ "with other wrappers that someone might" +#~ " contribute to the Flower library in" +#~ " the future, e.g., for secure " +#~ "aggregation. Using Inheritance instead can " +#~ "be tedious because that would require" +#~ " the creation of new sub- classes " +#~ "every time a new class implementing " +#~ ":code:`Strategy` or :code:`NumPyClient` is " +#~ "defined." +#~ msgstr "" +#~ "L'introduction du DP dans une charge " +#~ "de travail existante peut être " +#~ "considérée comme l'ajout d'une couche de" +#~ " sécurité supplémentaire autour d'elle. " +#~ "Cela nous a incités à fournir la" +#~ " logique supplémentaire côté serveur et " +#~ "côté client nécessaire pour rendre le" +#~ " processus de formation différentiellement " +#~ "privé en tant qu'enveloppes pour les " +#~ "instances des classes abstraites " +#~ ":code:`Strategy` et :code:`NumPyClient` " +#~ "respectivement. Cette approche basée sur " +#~ "l'enveloppe a l'avantage d'être facilement " +#~ "composable avec d'autres enveloppes que " +#~ "quelqu'un pourrait contribuer à la " +#~ "bibliothèque Flower à l'avenir, par " +#~ "exemple, pour l'agrégation sécurisée. " +#~ "L'utilisation de l'héritage à la place" +#~ " peut être fastidieuse car cela " +#~ "nécessiterait la création de nouvelles " +#~ "sous-classes chaque fois qu'une nouvelle" +#~ " classe mettant en œuvre :code:`Strategy`" +#~ " ou :code:`NumPyClient` est définie." + +#~ msgid "" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean-valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." +#~ msgstr "" +#~ "La première version de notre solution" +#~ " consistait à définir un décorateur " +#~ "dont le constructeur acceptait, entre " +#~ "autres, une variable à valeur booléenne" +#~ " indiquant si l'écrêtage adaptatif devait" +#~ " être activé ou non. Nous nous " +#~ "sommes rapidement rendu compte que cela" +#~ " encombrerait sa fonction :code:`__init__()` " +#~ "avec des variables correspondant aux " +#~ "hyperparamètres de l'écrêtage adaptatif qui" +#~ " resteraient inutilisées lorsque celui-ci" +#~ " était désactivé. Une implémentation plus" +#~ " propre pourrait être obtenue en " +#~ "divisant la fonctionnalité en deux " +#~ "décorateurs, :code:`DPFedAvgFixed` et " +#~ ":code:`DPFedAvgAdaptive`, le second sous-" +#~ "classant le premier. 
Les constructeurs " +#~ "des deux classes acceptent un paramètre" +#~ " booléen :code:`server_side_noising` qui, comme" +#~ " son nom l'indique, détermine l'endroit " +#~ "où le noising doit être effectué." + +#~ msgid "" +#~ "The server-side capabilities required " +#~ "for the original version of DP-" +#~ "FedAvg, i.e., the one which performed" +#~ " fixed clipping, can be completely " +#~ "captured with the help of wrapper " +#~ "logic for just the following two " +#~ "methods of the :code:`Strategy` abstract " +#~ "class." +#~ msgstr "" +#~ "Les capacités côté serveur requises pour" +#~ " la version originale de DP-FedAvg," +#~ " c'est-à-dire celle qui effectue un " +#~ "écrêtage fixe, peuvent être entièrement " +#~ "capturées à l'aide d'une logique " +#~ "d'enveloppement pour les deux méthodes " +#~ "suivantes de la classe abstraite " +#~ ":code:`Strategy`." + +#~ msgid "" +#~ ":code:`configure_fit()` : The config " +#~ "dictionary being sent by the wrapped " +#~ ":code:`Strategy` to each client needs to" +#~ " be augmented with an additional " +#~ "value equal to the clipping threshold" +#~ " (keyed under :code:`dpfedavg_clip_norm`) and," +#~ " if :code:`server_side_noising=true`, another one" +#~ " equal to the scale of the " +#~ "Gaussian noise that needs to be " +#~ "added at the client (keyed under " +#~ ":code:`dpfedavg_noise_stddev`). This entails " +#~ "*post*-processing of the results returned " +#~ "by the wrappee's implementation of " +#~ ":code:`configure_fit()`." +#~ msgstr "" +#~ ":code:`configure_fit()` : Le dictionnaire de" +#~ " configuration envoyé par la " +#~ ":code:`Strategy` enveloppée à chaque client" +#~ " doit être augmenté d'une valeur " +#~ "supplémentaire égale au seuil d'écrêtage " +#~ "(indiqué sous :code:`dpfedavg_clip_norm`) et, " +#~ "si :code:`server_side_noising=true`, d'une autre " +#~ "égale à l'échelle du bruit gaussien " +#~ "qui doit être ajouté au client " +#~ "(indiqué sous :code:`dpfedavg_noise_stddev`)." + +#~ msgid "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." +#~ msgstr "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. 
On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." + +#~ msgid "" +#~ "We can't directly change the aggregation" +#~ " function of the wrapped strategy to" +#~ " force it to add noise to the" +#~ " aggregate, hence we simulate client-" +#~ "side noising to implement server-side" +#~ " noising." +#~ msgstr "" +#~ "Nous ne pouvons pas modifier directement" +#~ " la fonction d'agrégation de la " +#~ "stratégie enveloppée pour la forcer à" +#~ " ajouter du bruit à l'agrégat, c'est" +#~ " pourquoi nous simulons le bruit côté" +#~ " client pour mettre en œuvre le " +#~ "bruit côté serveur." + +#~ msgid "" +#~ "These changes have been put together " +#~ "into a class called :code:`DPFedAvgFixed`, " +#~ "whose constructor accepts the strategy " +#~ "being decorated, the clipping threshold " +#~ "and the number of clients sampled " +#~ "every round as compulsory arguments. The" +#~ " user is expected to specify the " +#~ "clipping threshold since the order of" +#~ " magnitude of the update norms is " +#~ "highly dependent on the model being " +#~ "trained and providing a default value" +#~ " would be misleading. The number of" +#~ " clients sampled at every round is" +#~ " required to calculate the amount of" +#~ " noise that must be added to " +#~ "each individual update, either by the" +#~ " server or the clients." +#~ msgstr "" +#~ "Ces modifications ont été regroupées " +#~ "dans une classe appelée :code:`DPFedAvgFixed`," +#~ " dont le constructeur accepte la " +#~ "stratégie décorée, le seuil d'écrêtage " +#~ "et le nombre de clients échantillonnés" +#~ " à chaque tour comme arguments " +#~ "obligatoires. L'utilisateur est censé " +#~ "spécifier le seuil d'écrêtage car " +#~ "l'ordre de grandeur des normes de " +#~ "mise à jour dépend fortement du " +#~ "modèle formé et fournir une valeur " +#~ "par défaut serait trompeur. Le nombre" +#~ " de clients échantillonnés à chaque " +#~ "tour est nécessaire pour calculer la " +#~ "quantité de bruit qui doit être " +#~ "ajoutée à chaque mise à jour " +#~ "individuelle, que ce soit par le " +#~ "serveur ou par les clients." + +#~ msgid "" +#~ "The additional functionality required to " +#~ "facilitate adaptive clipping has been " +#~ "provided in :code:`DPFedAvgAdaptive`, a " +#~ "subclass of :code:`DPFedAvgFixed`. It " +#~ "overrides the above-mentioned methods to" +#~ " do the following." +#~ msgstr "" +#~ "La fonctionnalité supplémentaire nécessaire " +#~ "pour faciliter l'écrêtage adaptatif a " +#~ "été fournie dans :code:`DPFedAvgAdaptive`, une" +#~ " sous-classe de :code:`DPFedAvgFixed`. Elle" +#~ " remplace les méthodes mentionnées ci-" +#~ "dessus pour effectuer les opérations " +#~ "suivantes." 
+ +#~ msgid "" +#~ ":code:`configure_fit()` : It intercepts the" +#~ " config dict returned by " +#~ ":code:`super.configure_fit()` to add the " +#~ "key-value pair " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " +#~ "which the client interprets as an " +#~ "instruction to include an indicator bit" +#~ " (1 if update norm <= clipping " +#~ "threshold, 0 otherwise) in the results" +#~ " returned by it." +#~ msgstr "" +#~ ":code:`configure_fit()` : Il intercepte le " +#~ "dict de configuration renvoyé par " +#~ ":code:`super.configure_fit()` pour y ajouter " +#~ "la paire clé-valeur " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True`, que le " +#~ "client interprète comme une instruction " +#~ "d'inclure un bit indicateur (1 si " +#~ "la norme de mise à jour <= " +#~ "seuil d'écrêtage, 0 sinon) dans les " +#~ "résultats qu'il renvoie." + +#~ msgid "" +#~ ":code:`aggregate_fit()` : It follows a " +#~ "call to :code:`super.aggregate_fit()` with one" +#~ " to :code:`__update_clip_norm__()`, a procedure" +#~ " which adjusts the clipping threshold " +#~ "on the basis of the indicator bits" +#~ " received from the sampled clients." +#~ msgstr "" +#~ ":code:`aggregate_fit()` : Il fait suivre " +#~ "un appel à :code:`super.aggregate_fit()` d'un" +#~ " appel à :code:`__update_clip_norm__()`, une " +#~ "procédure qui ajuste le seuil d'écrêtage" +#~ " sur la base des bits indicateurs " +#~ "reçus des clients échantillonnés." + +#~ msgid "" +#~ "The client-side capabilities required " +#~ "can be completely captured through " +#~ "wrapper logic for just the :code:`fit()`" +#~ " method of the :code:`NumPyClient` abstract" +#~ " class. To be precise, we need " +#~ "to *post-process* the update computed" +#~ " by the wrapped client to clip " +#~ "it, if necessary, to the threshold " +#~ "value supplied by the server as " +#~ "part of the config dictionary. In " +#~ "addition to this, it may need to" +#~ " perform some extra work if either" +#~ " (or both) of the following keys " +#~ "are also present in the dict." +#~ msgstr "" +#~ "Les capacités requises côté client " +#~ "peuvent être entièrement capturées par " +#~ "une logique de wrapper pour la " +#~ "seule méthode :code:`fit()` de la classe" +#~ " abstraite :code:`NumPyClient`. Pour être " +#~ "précis, nous devons *post-traiter* la" +#~ " mise à jour calculée par le " +#~ "client wrapped pour l'écrêter, si " +#~ "nécessaire, à la valeur seuil fournie" +#~ " par le serveur dans le cadre " +#~ "du dictionnaire de configuration. En " +#~ "plus de cela, il peut avoir besoin" +#~ " d'effectuer un travail supplémentaire si" +#~ " l'une des clés suivantes (ou les " +#~ "deux) est également présente dans le " +#~ "dict." + +#~ msgid "" +#~ ":code:`dpfedavg_noise_stddev` : Generate and " +#~ "add the specified amount of noise " +#~ "to the clipped update." +#~ msgstr "" +#~ ":code:`dpfedavg_noise_stddev` : Génère et " +#~ "ajoute la quantité de bruit spécifiée" +#~ " à la mise à jour de " +#~ "l'écrêtage." + +#~ msgid "" +#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" +#~ " metrics dict in the :code:`FitRes` " +#~ "object being returned to the server " +#~ "with an indicator bit, calculated as " +#~ "described earlier." +#~ msgstr "" +#~ ":code:`dpfedavg_adaptive_clip_enabled` : Complète " +#~ "les métriques dict dans l'objet " +#~ ":code:`FitRes` renvoyé au serveur avec " +#~ "un bit indicateur, calculé comme décrit" +#~ " précédemment." 
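For illustration, the client-side behaviour described above (clip the update to :code:`dpfedavg_clip_norm`, add Gaussian noise when :code:`dpfedavg_noise_stddev` is present, and report an indicator bit when :code:`dpfedavg_adaptive_clip_enabled` is set) can be sketched as a small NumPy helper. This is a minimal sketch under the assumption that the update is a list of NumPy ndarrays and the config is a plain dict; the function and the metrics key name are illustrative, not part of the Flower API.

    import numpy as np


    def dp_postprocess_update(update, config):
        """Clip a client update and optionally noise it, per the config keys above."""
        metrics = {}
        clip_norm = float(config["dpfedavg_clip_norm"])

        # Overall L2 norm of the update, flattened across all layers.
        norm = float(np.sqrt(sum(np.sum(np.square(layer)) for layer in update)))

        # Clip: scale the whole update down if its norm exceeds the threshold.
        scale = min(1.0, clip_norm / (norm + 1e-12))
        clipped = [layer * scale for layer in update]

        # Indicator bit for adaptive clipping: 1 if no clipping was needed.
        if config.get("dpfedavg_adaptive_clip_enabled"):
            metrics["dpfedavg_norm_bit"] = int(norm <= clip_norm)  # illustrative key name

        # Client-side noising, if requested by the server.
        stddev = config.get("dpfedavg_noise_stddev")
        if stddev is not None:
            rng = np.random.default_rng()
            clipped = [
                layer + rng.normal(0.0, float(stddev), layer.shape) for layer in clipped
            ]

        return clipped, metrics

A :code:`fit()` wrapper would apply such a helper to the update computed by the wrapped client and merge the returned metrics into the :code:`FitRes` before sending it back to the server.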
+ +#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" +#~ msgstr "Effectuer l'analyse :math:`(\\epsilon, \\delta)`" + +#~ msgid "" +#~ "Assume you have trained for :math:`n`" +#~ " rounds with sampling fraction :math:`q`" +#~ " and noise multiplier :math:`z`. In " +#~ "order to calculate the :math:`\\epsilon` " +#~ "value this would result in for a" +#~ " particular :math:`\\delta`, the following " +#~ "script may be used." +#~ msgstr "" +#~ "Supposons que tu te sois entraîné " +#~ "pendant :math:`n` tours avec la fraction" +#~ " d'échantillonnage :math:`q` et le " +#~ "multiplicateur de bruit :math:`z`. Afin " +#~ "de calculer la valeur :math:`epsilon` " +#~ "qui en résulterait pour un " +#~ ":math:`\\delta` particulier, le script suivant" +#~ " peut être utilisé." + +#~ msgid "" +#~ "`How to run Flower using Docker " +#~ "`_" +#~ msgstr "" + +#~ msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`ClientApp `\\ " +#~ "\\(client\\_fn\\[\\, mods\\]\\)" +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.server.driver `\\" +#~ msgstr "" + +#~ msgid "Flower driver SDK." +#~ msgstr "Serveur de Flower" + +#~ msgid "driver" +#~ msgstr "serveur" + +#~ msgid "" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`GrpcDriver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "`GrpcDriver` provides access to the gRPC Driver API/service." +#~ msgstr "" + +#~ msgid ":py:obj:`get_nodes `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(task\\_ids\\)" +#~ msgstr "" + +#~ msgid "Get task results." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`push_task_ins " +#~ "`\\ " +#~ "\\(task\\_ins\\_list\\)" +#~ msgstr "" + +#~ msgid "Schedule tasks." +#~ msgstr "" + +#~ msgid "GrpcDriver" +#~ msgstr "" + +#~ msgid ":py:obj:`connect `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Connect to the Driver API." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`create_run " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "Request for run ID." +#~ msgstr "Demande pour une nouvelle Flower Baseline" + +#~ msgid "" +#~ ":py:obj:`disconnect " +#~ "`\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Disconnect from the Driver API." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`get_nodes `\\" +#~ " \\(req\\)" +#~ msgstr "" + +#~ msgid "Get client IDs." +#~ msgstr "Moteur client Edge" + +#~ msgid "" +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`push_task_ins " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "running the clients' jobs (i.e. their" +#~ " `fit()` method)." +#~ msgstr "" + +#~ msgid "" +#~ "Much effort went into a completely " +#~ "restructured Flower docs experience. The " +#~ "documentation on [flower.ai/docs](flower.ai/docs) is" +#~ " now divided into Flower Framework, " +#~ "Flower Baselines, Flower Android SDK, " +#~ "Flower iOS SDK, and code example " +#~ "projects." +#~ msgstr "" + +#~ msgid "" +#~ "Flower usage examples used to be " +#~ "bundled with Flower in a package " +#~ "called ``flwr_example``. We are migrating " +#~ "those examples to standalone projects to" +#~ " make them easier to use. 
All " +#~ "new examples are based in the " +#~ "directory `examples " +#~ "`_." +#~ msgstr "" +#~ "Les exemples d'utilisation de Flower " +#~ "étaient auparavant regroupés avec Flower " +#~ "dans un paquet appelé ``flwr_example``. " +#~ "Nous migrons ces exemples vers des " +#~ "projets autonomes pour les rendre plus" +#~ " faciles à utiliser. Tous les " +#~ "nouveaux exemples sont basés dans le " +#~ "répertoire ``examples " +#~ "`_." + +#~ msgid "Quickstart TensorFlow/Keras" +#~ msgstr "Démarrage rapide de TensorFlow/Keras" + +#~ msgid "Legacy Examples (`flwr_example`)" +#~ msgstr "Exemples hérités (`flwr_example`)" + +#~ msgid "" +#~ "The useage examples in `flwr_example` " +#~ "are deprecated and will be removed " +#~ "in the future. New examples are " +#~ "provided as standalone projects in " +#~ "`examples `_." +#~ msgstr "" +#~ "Les exemples d'utilisation dans `flwr_example`" +#~ " sont obsolètes et seront supprimés à" +#~ " l'avenir. De nouveaux exemples sont " +#~ "fournis en tant que projets autonomes" +#~ " dans `examples " +#~ "`_." + +#~ msgid "Extra Dependencies" +#~ msgstr "Dépendances supplémentaires" + +#~ msgid "" +#~ "The core Flower framework keeps a " +#~ "minimal set of dependencies. The " +#~ "examples demonstrate Flower in the " +#~ "context of different machine learning " +#~ "frameworks, so additional dependencies need" +#~ " to be installed before an example" +#~ " can be run." +#~ msgstr "" +#~ "Le noyau du framework Flower conserve" +#~ " un ensemble minimal de dépendances. " +#~ "Les exemples démontrent Flower dans le" +#~ " contexte de différents frameworks " +#~ "d'apprentissage automatique, de sorte que " +#~ "des dépendances supplémentaires doivent être" +#~ " installées avant qu'un exemple puisse " +#~ "être exécuté." + +#~ msgid "For PyTorch examples::" +#~ msgstr "Pour les exemples de PyTorch: :" + +#~ msgid "For TensorFlow examples::" +#~ msgstr "Pour les exemples de TensorFlow : :" + +#~ msgid "For both PyTorch and TensorFlow examples::" +#~ msgstr "Pour les exemples PyTorch et TensorFlow: :" + +#~ msgid "" +#~ "Please consult :code:`pyproject.toml` for a" +#~ " full list of possible extras " +#~ "(section :code:`[tool.poetry.extras]`)." +#~ msgstr "" +#~ "Tu peux consulter :code:`pyproject.toml` pour" +#~ " une liste complète des extras " +#~ "possibles (section :code:`[tool.poetry.extras]`)." + +#~ msgid "PyTorch Examples" +#~ msgstr "Exemples de PyTorch" + +#~ msgid "" +#~ "Our PyTorch examples are based on " +#~ "PyTorch 1.7. They should work with " +#~ "other releases as well. So far, we" +#~ " provide the following examples." +#~ msgstr "" +#~ "Nos exemples PyTorch sont basés sur " +#~ "PyTorch 1.7. Ils devraient fonctionner " +#~ "avec d'autres versions également. Jusqu'à " +#~ "présent, nous fournissons les exemples " +#~ "suivants." + +#~ msgid "CIFAR-10 Image Classification" +#~ msgstr "Classification d'images CIFAR-10" + +#~ msgid "" +#~ "`CIFAR-10 and CIFAR-100 " +#~ "`_ are " +#~ "popular RGB image datasets. The Flower" +#~ " CIFAR-10 example uses PyTorch to " +#~ "train a simple CNN classifier in a" +#~ " federated learning setup with two " +#~ "clients." +#~ msgstr "" +#~ "`CIFAR-10 et CIFAR-100 " +#~ "`_ sont des" +#~ " ensembles de données d'images RVB " +#~ "populaires. L'exemple Flower CIFAR-10 utilise" +#~ " PyTorch pour former un classificateur " +#~ "CNN simple dans une configuration " +#~ "d'apprentissage fédéré avec deux clients." 
+ +#~ msgid "First, start a Flower server:" +#~ msgstr "Tout d'abord, démarre un serveur Flower :" + +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" + +#~ msgid "Then, start the two clients in a new terminal window:" +#~ msgstr "" +#~ "Ensuite, démarre les deux clients dans" +#~ " une nouvelle fenêtre de terminal :" + +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." +#~ msgstr "Pour plus de détails, voir :code:`src/py/flwr_example/pytorch_cifar`." + +#~ msgid "ImageNet-2012 Image Classification" +#~ msgstr "ImageNet-2012 Classification des images" + +#~ msgid "" +#~ "`ImageNet-2012 `_ is " +#~ "one of the major computer vision " +#~ "datasets. The Flower ImageNet example " +#~ "uses PyTorch to train a ResNet-18 " +#~ "classifier in a federated learning setup" +#~ " with ten clients." +#~ msgstr "" +#~ "`ImageNet-2012 `_ est " +#~ "l'un des principaux ensembles de données" +#~ " de vision par ordinateur. L'exemple " +#~ "Flower ImageNet utilise PyTorch pour " +#~ "entraîner un classificateur ResNet-18 dans " +#~ "une configuration d'apprentissage fédéré avec" +#~ " dix clients." + +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" + +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." +#~ msgstr "" +#~ "Pour plus de détails, voir " +#~ ":code:`src/py/flwr_example/pytorch_imagenet`." + +#~ msgid "TensorFlow Examples" +#~ msgstr "Exemples de TensorFlow" + +#~ msgid "" +#~ "Our TensorFlow examples are based on " +#~ "TensorFlow 2.0 or newer. So far, " +#~ "we provide the following examples." +#~ msgstr "" +#~ "Nos exemples TensorFlow sont basés sur" +#~ " TensorFlow 2.0 ou une version plus" +#~ " récente. Jusqu'à présent, nous te " +#~ "proposons les exemples suivants." + +#~ msgid "Fashion-MNIST Image Classification" +#~ msgstr "Classification d'images Fashion-MNIST" + +#~ msgid "" +#~ "`Fashion-MNIST `_ is often used as " +#~ "the \"Hello, world!\" of machine " +#~ "learning. We follow this tradition and" +#~ " provide an example which samples " +#~ "random local datasets from Fashion-MNIST" +#~ " and trains a simple image " +#~ "classification model over those partitions." +#~ msgstr "" +#~ "nous suivons cette tradition et " +#~ "fournissons un exemple qui échantillonne " +#~ "des ensembles de données locales " +#~ "aléatoires de Fashion-MNIST et entraîne" +#~ " un modèle simple de classification " +#~ "d'images sur ces partitions." + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" + +#~ msgid "" +#~ "For more details, see " +#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +#~ msgstr "" +#~ "Pour plus de détails, voir " +#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." + +#~ msgid "" +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. 
As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongise Flower," +#~ " for example, PyTorch. This tutorial " +#~ "might be removed in future versions " +#~ "of Flower." +#~ msgstr "" + +#~ msgid "" +#~ "Now that you have known how " +#~ "federated XGBoost work with Flower, it's" +#~ " time to run some more comprehensive" +#~ " experiments by customising the " +#~ "experimental settings. In the xgboost-" +#~ "comprehensive example (`full code " +#~ "`_), we provide more options " +#~ "to define various experimental setups, " +#~ "including aggregation strategies, data " +#~ "partitioning and centralised/distributed evaluation." +#~ " We also support `Flower simulation " +#~ "`_ making it easy to " +#~ "simulate large client cohorts in a " +#~ "resource-aware manner. Let's take a " +#~ "look!" +#~ msgstr "" + +#~ msgid "|31e4b1afa87c4b968327bbeafbf184d4|" +#~ msgstr "" + +#~ msgid "|c9d935b4284e4c389a33d86b33e07c0a|" +#~ msgstr "" + +#~ msgid "|00727b5faffb468f84dd1b03ded88638|" +#~ msgstr "" + +#~ msgid "|daf0cf0ff4c24fd29439af78416cf47b|" +#~ msgstr "" + +#~ msgid "|9f093007080d471d94ca90d3e9fde9b6|" +#~ msgstr "" + +#~ msgid "|46a26e6150e0479fbd3dfd655f36eb13|" +#~ msgstr "" + +#~ msgid "|3daba297595c4c7fb845d90404a6179a|" +#~ msgstr "" + +#~ msgid "|5769874fa9c4455b80b2efda850d39d7|" +#~ msgstr "" + +#~ msgid "|ba47ffb421814b0f8f9fa5719093d839|" +#~ msgstr "" + +#~ msgid "|aeac5bf79cbf497082e979834717e01b|" +#~ msgstr "" + +#~ msgid "|ce27ed4bbe95459dba016afc42486ba2|" +#~ msgstr "" + +#~ msgid "|ae94a7f71dda443cbec2385751427d41|" +#~ msgstr "" + +#~ msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" +#~ msgstr "" + +#~ msgid "|08cb60859b07461588fe44e55810b050|" +#~ msgstr "" + diff --git a/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po b/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po index 5a5d736ece38..4e117619f9b5 100644 --- a/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po +++ b/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-02-13 11:23+0100\n" +"POT-Creation-Date: 2024-03-15 14:23+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language: pt_BR\n" @@ -17,7 +17,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.13.1\n" +"Generated-By: Babel 2.14.0\n" #: ../../source/contributor-explanation-architecture.rst:2 msgid "Flower Architecture" @@ -83,9 +83,8 @@ msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:19 msgid "" -"Please follow the first section on `Run Flower using Docker " -"`_ " -"which covers this step in more detail." +"Please follow the first section on :doc:`Run Flower using Docker ` which covers this step in more detail." msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:23 @@ -287,7 +286,7 @@ msgid "" "to help us in our effort to make Federated Learning accessible to as many" " people as possible by contributing to those translations! This might " "also be a great opportunity for those wanting to become open source " -"contributors with little prerequistes." +"contributors with little prerequisites." 
msgstr "" #: ../../source/contributor-how-to-contribute-translations.rst:13 @@ -338,7 +337,7 @@ msgstr "" #: ../../source/contributor-how-to-contribute-translations.rst:47 msgid "" -"You input your translation in the textbox at the top and then, once you " +"You input your translation in the text box at the top and then, once you " "are happy with it, you either press ``Save and continue`` (to save the " "translation and go to the next untranslated string), ``Save and stay`` " "(to save the translation and stay on the same page), ``Suggest`` (to add " @@ -376,8 +375,8 @@ msgstr "" #: ../../source/contributor-how-to-contribute-translations.rst:69 msgid "" "If you want to add a new language, you will first have to contact us, " -"either on `Slack `_, or by opening an " -"issue on our `GitHub repo `_." +"either on `Slack `_, or by opening an issue" +" on our `GitHub repo `_." msgstr "" #: ../../source/contributor-how-to-create-new-messages.rst:2 @@ -419,8 +418,8 @@ msgid "" "The first thing we need to do is to define a message type for the RPC " "system in :code:`transport.proto`. Note that we have to do it for both " "the request and response messages. For more details on the syntax of " -"proto3, please see the `official documentation " -"`_." +"proto3, please see the `official documentation `_." msgstr "" #: ../../source/contributor-how-to-create-new-messages.rst:35 @@ -530,7 +529,7 @@ msgstr "" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:11 msgid "" "Source: `Official VSCode documentation " -"`_" +"`_" msgstr "" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:15 @@ -567,14 +566,14 @@ msgstr "" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:23 msgid "" "`Developing inside a Container " -"`_" msgstr "" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:24 msgid "" "`Remote development in Containers " -"`_" +"`_" msgstr "" #: ../../source/contributor-how-to-install-development-versions.rst:2 @@ -823,8 +822,8 @@ msgstr "" #: ../../source/contributor-how-to-release-flower.rst:25 msgid "" -"Merge the pull request on the same day (i.e., before a new nightly release" -" gets published to PyPI)." +"Merge the pull request on the same day (i.e., before a new nightly " +"release gets published to PyPI)." msgstr "" #: ../../source/contributor-how-to-release-flower.rst:28 @@ -837,8 +836,8 @@ msgstr "" #: ../../source/contributor-how-to-release-flower.rst:33 msgid "" -"PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases " -"MUST use one of the following naming patterns:" +"PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases" +" MUST use one of the following naming patterns:" msgstr "" #: ../../source/contributor-how-to-release-flower.rst:35 @@ -1114,8 +1113,8 @@ msgstr "" #: ../../source/contributor-ref-good-first-contributions.rst:25 msgid "" "If you are not familiar with Flower Baselines, you should probably check-" -"out our `contributing guide for baselines `_." +"out our `contributing guide for baselines " +"`_." msgstr "" #: ../../source/contributor-ref-good-first-contributions.rst:27 @@ -1123,7 +1122,7 @@ msgid "" "You should then check out the open `issues " "`_" " for baseline requests. If you find a baseline that you'd like to work on" -" and that has no assignes, feel free to assign it to yourself and start " +" and that has no assignees, feel free to assign it to yourself and start " "working on it!" 
msgstr "" @@ -1208,42 +1207,41 @@ msgstr "" #: ../../source/contributor-tutorial-contribute-on-github.rst:6 msgid "" "If you're familiar with how contributing on GitHub works, you can " -"directly checkout our `getting started guide for contributors " -"`_." +"directly checkout our :doc:`getting started guide for contributors " +"`." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:11 +#: ../../source/contributor-tutorial-contribute-on-github.rst:10 msgid "Setting up the repository" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:22 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "**Create a GitHub account and setup Git**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:14 +#: ../../source/contributor-tutorial-contribute-on-github.rst:13 msgid "" "Git is a distributed version control tool. This allows for an entire " "codebase's history to be stored and every developer's machine. It is a " "software that will need to be installed on your local machine, you can " -"follow this `guide `_ to set it up." +"follow this `guide `_ to set it up." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:17 +#: ../../source/contributor-tutorial-contribute-on-github.rst:16 msgid "" "GitHub, itself, is a code hosting platform for version control and " "collaboration. It allows for everyone to collaborate and work from " "anywhere on remote repositories." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:19 +#: ../../source/contributor-tutorial-contribute-on-github.rst:18 msgid "" "If you haven't already, you will need to create an account on `GitHub " "`_." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:21 +#: ../../source/contributor-tutorial-contribute-on-github.rst:20 msgid "" "The idea behind the generic Git and GitHub workflow boils down to this: " "you download code from a remote repository on GitHub, make changes " @@ -1251,19 +1249,19 @@ msgid "" "history back to GitHub." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:33 +#: ../../source/contributor-tutorial-contribute-on-github.rst:32 msgid "**Forking the Flower repository**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:25 +#: ../../source/contributor-tutorial-contribute-on-github.rst:24 msgid "" "A fork is a personal copy of a GitHub repository. To create one for " -"Flower, you must navigate to https://github.com/adap/flower (while " +"Flower, you must navigate to ``_ (while " "connected to your GitHub account) and click the ``Fork`` button situated " "on the top right of the page." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:30 +#: ../../source/contributor-tutorial-contribute-on-github.rst:29 msgid "" "You can change the name if you want, but this is not necessary as this " "version of Flower will be yours and will sit inside your own account " @@ -1271,11 +1269,11 @@ msgid "" " the top left corner that you are looking at your own version of Flower." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:48 +#: ../../source/contributor-tutorial-contribute-on-github.rst:47 msgid "**Cloning your forked repository**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:36 +#: ../../source/contributor-tutorial-contribute-on-github.rst:35 msgid "" "The next step is to download the forked repository on your machine to be " "able to make changes to it. 
On your forked repository page, you should " @@ -1283,27 +1281,27 @@ msgid "" "ability to copy the HTTPS link of the repository." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:42 +#: ../../source/contributor-tutorial-contribute-on-github.rst:41 msgid "" "Once you copied the \\, you can open a terminal on your machine, " "navigate to the place you want to download the repository to and type:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:48 +#: ../../source/contributor-tutorial-contribute-on-github.rst:47 msgid "" "This will create a ``flower/`` (or the name of your fork if you renamed " "it) folder in the current working directory." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:67 +#: ../../source/contributor-tutorial-contribute-on-github.rst:66 msgid "**Add origin**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:51 +#: ../../source/contributor-tutorial-contribute-on-github.rst:50 msgid "You can then go into the repository folder:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:57 +#: ../../source/contributor-tutorial-contribute-on-github.rst:56 msgid "" "And here we will need to add an origin to our repository. The origin is " "the \\ of the remote fork repository. To obtain it, we can do as " @@ -1311,27 +1309,27 @@ msgid "" "account and copying the link." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:62 +#: ../../source/contributor-tutorial-contribute-on-github.rst:61 msgid "" "Once the \\ is copied, we can type the following command in our " "terminal:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:91 +#: ../../source/contributor-tutorial-contribute-on-github.rst:90 msgid "**Add upstream**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:70 +#: ../../source/contributor-tutorial-contribute-on-github.rst:69 msgid "" "Now we will add an upstream address to our repository. Still in the same " -"directroy, we must run the following command:" +"directory, we must run the following command:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:77 +#: ../../source/contributor-tutorial-contribute-on-github.rst:76 msgid "The following diagram visually explains what we did in the previous steps:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:81 +#: ../../source/contributor-tutorial-contribute-on-github.rst:80 msgid "" "The upstream is the GitHub remote address of the parent repository (in " "this case Flower), i.e. the one we eventually want to contribute to and " @@ -1340,169 +1338,169 @@ msgid "" "in our own account." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:85 +#: ../../source/contributor-tutorial-contribute-on-github.rst:84 msgid "" "To make sure our local version of the fork is up-to-date with the latest " "changes from the Flower repository, we can execute the following command:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:94 +#: ../../source/contributor-tutorial-contribute-on-github.rst:93 msgid "Setting up the coding environment" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:96 +#: ../../source/contributor-tutorial-contribute-on-github.rst:95 msgid "" -"This can be achieved by following this `getting started guide for " -"contributors`_ (note that you won't need to clone the repository). 
Once " -"you are able to write code and test it, you can finally start making " -"changes!" +"This can be achieved by following this :doc:`getting started guide for " +"contributors ` (note " +"that you won't need to clone the repository). Once you are able to write " +"code and test it, you can finally start making changes!" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:101 +#: ../../source/contributor-tutorial-contribute-on-github.rst:100 msgid "Making changes" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:103 +#: ../../source/contributor-tutorial-contribute-on-github.rst:102 msgid "" "Before making any changes make sure you are up-to-date with your " "repository:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:109 +#: ../../source/contributor-tutorial-contribute-on-github.rst:108 msgid "And with Flower's repository:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:123 +#: ../../source/contributor-tutorial-contribute-on-github.rst:122 msgid "**Create a new branch**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:116 +#: ../../source/contributor-tutorial-contribute-on-github.rst:115 msgid "" "To make the history cleaner and easier to work with, it is good practice " "to create a new branch for each feature/project that needs to be " "implemented." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:119 +#: ../../source/contributor-tutorial-contribute-on-github.rst:118 msgid "" "To do so, just run the following command inside the repository's " "directory:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:126 +#: ../../source/contributor-tutorial-contribute-on-github.rst:125 msgid "**Make changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:126 +#: ../../source/contributor-tutorial-contribute-on-github.rst:125 msgid "Write great code and create wonderful changes using your favorite editor!" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:139 +#: ../../source/contributor-tutorial-contribute-on-github.rst:138 msgid "**Test and format your code**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:129 +#: ../../source/contributor-tutorial-contribute-on-github.rst:128 msgid "" "Don't forget to test and format your code! Otherwise your code won't be " "able to be merged into the Flower repository. This is done so the " "codebase stays consistent and easy to understand." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:132 +#: ../../source/contributor-tutorial-contribute-on-github.rst:131 msgid "To do so, we have written a few scripts that you can execute:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:151 +#: ../../source/contributor-tutorial-contribute-on-github.rst:150 msgid "**Stage changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:142 +#: ../../source/contributor-tutorial-contribute-on-github.rst:141 msgid "" "Before creating a commit that will update your history, you must specify " "to Git which files it needs to take into account." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:144 +#: ../../source/contributor-tutorial-contribute-on-github.rst:143 msgid "This can be done with:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:150 +#: ../../source/contributor-tutorial-contribute-on-github.rst:149 msgid "" "To check which files have been modified compared to the last version " "(last commit) and to see which files are staged for commit, you can use " "the :code:`git status` command." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:161 +#: ../../source/contributor-tutorial-contribute-on-github.rst:160 msgid "**Commit changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:154 +#: ../../source/contributor-tutorial-contribute-on-github.rst:153 msgid "" "Once you have added all the files you wanted to commit using :code:`git " "add`, you can finally create your commit using this command:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:160 +#: ../../source/contributor-tutorial-contribute-on-github.rst:159 msgid "" "The \\ is there to explain to others what the commit " "does. It should be written in an imperative style and be concise. An " "example would be :code:`git commit -m \"Add images to README\"`." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:172 +#: ../../source/contributor-tutorial-contribute-on-github.rst:171 msgid "**Push the changes to the fork**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:164 +#: ../../source/contributor-tutorial-contribute-on-github.rst:163 msgid "" "Once we have committed our changes, we have effectively updated our local" " history, but GitHub has no way of knowing this unless we push our " "changes to our origin's remote address:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:171 +#: ../../source/contributor-tutorial-contribute-on-github.rst:170 msgid "" "Once this is done, you will see on the GitHub that your forked repo was " "updated with the changes you have made." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:175 +#: ../../source/contributor-tutorial-contribute-on-github.rst:174 msgid "Creating and merging a pull request (PR)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +#: ../../source/contributor-tutorial-contribute-on-github.rst:205 msgid "**Create the PR**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:178 +#: ../../source/contributor-tutorial-contribute-on-github.rst:177 msgid "" "Once you have pushed changes, on the GitHub webpage of your repository " "you should see the following message:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:182 +#: ../../source/contributor-tutorial-contribute-on-github.rst:181 msgid "Otherwise you can always find this option in the ``Branches`` page." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:184 +#: ../../source/contributor-tutorial-contribute-on-github.rst:183 msgid "" "Once you click the ``Compare & pull request`` button, you should see " "something similar to this:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:188 +#: ../../source/contributor-tutorial-contribute-on-github.rst:187 msgid "At the top you have an explanation of which branch will be merged where:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:192 +#: ../../source/contributor-tutorial-contribute-on-github.rst:191 msgid "" "In this example you can see that the request is to merge the branch " "``doc-fixes`` from my forked repository to branch ``main`` from the " "Flower repository." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:194 +#: ../../source/contributor-tutorial-contribute-on-github.rst:193 msgid "" "The input box in the middle is there for you to describe what your PR " "does and to link it to existing issues. We have placed comments (that " @@ -1510,7 +1508,7 @@ msgid "" "process." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:197 +#: ../../source/contributor-tutorial-contribute-on-github.rst:196 msgid "" "It is important to follow the instructions described in comments. For " "instance, in order to not break how our changelog system works, you " @@ -1519,163 +1517,163 @@ msgid "" ":ref:`changelogentry` appendix." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:201 +#: ../../source/contributor-tutorial-contribute-on-github.rst:200 msgid "" "At the bottom you will find the button to open the PR. This will notify " "reviewers that a new PR has been opened and that they should look over it" " to merge or to request changes." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:204 +#: ../../source/contributor-tutorial-contribute-on-github.rst:203 msgid "" "If your PR is not yet ready for review, and you don't want to notify " "anyone, you have the option to create a draft pull request:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:208 msgid "**Making new changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:208 msgid "" "Once the PR has been opened (as draft or not), you can still push new " "commits to it the same way we did before, by making changes to the branch" " associated with the PR." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:231 +#: ../../source/contributor-tutorial-contribute-on-github.rst:230 msgid "**Review the PR**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:212 +#: ../../source/contributor-tutorial-contribute-on-github.rst:211 msgid "" "Once the PR has been opened or once the draft PR has been marked as " "ready, a review from code owners will be automatically requested:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:216 +#: ../../source/contributor-tutorial-contribute-on-github.rst:215 msgid "" "Code owners will then look into the code, ask questions, request changes " "or validate the PR." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:218 +#: ../../source/contributor-tutorial-contribute-on-github.rst:217 msgid "Merging will be blocked if there are ongoing requested changes." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:222 +#: ../../source/contributor-tutorial-contribute-on-github.rst:221 msgid "" "To resolve them, just push the necessary changes to the branch associated" " with the PR:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:226 +#: ../../source/contributor-tutorial-contribute-on-github.rst:225 msgid "And resolve the conversation:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:230 +#: ../../source/contributor-tutorial-contribute-on-github.rst:229 msgid "" "Once all the conversations have been resolved, you can re-request a " "review." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:251 +#: ../../source/contributor-tutorial-contribute-on-github.rst:250 msgid "**Once the PR is merged**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:234 +#: ../../source/contributor-tutorial-contribute-on-github.rst:233 msgid "" "If all the automatic tests have passed and reviewers have no more changes" " to request, they can approve the PR and merge it." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:238 +#: ../../source/contributor-tutorial-contribute-on-github.rst:237 msgid "" "Once it is merged, you can delete the branch on GitHub (a button should " "appear to do so) and also delete it locally by doing:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:245 +#: ../../source/contributor-tutorial-contribute-on-github.rst:244 msgid "Then you should update your forked repository by doing:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:254 +#: ../../source/contributor-tutorial-contribute-on-github.rst:253 msgid "Example of first contribution" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:257 +#: ../../source/contributor-tutorial-contribute-on-github.rst:256 msgid "Problem" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:259 +#: ../../source/contributor-tutorial-contribute-on-github.rst:258 msgid "" -"For our documentation, we’ve started to use the `Diàtaxis framework " +"For our documentation, we've started to use the `Diàtaxis framework " "`_." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:261 +#: ../../source/contributor-tutorial-contribute-on-github.rst:260 msgid "" -"Our “How to” guides should have titles that continue the sencence “How to" -" …”, for example, “How to upgrade to Flower 1.0”." +"Our \"How to\" guides should have titles that continue the sentence \"How" +" to …\", for example, \"How to upgrade to Flower 1.0\"." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:263 +#: ../../source/contributor-tutorial-contribute-on-github.rst:262 msgid "" "Most of our guides do not follow this new format yet, and changing their " "title is (unfortunately) more involved than one might think." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:265 +#: ../../source/contributor-tutorial-contribute-on-github.rst:264 msgid "" -"This issue is about changing the title of a doc from present continious " +"This issue is about changing the title of a doc from present continuous " "to present simple." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:267 +#: ../../source/contributor-tutorial-contribute-on-github.rst:266 msgid "" -"Let's take the example of “Saving Progress” which we changed to “Save " -"Progress”. Does this pass our check?" +"Let's take the example of \"Saving Progress\" which we changed to \"Save " +"Progress\". Does this pass our check?" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:269 -msgid "Before: ”How to saving progress” ❌" +#: ../../source/contributor-tutorial-contribute-on-github.rst:268 +msgid "Before: \"How to saving progress\" ❌" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:271 -msgid "After: ”How to save progress” ✅" +#: ../../source/contributor-tutorial-contribute-on-github.rst:270 +msgid "After: \"How to save progress\" ✅" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:274 +#: ../../source/contributor-tutorial-contribute-on-github.rst:273 msgid "Solution" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:276 +#: ../../source/contributor-tutorial-contribute-on-github.rst:275 msgid "" -"This is a tiny change, but it’ll allow us to test your end-to-end setup. " -"After cloning and setting up the Flower repo, here’s what you should do:" +"This is a tiny change, but it'll allow us to test your end-to-end setup. " +"After cloning and setting up the Flower repo, here's what you should do:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:278 +#: ../../source/contributor-tutorial-contribute-on-github.rst:277 msgid "Find the source file in ``doc/source``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#: ../../source/contributor-tutorial-contribute-on-github.rst:278 msgid "" "Make the change in the ``.rst`` file (beware, the dashes under the title " "should be the same length as the title itself)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:280 +#: ../../source/contributor-tutorial-contribute-on-github.rst:279 msgid "" -"Build the docs and check the result: ``_" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:283 +#: ../../source/contributor-tutorial-contribute-on-github.rst:282 msgid "Rename file" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:285 +#: ../../source/contributor-tutorial-contribute-on-github.rst:284 msgid "" "You might have noticed that the file name still reflects the old wording." " If we just change the file, then we break all existing links to it - it " @@ -1683,77 +1681,77 @@ msgid "" "engine ranking." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:288 -msgid "Here’s how to change the file name:" +#: ../../source/contributor-tutorial-contribute-on-github.rst:287 +msgid "Here's how to change the file name:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +#: ../../source/contributor-tutorial-contribute-on-github.rst:289 msgid "Change the file name to ``save-progress.rst``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:291 +#: ../../source/contributor-tutorial-contribute-on-github.rst:290 msgid "Add a redirect rule to ``doc/source/conf.py``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:293 +#: ../../source/contributor-tutorial-contribute-on-github.rst:292 msgid "" "This will cause a redirect from ``saving-progress.html`` to ``save-" "progress.html``, old links will continue to work." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:296 +#: ../../source/contributor-tutorial-contribute-on-github.rst:295 msgid "Apply changes in the index file" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:298 +#: ../../source/contributor-tutorial-contribute-on-github.rst:297 msgid "" "For the lateral navigation bar to work properly, it is very important to " "update the ``index.rst`` file as well. This is where we define the whole " "arborescence of the navbar." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:301 +#: ../../source/contributor-tutorial-contribute-on-github.rst:300 msgid "Find and modify the file name in ``index.rst``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:304 +#: ../../source/contributor-tutorial-contribute-on-github.rst:303 msgid "Open PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:306 +#: ../../source/contributor-tutorial-contribute-on-github.rst:305 msgid "" -"Commit the changes (commit messages are always imperative: “Do " -"something”, in this case “Change …”)" +"Commit the changes (commit messages are always imperative: \"Do " +"something\", in this case \"Change …\")" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:307 +#: ../../source/contributor-tutorial-contribute-on-github.rst:306 msgid "Push the changes to your fork" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:308 +#: ../../source/contributor-tutorial-contribute-on-github.rst:307 msgid "Open a PR (as shown above)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:309 +#: ../../source/contributor-tutorial-contribute-on-github.rst:308 msgid "Wait for it to be approved!" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:310 +#: ../../source/contributor-tutorial-contribute-on-github.rst:309 msgid "Congrats! 🥳 You're now officially a Flower contributor!" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:314 +#: ../../source/contributor-tutorial-contribute-on-github.rst:313 msgid "How to write a good PR title" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:316 +#: ../../source/contributor-tutorial-contribute-on-github.rst:315 msgid "" "A well-crafted PR title helps team members quickly understand the purpose" " and scope of the changes being proposed. 
Here's a guide to help you " "write a good GitHub PR title:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:318 +#: ../../source/contributor-tutorial-contribute-on-github.rst:317 msgid "" "1. Be Clear and Concise: Provide a clear summary of the changes in a " "concise manner. 1. Use Actionable Verbs: Start with verbs like \"Add,\" " @@ -1763,62 +1761,62 @@ msgid "" "Capitalization and Punctuation: Follow grammar rules for clarity." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:324 +#: ../../source/contributor-tutorial-contribute-on-github.rst:323 msgid "" "Let's start with a few examples for titles that should be avoided because" " they do not provide meaningful information:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:326 +#: ../../source/contributor-tutorial-contribute-on-github.rst:325 msgid "Implement Algorithm" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:327 +#: ../../source/contributor-tutorial-contribute-on-github.rst:326 msgid "Database" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:328 +#: ../../source/contributor-tutorial-contribute-on-github.rst:327 msgid "Add my_new_file.py to codebase" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:329 +#: ../../source/contributor-tutorial-contribute-on-github.rst:328 msgid "Improve code in module" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:330 +#: ../../source/contributor-tutorial-contribute-on-github.rst:329 msgid "Change SomeModule" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:332 +#: ../../source/contributor-tutorial-contribute-on-github.rst:331 msgid "" "Here are a few positive examples which provide helpful information " "without repeating how they do it, as that is already visible in the " "\"Files changed\" section of the PR:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:334 +#: ../../source/contributor-tutorial-contribute-on-github.rst:333 msgid "Update docs banner to mention Flower Summit 2023" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:335 +#: ../../source/contributor-tutorial-contribute-on-github.rst:334 msgid "Remove unnecessary XGBoost dependency" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:336 +#: ../../source/contributor-tutorial-contribute-on-github.rst:335 msgid "Remove redundant attributes in strategies subclassing FedAvg" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:337 +#: ../../source/contributor-tutorial-contribute-on-github.rst:336 msgid "Add CI job to deploy the staging system when the ``main`` branch changes" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:338 +#: ../../source/contributor-tutorial-contribute-on-github.rst:337 msgid "" "Add new amazing library which will be used to improve the simulation " "engine" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:342 +#: ../../source/contributor-tutorial-contribute-on-github.rst:341 #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:548 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:946 #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:727 @@ -1827,150 +1825,150 @@ msgstr "" msgid "Next steps" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:344 +#: 
../../source/contributor-tutorial-contribute-on-github.rst:343 msgid "" "Once you have made your first PR, and want to contribute more, be sure to" " check out the following :" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:346 +#: ../../source/contributor-tutorial-contribute-on-github.rst:345 msgid "" -"`Good first contributions `_, where you should particularly look " -"into the :code:`baselines` contributions." +":doc:`Good first contributions `, where you should particularly look into the " +":code:`baselines` contributions." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:350 +#: ../../source/contributor-tutorial-contribute-on-github.rst:349 #: ../../source/fed/0000-20200102-fed-template.md:60 msgid "Appendix" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:355 +#: ../../source/contributor-tutorial-contribute-on-github.rst:354 msgid "Changelog entry" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:357 +#: ../../source/contributor-tutorial-contribute-on-github.rst:356 msgid "" "When opening a new PR, inside its description, there should be a " "``Changelog entry`` header." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:359 +#: ../../source/contributor-tutorial-contribute-on-github.rst:358 msgid "" "Above this header you should see the following comment that explains how " "to write your changelog entry:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:361 +#: ../../source/contributor-tutorial-contribute-on-github.rst:360 msgid "" "Inside the following 'Changelog entry' section, you should put the " "description of your changes that will be added to the changelog alongside" " your PR title." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:364 +#: ../../source/contributor-tutorial-contribute-on-github.rst:363 msgid "" -"If the section is completely empty (without any token) or non-existant, " +"If the section is completely empty (without any token) or non-existent, " "the changelog will just contain the title of the PR for the changelog " "entry, without any description." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:367 +#: ../../source/contributor-tutorial-contribute-on-github.rst:366 msgid "" "If the section contains some text other than tokens, it will use it to " "add a description to the change." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:369 +#: ../../source/contributor-tutorial-contribute-on-github.rst:368 msgid "" "If the section contains one of the following tokens it will ignore any " "other text and put the PR under the corresponding section of the " "changelog:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:371 +#: ../../source/contributor-tutorial-contribute-on-github.rst:370 msgid " is for classifying a PR as a general improvement." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:373 +#: ../../source/contributor-tutorial-contribute-on-github.rst:372 msgid " is to not add the PR to the changelog" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:375 +#: ../../source/contributor-tutorial-contribute-on-github.rst:374 msgid " is to add a general baselines change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:377 +#: ../../source/contributor-tutorial-contribute-on-github.rst:376 msgid " is to add a general examples change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:379 +#: ../../source/contributor-tutorial-contribute-on-github.rst:378 msgid " is to add a general sdk change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:381 +#: ../../source/contributor-tutorial-contribute-on-github.rst:380 msgid " is to add a general simulations change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:383 +#: ../../source/contributor-tutorial-contribute-on-github.rst:382 msgid "Note that only one token should be used." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:385 +#: ../../source/contributor-tutorial-contribute-on-github.rst:384 msgid "" "Its content must have a specific format. We will break down what each " "possibility does:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:387 +#: ../../source/contributor-tutorial-contribute-on-github.rst:386 msgid "" "If the ``### Changelog entry`` section contains nothing or doesn't exist," " the following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:391 +#: ../../source/contributor-tutorial-contribute-on-github.rst:390 msgid "" "If the ``### Changelog entry`` section contains a description (and no " "token), the following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:397 +#: ../../source/contributor-tutorial-contribute-on-github.rst:396 msgid "" "If the ``### Changelog entry`` section contains ````, nothing will " "change in the changelog." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:399 +#: ../../source/contributor-tutorial-contribute-on-github.rst:398 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:403 +#: ../../source/contributor-tutorial-contribute-on-github.rst:402 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:407 +#: ../../source/contributor-tutorial-contribute-on-github.rst:406 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:411 +#: ../../source/contributor-tutorial-contribute-on-github.rst:410 msgid "" "If the ``### Changelog entry`` section contains ````, the following " "text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:415 +#: ../../source/contributor-tutorial-contribute-on-github.rst:414 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:419 +#: ../../source/contributor-tutorial-contribute-on-github.rst:418 msgid "" "Note that only one token must be provided, otherwise, only the first " "action (in the order listed above), will be performed." @@ -2004,7 +2002,7 @@ msgstr "" msgid "" "Flower uses :code:`pyproject.toml` to manage dependencies and configure " "development tools (the ones which support it). Poetry is a build tool " -"which supports `PEP 517 `_." +"which supports `PEP 517 `_." msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:18 @@ -2172,9 +2170,9 @@ msgid "" "`_, a federated training strategy " "designed for non-iid data. We are using PyTorch to train a Convolutional " "Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " -"When applying FedBN, only few changes needed compared to `Example: " -"PyTorch - From Centralized To Federated `_." +"When applying FedBN, only few changes needed compared to :doc:`Example: " +"PyTorch - From Centralized To Federated `." msgstr "" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 @@ -2184,10 +2182,10 @@ msgstr "" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 msgid "" -"All files are revised based on `Example: PyTorch - From Centralized To " -"Federated `_. The only thing to do is modifying the file called " -":code:`cifar.py`, revised part is shown below:" +"All files are revised based on :doc:`Example: PyTorch - From Centralized " +"To Federated `. The only " +"thing to do is modifying the file called :code:`cifar.py`, revised part " +"is shown below:" msgstr "" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 @@ -2205,8 +2203,8 @@ msgstr "" msgid "" "So far this should all look fairly familiar if you've used PyTorch " "before. Let's take the next step and use what we've built to create a " -"federated learning system within FedBN, the sytstem consists of one " -"server and two clients." +"federated learning system within FedBN, the system consists of one server" +" and two clients." 
msgstr "" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 @@ -2216,13 +2214,12 @@ msgstr "" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 msgid "" -"If you have read `Example: PyTorch - From Centralized To Federated " -"`_, the following parts are easy to follow, onyl " -":code:`get_parameters` and :code:`set_parameters` function in " -":code:`client.py` needed to revise. If not, please read the `Example: " -"PyTorch - From Centralized To Federated `_. first." +"If you have read :doc:`Example: PyTorch - From Centralized To Federated " +"`, the following parts are" +" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " +"function in :code:`client.py` needed to revise. If not, please read the " +":doc:`Example: PyTorch - From Centralized To Federated `. first." msgstr "" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 @@ -2730,8 +2727,8 @@ msgid "" "Implementing a Flower *client* basically means implementing a subclass of" " either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " "Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`MNISTClient`. :code:`NumPyClient` is slightly easier " -"to implement than :code:`Client` if you use a framework with good NumPy " +"we'll call it :code:`MNISTClient`. :code:`NumPyClient` is slightly easier" +" to implement than :code:`Client` if you use a framework with good NumPy " "interoperability (like PyTorch or MXNet) because it avoids some of the " "boilerplate that would otherwise be necessary. :code:`MNISTClient` needs " "to implement four methods, two methods for getting/setting model " @@ -2911,8 +2908,8 @@ msgid "" "Implementing a Flower *client* basically means implementing a subclass of" " either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " "Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier " -"to implement than :code:`Client` if you use a framework with good NumPy " +"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" +" to implement than :code:`Client` if you use a framework with good NumPy " "interoperability (like PyTorch or TensorFlow/Keras) because it avoids " "some of the boilerplate that would otherwise be necessary. " ":code:`CifarClient` needs to implement four methods, two methods for " @@ -2962,569 +2959,291 @@ msgid "" "How about adding more clients?" msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:2 -msgid "Example: Walk-Through PyTorch & MNIST" -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:4 -msgid "" -"In this tutorial we will learn, how to train a Convolutional Neural " -"Network on MNIST using Flower and PyTorch." -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:6 -#: ../../source/tutorial-quickstart-mxnet.rst:16 -#: ../../source/tutorial-quickstart-pytorch.rst:17 -#: ../../source/tutorial-quickstart-scikitlearn.rst:14 -msgid "" -"Our example consists of one *server* and two *clients* all having the " -"same model." -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:8 -#: ../../source/tutorial-quickstart-pytorch.rst:19 -msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. 
" -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:12 -#: ../../source/tutorial-quickstart-pytorch.rst:23 -msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running :" -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:18 -msgid "" -"Since we want to use PyTorch to solve a computer vision task, let's go " -"ahead an install PyTorch and the **torchvision** library:" -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:26 -msgid "Ready... Set... Train!" -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:28 -msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. Our training " -"procedure and network architecture are based on PyTorch's `Basic MNIST " -"Example `_. This " -"will allow you see how easy it is to wrap your code with Flower and begin" -" training in a federated way. We provide you with two helper scripts, " -"namely *run-server.sh*, and *run-clients.sh*. Don't be afraid to look " -"inside, they are simple enough =)." -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:31 -msgid "" -"Go ahead and launch on a terminal the *run-server.sh* script first as " -"follows:" -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:38 -msgid "Now that the server is up and running, go ahead and launch the clients." +#: ../../source/explanation-differential-privacy.rst:2 +#: ../../source/explanation-differential-privacy.rst:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 +msgid "Differential Privacy" msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:45 +#: ../../source/explanation-differential-privacy.rst:3 msgid "" -"Et voilà! You should be seeing the training procedure and, after a few " -"iterations, the test accuracy for each client." -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:66 -msgid "Now, let's see what is really happening inside." -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:69 -#: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-mxnet.rst:226 -#: ../../source/tutorial-quickstart-pytorch.rst:203 -#: ../../source/tutorial-quickstart-scikitlearn.rst:157 -#: ../../source/tutorial-quickstart-tensorflow.rst:98 -#: ../../source/tutorial-quickstart-xgboost.rst:309 -msgid "Flower Server" +"The information in datasets like healthcare, financial transactions, user" +" preferences, etc., is valuable and has the potential for scientific " +"breakthroughs and provides important business insights. However, such " +"data is also sensitive and there is a risk of compromising individual " +"privacy." msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:71 +#: ../../source/explanation-differential-privacy.rst:6 msgid "" -"Inside the server helper script *run-server.sh* you will find the " -"following code that basically runs the :code:`server.py`" +"Traditional methods like anonymization alone would not work because of " +"attacks like Re-identification and Data Linkage. That's where " +"differential privacy comes in. It provides the possibility of analyzing " +"data while ensuring the privacy of individuals." 
msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:78 +#: ../../source/explanation-differential-privacy.rst:12 msgid "" -"We can go a bit deeper and see that :code:`server.py` simply launches a " -"server that will coordinate three rounds of training. Flower Servers are " -"very customizable, but for simple workloads, we can start a server using " -"the :ref:`start_server ` function and " -"leave all the configuration possibilities at their default values, as " -"seen below." -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:89 -#: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-mxnet.rst:36 -#: ../../source/tutorial-quickstart-pytorch.rst:37 -#: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-tensorflow.rst:29 -#: ../../source/tutorial-quickstart-xgboost.rst:55 -msgid "Flower Client" +"Imagine two datasets that are identical except for a single record (for " +"instance, Alice's data). Differential Privacy (DP) guarantees that any " +"analysis (M), like calculating the average income, will produce nearly " +"identical results for both datasets (O and O' would be similar). This " +"preserves group patterns while obscuring individual details, ensuring the" +" individual's information remains hidden in the crowd." msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:91 -msgid "" -"Next, let's take a look at the *run-clients.sh* file. You will see that " -"it contains the main loop that starts a set of *clients*." +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "DP Intro" msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:100 +#: ../../source/explanation-differential-privacy.rst:22 msgid "" -"**cid**: is the client ID. It is an integer that uniquely identifies " -"client identifier." -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:101 -msgid "**sever_address**: String that identifies IP and port of the server." +"One of the most commonly used mechanisms to achieve DP is adding enough " +"noise to the output of the analysis to mask the contribution of each " +"individual in the data while preserving the overall accuracy of the " +"analysis." msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:102 -msgid "" -"**nb_clients**: This defines the number of clients being created. This " -"piece of information is not required by the client, but it helps us " -"partition the original MNIST dataset to make sure that every client is " -"working on unique subsets of both *training* and *test* sets." +#: ../../source/explanation-differential-privacy.rst:25 +msgid "Formal Definition" msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:104 +#: ../../source/explanation-differential-privacy.rst:26 msgid "" -"Again, we can go deeper and look inside :code:`flwr_example/quickstart-" -"pytorch/client.py`. After going through the argument parsing code at the " -"beginning of our :code:`main` function, you will find a call to " -":code:`mnist.load_data`. This function is responsible for partitioning " -"the original MNIST datasets (*training* and *test*) and returning a " -":code:`torch.utils.data.DataLoader` s for each of them. We then " -"instantiate a :code:`PytorchMNISTClient` object with our client ID, our " -"DataLoaders, the number of epochs in each round, and which device we want" -" to use for training (CPU or GPU)." 
+"Differential Privacy (DP) provides statistical guarantees against the " +"information an adversary can infer through the output of a randomized " +"algorithm. It provides an unconditional upper bound on the influence of a" +" single individual on the output of the algorithm by adding noise [1]. A " +"randomized mechanism M provides (:math:`\\epsilon`, " +":math:`\\delta`)-differential privacy if for any two neighboring " +"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " +"record, and for all possible outputs S ⊆ Range(A):" msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:119 +#: ../../source/explanation-differential-privacy.rst:32 msgid "" -"The :code:`PytorchMNISTClient` object when finally passed to " -":code:`fl.client.start_client` along with the server's address as the " -"training process begins." +"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:123 -msgid "A Closer Look" -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:125 +#: ../../source/explanation-differential-privacy.rst:38 msgid "" -"Now, let's look closely into the :code:`PytorchMNISTClient` inside " -":code:`flwr_example.quickstart-pytorch.mnist` and see what it is doing:" +"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " +"metric of privacy loss. It also controls the privacy-utility trade-off; " +"lower :math:`\\epsilon` values indicate higher levels of privacy but are " +"likely to reduce utility as well. The :math:`\\delta` parameter accounts " +"for a small probability on which the upper bound :math:`\\epsilon` does " +"not hold. The amount of noise needed to achieve differential privacy is " +"proportional to the sensitivity of the output, which measures the maximum" +" change in the output due to the inclusion or removal of a single record." msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:226 -msgid "" -"The first thing to notice is that :code:`PytorchMNISTClient` instantiates" -" a CNN model inside its constructor" +#: ../../source/explanation-differential-privacy.rst:45 +msgid "Differential Privacy in Machine Learning" msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:244 +#: ../../source/explanation-differential-privacy.rst:46 msgid "" -"The code for the CNN is available under :code:`quickstart-pytorch.mnist` " -"and it is reproduced below. It is the same network found in `Basic MNIST " -"Example `_." +"DP can be utilized in machine learning to preserve the privacy of the " +"training data. Differentially private machine learning algorithms are " +"designed in a way to prevent the algorithm to learn any specific " +"information about any individual data points and subsequently prevent the" +" model from revealing sensitive information. Depending on the stage at " +"which noise is introduced, various methods exist for applying DP to " +"machine learning algorithms. One approach involves adding noise to the " +"training data (either to the features or labels), while another method " +"entails injecting noise into the gradients of the loss function during " +"model training. Additionally, such noise can be incorporated into the " +"model's output." 
msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:290 -msgid "" -"The second thing to notice is that :code:`PytorchMNISTClient` class " -"inherits from the :code:`fl.client.Client`, and hence it must implement " -"the following methods:" +#: ../../source/explanation-differential-privacy.rst:53 +msgid "Differential Privacy in Federated Learning" msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:315 +#: ../../source/explanation-differential-privacy.rst:54 msgid "" -"When comparing the abstract class to its derived class " -":code:`PytorchMNISTClient` you will notice that :code:`fit` calls a " -":code:`train` function and that :code:`evaluate` calls a :code:`test`: " -"function." +"Federated learning is a data minimization approach that allows multiple " +"parties to collaboratively train a model without sharing their raw data. " +"However, federated learning also introduces new privacy challenges. The " +"model updates between parties and the central server can leak information" +" about the local data. These leaks can be exploited by attacks such as " +"membership inference and property inference attacks, or model inversion " +"attacks." msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:317 +#: ../../source/explanation-differential-privacy.rst:58 msgid "" -"These functions can both be found inside the same :code:`quickstart-" -"pytorch.mnist` module:" +"DP can play a crucial role in federated learning to provide privacy for " +"the clients' data." msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:437 +#: ../../source/explanation-differential-privacy.rst:60 msgid "" -"Observe that these functions encapsulate regular training and test loops " -"and provide :code:`fit` and :code:`evaluate` with final statistics for " -"each round. You could substitute them with your custom train and test " -"loops and change the network architecture, and the entire example would " -"still work flawlessly. As a matter of fact, why not try and modify the " -"code to an example of your liking?" +"Depending on the granularity of privacy provision or the location of " +"noise addition, different forms of DP exist in federated learning. In " +"this explainer, we focus on two approaches of DP utilization in federated" +" learning based on where the noise is added: at the server (also known as" +" the center) or at the client (also known as the local)." msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:444 -msgid "Give It a Try" -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:445 +#: ../../source/explanation-differential-privacy.rst:63 msgid "" -"Looking through the quickstart code description above will have given a " -"good understanding of how *clients* and *servers* work in Flower, how to " -"run a simple experiment, and the internals of a client wrapper. Here are " -"a few things you could try on your own and get more experience with " -"Flower:" +"**Central Differential Privacy**: DP is applied by the server and the " +"goal is to prevent the aggregated model from leaking information about " +"each client's data." msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:448 +#: ../../source/explanation-differential-privacy.rst:65 msgid "" -"Try and change :code:`PytorchMNISTClient` so it can accept different " -"architectures." 
-msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:449 -msgid "Modify the :code:`train` function so that it accepts different optimizers" +"**Local Differential Privacy**: DP is applied on the client side before " +"sending any information to the server and the goal is to prevent the " +"updates that are sent to the server from leaking any information about " +"the client's data." msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:450 -msgid "" -"Modify the :code:`test` function so that it proves not only the top-1 " -"(regular accuracy) but also the top-5 accuracy?" +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:68 +#: ../../source/how-to-use-differential-privacy.rst:11 +msgid "Central Differential Privacy" msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:451 +#: ../../source/explanation-differential-privacy.rst:69 msgid "" -"Go larger! Try to adapt the code to larger images and datasets. Why not " -"try training on ImageNet with a ResNet-50?" -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:453 -msgid "You are ready now. Enjoy learning in a federated way!" -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:2 -msgid "Differential privacy" +"In this approach, which is also known as user-level DP, the central " +"server is responsible for adding noise to the globally aggregated " +"parameters. It should be noted that trust in the server is required." msgstr "" -#: ../../source/explanation-differential-privacy.rst:4 +#: ../../source/explanation-differential-privacy.rst:76 msgid "" -"Flower provides differential privacy (DP) wrapper classes for the easy " -"integration of the central DP guarantees provided by DP-FedAvg into " -"training pipelines defined in any of the various ML frameworks that " -"Flower is compatible with." +"While there are various ways to implement central DP in federated " +"learning, we concentrate on the algorithms proposed by [2] and [3]. The " +"overall approach is to clip the model updates sent by the clients and add" +" some amount of noise to the aggregated model. In each iteration, a " +"random set of clients is chosen with a specific probability for training." +" Each client performs local training on its own data. The update of each " +"client is then clipped by some value `S` (sensitivity `S`). This would " +"limit the impact of any individual client which is crucial for privacy " +"and often beneficial for robustness. A common approach to achieve this is" +" by restricting the `L2` norm of the clients' model updates, ensuring " +"that larger updates are scaled down to fit within the norm `S`." msgstr "" -#: ../../source/explanation-differential-privacy.rst:7 -msgid "" -"Please note that these components are still experimental; the correct " -"configuration of DP for a specific task is still an unsolved problem." +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "clipping" msgstr "" -#: ../../source/explanation-differential-privacy.rst:10 +#: ../../source/explanation-differential-privacy.rst:89 msgid "" -"The name DP-FedAvg is misleading since it can be applied on top of any FL" -" algorithm that conforms to the general structure prescribed by the " -"FedOpt family of algorithms." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:13 -msgid "DP-FedAvg" +"Afterwards, the Gaussian mechanism is used to add noise in order to " +"distort the sum of all clients' updates. 
The amount of noise is scaled to" +" the sensitivity value to obtain a privacy guarantee. The Gaussian " +"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " +"noise_scale * S ) / (number of sampled clients)`." msgstr "" -#: ../../source/explanation-differential-privacy.rst:15 -msgid "" -"DP-FedAvg, originally proposed by McMahan et al. [mcmahan]_ and extended " -"by Andrew et al. [andrew]_, is essentially FedAvg with the following " -"modifications." +#: ../../source/explanation-differential-privacy.rst:94 +msgid "Clipping" msgstr "" -#: ../../source/explanation-differential-privacy.rst:17 +#: ../../source/explanation-differential-privacy.rst:96 msgid "" -"**Clipping** : The influence of each client's update is bounded by " -"clipping it. This is achieved by enforcing a cap on the L2 norm of the " -"update, scaling it down if needed." +"There are two forms of clipping commonly used in Central DP: Fixed " +"Clipping and Adaptive Clipping." msgstr "" -#: ../../source/explanation-differential-privacy.rst:18 +#: ../../source/explanation-differential-privacy.rst:98 msgid "" -"**Noising** : Gaussian noise, calibrated to the clipping threshold, is " -"added to the average computed at the server." +"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " +"of clients' updates. Any update exceeding this threshold is clipped back " +"to the threshold value." msgstr "" -#: ../../source/explanation-differential-privacy.rst:20 +#: ../../source/explanation-differential-privacy.rst:100 msgid "" -"The distribution of the update norm has been shown to vary from task-to-" -"task and to evolve as training progresses. This variability is crucial in" -" understanding its impact on differential privacy guarantees, emphasizing" -" the need for an adaptive approach [andrew]_ that continuously adjusts " -"the clipping threshold to track a prespecified quantile of the update " +"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " +"on the observed update distribution [4]. It means that the clipping value" +" is tuned during the rounds with respect to the quantile of the update " "norm distribution." msgstr "" -#: ../../source/explanation-differential-privacy.rst:23 -msgid "Simplifying Assumptions" -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:25 -msgid "" -"We make (and attempt to enforce) a number of assumptions that must be " -"satisfied to ensure that the training process actually realizes the " -":math:`(\\epsilon, \\delta)` guarantees the user has in mind when " -"configuring the setup." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:27 -msgid "" -"**Fixed-size subsampling** :Fixed-size subsamples of the clients must be " -"taken at each round, as opposed to variable-sized Poisson subsamples." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:28 -msgid "" -"**Unweighted averaging** : The contributions from all the clients must " -"weighted equally in the aggregate to eliminate the requirement for the " -"server to know in advance the sum of the weights of all clients available" -" for selection." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:29 -msgid "" -"**No client failures** : The set of available clients must stay constant " -"across all rounds of training. In other words, clients cannot drop out or" -" fail." 
-msgstr "" - -#: ../../source/explanation-differential-privacy.rst:31 -msgid "" -"The first two are useful for eliminating a multitude of complications " -"associated with calibrating the noise to the clipping threshold, while " -"the third one is required to comply with the assumptions of the privacy " -"analysis." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:34 -msgid "" -"These restrictions are in line with constraints imposed by Andrew et al. " -"[andrew]_." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:37 -msgid "Customizable Responsibility for Noise injection" -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:38 -msgid "" -"In contrast to other implementations where the addition of noise is " -"performed at the server, you can configure the site of noise injection to" -" better match your threat model. We provide users with the flexibility to" -" set up the training such that each client independently adds a small " -"amount of noise to the clipped update, with the result that simply " -"aggregating the noisy updates is equivalent to the explicit addition of " -"noise to the non-noisy aggregate at the server." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:41 -msgid "" -"To be precise, if we let :math:`m` be the number of clients sampled each " -"round and :math:`\\sigma_\\Delta` be the scale of the total Gaussian " -"noise that needs to be added to the sum of the model updates, we can use " -"simple maths to show that this is equivalent to each client adding noise " -"with scale :math:`\\sigma_\\Delta/\\sqrt{m}`." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:44 -msgid "Wrapper-based approach" -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:46 -msgid "" -"Introducing DP to an existing workload can be thought of as adding an " -"extra layer of security around it. This inspired us to provide the " -"additional server and client-side logic needed to make the training " -"process differentially private as wrappers for instances of the " -":code:`Strategy` and :code:`NumPyClient` abstract classes respectively. " -"This wrapper-based approach has the advantage of being easily composable " -"with other wrappers that someone might contribute to the Flower library " -"in the future, e.g., for secure aggregation. Using Inheritance instead " -"can be tedious because that would require the creation of new sub- " -"classes every time a new class implementing :code:`Strategy` or " -":code:`NumPyClient` is defined." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:49 -msgid "Server-side logic" -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:51 -msgid "" -"The first version of our solution was to define a decorator whose " -"constructor accepted, among other things, a boolean-valued variable " -"indicating whether adaptive clipping was to be enabled or not. We quickly" -" realized that this would clutter its :code:`__init__()` function with " -"variables corresponding to hyperparameters of adaptive clipping that " -"would remain unused when it was disabled. A cleaner implementation could " -"be achieved by splitting the functionality into two decorators, " -":code:`DPFedAvgFixed` and :code:`DPFedAvgAdaptive`, with the latter sub- " -"classing the former. The constructors for both classes accept a boolean " -"parameter :code:`server_side_noising`, which, as the name suggests, " -"determines where noising is to be performed." 
-msgstr "" - -#: ../../source/explanation-differential-privacy.rst:54 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 -msgid "DPFedAvgFixed" -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:56 -msgid "" -"The server-side capabilities required for the original version of DP-" -"FedAvg, i.e., the one which performed fixed clipping, can be completely " -"captured with the help of wrapper logic for just the following two " -"methods of the :code:`Strategy` abstract class." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:58 -msgid "" -":code:`configure_fit()` : The config dictionary being sent by the wrapped" -" :code:`Strategy` to each client needs to be augmented with an additional" -" value equal to the clipping threshold (keyed under " -":code:`dpfedavg_clip_norm`) and, if :code:`server_side_noising=true`, " -"another one equal to the scale of the Gaussian noise that needs to be " -"added at the client (keyed under :code:`dpfedavg_noise_stddev`). This " -"entails *post*-processing of the results returned by the wrappee's " -"implementation of :code:`configure_fit()`." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:59 +#: ../../source/explanation-differential-privacy.rst:102 msgid "" -":code:`aggregate_fit()`: We check whether any of the sampled clients " -"dropped out or failed to upload an update before the round timed out. In " -"that case, we need to abort the current round, discarding any successful " -"updates that were received, and move on to the next one. On the other " -"hand, if all clients responded successfully, we must force the averaging " -"of the updates to happen in an unweighted manner by intercepting the " -":code:`parameters` field of :code:`FitRes` for each received update and " -"setting it to 1. Furthermore, if :code:`server_side_noising=true`, each " -"update is perturbed with an amount of noise equal to what it would have " -"been subjected to had client-side noising being enabled. This entails " -"*pre*-processing of the arguments to this method before passing them on " -"to the wrappee's implementation of :code:`aggregate_fit()`." +"The choice between fixed and adaptive clipping depends on various factors" +" such as privacy requirements, data distribution, model complexity, and " +"others." msgstr "" -#: ../../source/explanation-differential-privacy.rst:62 -msgid "" -"We can't directly change the aggregation function of the wrapped strategy" -" to force it to add noise to the aggregate, hence we simulate client-side" -" noising to implement server-side noising." +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:105 +#: ../../source/how-to-use-differential-privacy.rst:96 +msgid "Local Differential Privacy" msgstr "" -#: ../../source/explanation-differential-privacy.rst:64 +#: ../../source/explanation-differential-privacy.rst:107 msgid "" -"These changes have been put together into a class called " -":code:`DPFedAvgFixed`, whose constructor accepts the strategy being " -"decorated, the clipping threshold and the number of clients sampled every" -" round as compulsory arguments. The user is expected to specify the " -"clipping threshold since the order of magnitude of the update norms is " -"highly dependent on the model being trained and providing a default value" -" would be misleading. 
The number of clients sampled at every round is " -"required to calculate the amount of noise that must be added to each " -"individual update, either by the server or the clients." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:67 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 -msgid "DPFedAvgAdaptive" +"In this approach, each client is responsible for performing DP. Local DP " +"avoids the need for a fully trusted aggregator, but it should be noted " +"that local DP leads to a decrease in accuracy but better privacy in " +"comparison to central DP." msgstr "" -#: ../../source/explanation-differential-privacy.rst:69 -msgid "" -"The additional functionality required to facilitate adaptive clipping has" -" been provided in :code:`DPFedAvgAdaptive`, a subclass of " -":code:`DPFedAvgFixed`. It overrides the above-mentioned methods to do the" -" following." +#: ../../source/explanation-differential-privacy.rst:116 +msgid "In this explainer, we focus on two forms of achieving Local DP:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:71 +#: ../../source/explanation-differential-privacy.rst:118 msgid "" -":code:`configure_fit()` : It intercepts the config dict returned by " -":code:`super.configure_fit()` to add the key-value pair " -":code:`dpfedavg_adaptive_clip_enabled:True` to it, which the client " -"interprets as an instruction to include an indicator bit (1 if update " -"norm <= clipping threshold, 0 otherwise) in the results returned by it." +"Each client adds noise to the local updates before sending them to the " +"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " +"the sensitivity of the local model to be ∆, Gaussian noise is applied " +"with a noise scale of σ where:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:73 +#: ../../source/explanation-differential-privacy.rst:120 msgid "" -":code:`aggregate_fit()` : It follows a call to " -":code:`super.aggregate_fit()` with one to :code:`__update_clip_norm__()`," -" a procedure which adjusts the clipping threshold on the basis of the " -"indicator bits received from the sampled clients." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:77 -msgid "Client-side logic" +"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times " +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +"\n" msgstr "" -#: ../../source/explanation-differential-privacy.rst:79 +#: ../../source/explanation-differential-privacy.rst:125 msgid "" -"The client-side capabilities required can be completely captured through " -"wrapper logic for just the :code:`fit()` method of the " -":code:`NumPyClient` abstract class. To be precise, we need to *post-" -"process* the update computed by the wrapped client to clip it, if " -"necessary, to the threshold value supplied by the server as part of the " -"config dictionary. In addition to this, it may need to perform some extra" -" work if either (or both) of the following keys are also present in the " -"dict." +"Each client adds noise to the gradients of the model during the local " +"training (DP-SGD). More specifically, in this approach, gradients are " +"clipped and an amount of calibrated noise is injected into the gradients." msgstr "" -#: ../../source/explanation-differential-privacy.rst:81 +#: ../../source/explanation-differential-privacy.rst:128 msgid "" -":code:`dpfedavg_noise_stddev` : Generate and add the specified amount of " -"noise to the clipped update." 
+"Please note that these two approaches are providing privacy at different " +"levels." msgstr "" -#: ../../source/explanation-differential-privacy.rst:82 -msgid "" -":code:`dpfedavg_adaptive_clip_enabled` : Augment the metrics dict in the " -":code:`FitRes` object being returned to the server with an indicator bit," -" calculated as described earlier." +#: ../../source/explanation-differential-privacy.rst:131 +msgid "**References:**" msgstr "" -#: ../../source/explanation-differential-privacy.rst:86 -msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" +#: ../../source/explanation-differential-privacy.rst:133 +msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." msgstr "" -#: ../../source/explanation-differential-privacy.rst:88 +#: ../../source/explanation-differential-privacy.rst:135 msgid "" -"Assume you have trained for :math:`n` rounds with sampling fraction " -":math:`q` and noise multiplier :math:`z`. In order to calculate the " -":math:`\\epsilon` value this would result in for a particular " -":math:`\\delta`, the following script may be used." +"[2] McMahan et al. Learning Differentially Private Recurrent Language " +"Models." msgstr "" -#: ../../source/explanation-differential-privacy.rst:98 +#: ../../source/explanation-differential-privacy.rst:137 msgid "" -"McMahan et al. \"Learning Differentially Private Recurrent Language " -"Models.\" International Conference on Learning Representations (ICLR), " -"2017." +"[3] Geyer et al. Differentially Private Federated Learning: A Client " +"Level Perspective." msgstr "" -#: ../../source/explanation-differential-privacy.rst:100 -msgid "" -"Andrew, Galen, et al. \"Differentially Private Learning with Adaptive " -"Clipping.\" Advances in Neural Information Processing Systems (NeurIPS), " -"2021." +#: ../../source/explanation-differential-privacy.rst:139 +msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." msgstr "" #: ../../source/explanation-federated-evaluation.rst:2 @@ -3947,6 +3666,7 @@ msgid "As a reference, this document follows the above structure." msgstr "" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 +#: ../../source/ref-api/flwr.common.Metadata.rst:2 msgid "Metadata" msgstr "" @@ -4259,13 +3979,12 @@ msgstr "" #: ../../source/how-to-configure-clients.rst:89 msgid "" "This can be achieved by customizing an existing strategy or by " -"`implementing a custom strategy from scratch " -"`_. " -"Here's a nonsensical example that customizes :code:`FedAvg` by adding a " -"custom ``\"hello\": \"world\"`` configuration key/value pair to the " -"config dict of a *single client* (only the first client in the list, the " -"other clients in this round to not receive this \"special\" config " -"value):" +":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes :code:`FedAvg`" +" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" +" to the config dict of a *single client* (only the first client in the " +"list, the other clients in this round to not receive this \"special\" " +"config value):" msgstr "" #: ../../source/how-to-configure-logging.rst:2 @@ -4602,7 +4321,7 @@ msgid "" "More sophisticated implementations can use :code:`configure_fit` to " "implement custom client selection logic. A client will only participate " "in a round if the corresponding :code:`ClientProxy` is included in the " -"the list returned from :code:`configure_fit`." +"list returned from :code:`configure_fit`." 
msgstr "" #: ../../source/how-to-implement-strategies.rst:240 @@ -4673,7 +4392,7 @@ msgid "" "More sophisticated implementations can use :code:`configure_evaluate` to " "implement custom client selection logic. A client will only participate " "in a round if the corresponding :code:`ClientProxy` is included in the " -"the list returned from :code:`configure_evaluate`." +"list returned from :code:`configure_evaluate`." msgstr "" #: ../../source/how-to-implement-strategies.rst:287 @@ -4805,9 +4524,7 @@ msgid "Install via Docker" msgstr "" #: ../../source/how-to-install-flower.rst:60 -msgid "" -"`How to run Flower using Docker `_" +msgid ":doc:`How to run Flower using Docker `" msgstr "" #: ../../source/how-to-install-flower.rst:63 @@ -5069,14 +4786,12 @@ msgstr "" #: ../../source/how-to-monitor-simulation.rst:234 msgid "" -"Ray Dashboard: ``_" +"Ray Dashboard: ``_" msgstr "" #: ../../source/how-to-monitor-simulation.rst:236 -msgid "" -"Ray Metrics: ``_" +msgid "Ray Metrics: ``_" msgstr "" #: ../../source/how-to-run-flower-using-docker.rst:2 @@ -5954,7 +5669,8 @@ msgstr "" msgid "" "Remove \"placeholder\" methods from subclasses of ``Client`` or " "``NumPyClient``. If you, for example, use server-side evaluation, then " -"empty placeholder implementations of ``evaluate`` are no longer necessary." +"empty placeholder implementations of ``evaluate`` are no longer " +"necessary." msgstr "" #: ../../source/how-to-upgrade-to-flower-1.0.rst:85 @@ -6093,80 +5809,225 @@ msgid "" msgstr "" #: ../../source/how-to-use-built-in-mods.rst:89 -msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" msgstr "" -#: ../../source/how-to-use-strategies.rst:2 -msgid "Use strategies" +#: ../../source/how-to-use-differential-privacy.rst:2 +msgid "Use Differential Privacy" msgstr "" -#: ../../source/how-to-use-strategies.rst:4 +#: ../../source/how-to-use-differential-privacy.rst:3 msgid "" -"Flower allows full customization of the learning process through the " -":code:`Strategy` abstraction. A number of built-in strategies are " -"provided in the core framework." +"This guide explains how you can utilize differential privacy in the " +"Flower framework. If you are not yet familiar with differential privacy, " +"you can refer to :doc:`explanation-differential-privacy`." msgstr "" -#: ../../source/how-to-use-strategies.rst:6 +#: ../../source/how-to-use-differential-privacy.rst:7 msgid "" -"There are three ways to customize the way Flower orchestrates the " -"learning process on the server side:" +"Differential Privacy in Flower is in a preview phase. If you plan to use " +"these features in a production environment with sensitive data, feel free" +" contact us to discuss your requirements and to receive guidance on how " +"to best use these features." msgstr "" -#: ../../source/how-to-use-strategies.rst:8 -msgid "Use an existing strategy, for example, :code:`FedAvg`" +#: ../../source/how-to-use-differential-privacy.rst:12 +msgid "" +"This approach consists of two seprate phases: clipping of the updates and" +" adding noise to the aggregated model. For the clipping phase, Flower " +"framework has made it possible to decide whether to perform clipping on " +"the server side or the client side." 
msgstr "" -#: ../../source/how-to-use-strategies.rst:9 -#: ../../source/how-to-use-strategies.rst:40 -msgid "Customize an existing strategy with callback functions" +#: ../../source/how-to-use-differential-privacy.rst:15 +msgid "" +"**Server-side Clipping**: This approach has the advantage of the server " +"enforcing uniform clipping across all clients' updates and reducing the " +"communication overhead for clipping values. However, it also has the " +"disadvantage of increasing the computational load on the server due to " +"the need to perform the clipping operation for all clients." msgstr "" -#: ../../source/how-to-use-strategies.rst:10 -#: ../../source/how-to-use-strategies.rst:87 -msgid "Implement a novel strategy" +#: ../../source/how-to-use-differential-privacy.rst:16 +msgid "" +"**Client-side Clipping**: This approach has the advantage of reducing the" +" computational overhead on the server. However, it also has the " +"disadvantage of lacking centralized control, as the server has less " +"control over the clipping process." msgstr "" -#: ../../source/how-to-use-strategies.rst:14 -msgid "Use an existing strategy" +#: ../../source/how-to-use-differential-privacy.rst:21 +msgid "Server-side Clipping" msgstr "" -#: ../../source/how-to-use-strategies.rst:16 +#: ../../source/how-to-use-differential-privacy.rst:22 msgid "" -"Flower comes with a number of popular federated learning strategies " -"built-in. A built-in strategy can be instantiated as follows:" +"For central DP with server-side clipping, there are two :code:`Strategy` " +"classes that act as wrappers around the actual :code:`Strategy` instance " +"(for example, :code:`FedAvg`). The two wrapper classes are " +":code:`DifferentialPrivacyServerSideFixedClipping` and " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " +"adaptive clipping." msgstr "" -#: ../../source/how-to-use-strategies.rst:25 +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "server side clipping" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:31 msgid "" -"This creates a strategy with all parameters left at their default values " -"and passes it to the :code:`start_server` function. It is usually " -"recommended to adjust a few parameters during instantiation:" +"The code sample below enables the :code:`FedAvg` strategy to use server-" +"side fixed clipping using the " +":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " +"same approach can be used with " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " +"corresponding input parameters." msgstr "" -#: ../../source/how-to-use-strategies.rst:42 +#: ../../source/how-to-use-differential-privacy.rst:52 +msgid "Client-side Clipping" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:53 msgid "" -"Existing strategies provide several ways to customize their behaviour. " -"Callback functions allow strategies to call user-provided code during " -"execution." +"For central DP with client-side clipping, the server sends the clipping " +"value to selected clients on each round. Clients can use existing Flower " +":code:`Mods` to perform the clipping. Two mods are available for fixed " +"and adaptive client-side clipping: :code:`fixedclipping_mod` and " +":code:`adaptiveclipping_mod` with corresponding server-side wrappers " +":code:`DifferentialPrivacyClientSideFixedClipping` and " +":code:`DifferentialPrivacyClientSideAdaptiveClipping`." 
msgstr "" -#: ../../source/how-to-use-strategies.rst:45 -msgid "Configuring client fit and client evaluate" +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "client side clipping" msgstr "" -#: ../../source/how-to-use-strategies.rst:47 +#: ../../source/how-to-use-differential-privacy.rst:63 msgid "" -"The server can pass new configuration values to the client each round by " -"providing a function to :code:`on_fit_config_fn`. The provided function " -"will be called by the strategy and must return a dictionary of " -"configuration key values pairs that will be sent to the client. It must " -"return a dictionary of arbitrary configuration values :code:`client.fit`" -" and :code:`client.evaluate` functions during each round of federated " -"learning." +"The code sample below enables the :code:`FedAvg` strategy to use " +"differential privacy with client-side fixed clipping using both the " +":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " +"the client, :code:`fixedclipping_mod`:" msgstr "" -#: ../../source/how-to-use-strategies.rst:75 +#: ../../source/how-to-use-differential-privacy.rst:80 +msgid "" +"In addition to the server-side strategy wrapper, the :code:`ClientApp` " +"needs to configure the matching :code:`fixedclipping_mod` to perform the " +"client-side clipping:" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:97 +msgid "" +"To utilize local differential privacy (DP) and add noise to the client " +"model parameters before transmitting them to the server in Flower, you " +"can use the `LocalDpMod`. The following hyperparameters need to be set: " +"clipping norm value, sensitivity, epsilon, and delta." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "local DP mod" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:104 +msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:122 +msgid "" +"Please note that the order of mods, especially those that modify " +"parameters, is important when using multiple modifiers. Typically, " +"differential privacy (DP) modifiers should be the last to operate on " +"parameters." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:125 +msgid "Local Training using Privacy Engines" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:126 +msgid "" +"For ensuring data instance-level privacy during local model training on " +"the client side, consider leveraging privacy engines such as Opacus and " +"TensorFlow Privacy. For examples of using Flower with these engines, " +"please refer to the Flower examples directory (`Opacus " +"`_, `Tensorflow" +" Privacy `_)." +msgstr "" + +#: ../../source/how-to-use-strategies.rst:2 +msgid "Use strategies" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:4 +msgid "" +"Flower allows full customization of the learning process through the " +":code:`Strategy` abstraction. A number of built-in strategies are " +"provided in the core framework." 
+msgstr "" + +#: ../../source/how-to-use-strategies.rst:6 +msgid "" +"There are three ways to customize the way Flower orchestrates the " +"learning process on the server side:" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:8 +msgid "Use an existing strategy, for example, :code:`FedAvg`" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:9 +#: ../../source/how-to-use-strategies.rst:40 +msgid "Customize an existing strategy with callback functions" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:10 +#: ../../source/how-to-use-strategies.rst:87 +msgid "Implement a novel strategy" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:14 +msgid "Use an existing strategy" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:16 +msgid "" +"Flower comes with a number of popular federated learning strategies " +"built-in. A built-in strategy can be instantiated as follows:" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:25 +msgid "" +"This creates a strategy with all parameters left at their default values " +"and passes it to the :code:`start_server` function. It is usually " +"recommended to adjust a few parameters during instantiation:" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:42 +msgid "" +"Existing strategies provide several ways to customize their behaviour. " +"Callback functions allow strategies to call user-provided code during " +"execution." +msgstr "" + +#: ../../source/how-to-use-strategies.rst:45 +msgid "Configuring client fit and client evaluate" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:47 +msgid "" +"The server can pass new configuration values to the client each round by " +"providing a function to :code:`on_fit_config_fn`. The provided function " +"will be called by the strategy and must return a dictionary of " +"configuration key values pairs that will be sent to the client. It must " +"return a dictionary of arbitrary configuration values :code:`client.fit`" +" and :code:`client.evaluate` functions during each round of federated " +"learning." +msgstr "" + +#: ../../source/how-to-use-strategies.rst:75 msgid "" "The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " "values from server to client, and poetentially change these values each " @@ -6211,11 +6072,11 @@ msgstr "" msgid "How-to guides" msgstr "" -#: ../../source/index.rst:97 +#: ../../source/index.rst:98 msgid "Legacy example guides" msgstr "" -#: ../../source/index.rst:108 ../../source/index.rst:112 +#: ../../source/index.rst:109 ../../source/index.rst:113 msgid "Explanations" msgstr "" @@ -6223,23 +6084,23 @@ msgstr "" msgid "API reference" msgstr "" -#: ../../source/index.rst:137 +#: ../../source/index.rst:138 msgid "Reference docs" msgstr "" -#: ../../source/index.rst:153 +#: ../../source/index.rst:154 msgid "Contributor tutorials" msgstr "" -#: ../../source/index.rst:160 +#: ../../source/index.rst:161 msgid "Contributor how-to guides" msgstr "" -#: ../../source/index.rst:173 +#: ../../source/index.rst:174 msgid "Contributor explanations" msgstr "" -#: ../../source/index.rst:179 +#: ../../source/index.rst:180 msgid "Contributor references" msgstr "" @@ -6323,33 +6184,33 @@ msgid "" "specific goal." msgstr "" -#: ../../source/index.rst:110 +#: ../../source/index.rst:111 msgid "" "Understanding-oriented concept guides explain and discuss key topics and " "underlying ideas behind Flower and collaborative AI." 
msgstr "" -#: ../../source/index.rst:120 +#: ../../source/index.rst:121 msgid "References" msgstr "" -#: ../../source/index.rst:122 +#: ../../source/index.rst:123 msgid "Information-oriented API reference and other reference material." msgstr "" -#: ../../source/index.rst:131::1 +#: ../../source/index.rst:132::1 msgid ":py:obj:`flwr `\\" msgstr "" -#: ../../source/index.rst:131::1 flwr:1 of +#: ../../source/index.rst:132::1 flwr:1 of msgid "Flower main package." msgstr "" -#: ../../source/index.rst:148 +#: ../../source/index.rst:149 msgid "Contributor docs" msgstr "" -#: ../../source/index.rst:150 +#: ../../source/index.rst:151 msgid "" "The Flower community welcomes contributions. The following docs are " "intended to help along the way." @@ -6371,11 +6232,19 @@ msgstr "" msgid "flower-fleet-api" msgstr "" +#: ../../source/ref-api-cli.rst:37 +msgid "flower-client-app" +msgstr "" + +#: ../../source/ref-api-cli.rst:47 +msgid "flower-server-app" +msgstr "" + #: ../../source/ref-api/flwr.rst:2 msgid "flwr" msgstr "" -#: ../../source/ref-api/flwr.rst:25 ../../source/ref-api/flwr.server.rst:48 +#: ../../source/ref-api/flwr.rst:25 ../../source/ref-api/flwr.server.rst:52 msgid "Modules" msgstr "" @@ -6400,7 +6269,7 @@ msgid ":py:obj:`flwr.server `\\" msgstr "" #: ../../source/ref-api/flwr.rst:35::1 -#: ../../source/ref-api/flwr.server.rst:37::1 flwr.server:1 +#: ../../source/ref-api/flwr.server.rst:41::1 flwr.server:1 #: flwr.server.server.Server:1 of msgid "Flower server." msgstr "" @@ -6419,7 +6288,6 @@ msgstr "" #: ../../source/ref-api/flwr.client.rst:13 #: ../../source/ref-api/flwr.common.rst:13 -#: ../../source/ref-api/flwr.server.driver.rst:13 #: ../../source/ref-api/flwr.server.rst:13 #: ../../source/ref-api/flwr.simulation.rst:13 msgid "Functions" @@ -6457,10 +6325,10 @@ msgid "Start a Flower NumPyClient which connects to a gRPC server." 
msgstr "" #: ../../source/ref-api/flwr.client.rst:26 -#: ../../source/ref-api/flwr.common.rst:31 -#: ../../source/ref-api/flwr.server.driver.rst:24 -#: ../../source/ref-api/flwr.server.rst:28 +#: ../../source/ref-api/flwr.common.rst:32 +#: ../../source/ref-api/flwr.server.rst:29 #: ../../source/ref-api/flwr.server.strategy.rst:17 +#: ../../source/ref-api/flwr.server.workflow.rst:17 msgid "Classes" msgstr "" @@ -6475,7 +6343,7 @@ msgstr "" #: ../../source/ref-api/flwr.client.rst:33::1 msgid "" -":py:obj:`ClientApp `\\ \\(client\\_fn\\[\\, " +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " "mods\\]\\)" msgstr "" @@ -6502,8 +6370,12 @@ msgstr "" #: ../../source/ref-api/flwr.client.Client.rst:15 #: ../../source/ref-api/flwr.client.ClientApp.rst:15 #: ../../source/ref-api/flwr.client.NumPyClient.rst:15 +#: ../../source/ref-api/flwr.common.Array.rst:15 #: ../../source/ref-api/flwr.common.ClientMessage.rst:15 +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Context.rst:15 #: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 +#: ../../source/ref-api/flwr.common.Error.rst:15 #: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 #: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 #: ../../source/ref-api/flwr.common.FitIns.rst:15 @@ -6512,20 +6384,32 @@ msgstr "" #: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 #: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 #: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 +#: ../../source/ref-api/flwr.common.Message.rst:15 +#: ../../source/ref-api/flwr.common.MessageType.rst:15 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 +#: ../../source/ref-api/flwr.common.Metadata.rst:15 +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 #: ../../source/ref-api/flwr.common.Parameters.rst:15 +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 #: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 +#: ../../source/ref-api/flwr.common.RecordSet.rst:15 #: ../../source/ref-api/flwr.common.ServerMessage.rst:15 #: ../../source/ref-api/flwr.common.Status.rst:15 #: ../../source/ref-api/flwr.server.ClientManager.rst:15 +#: ../../source/ref-api/flwr.server.Driver.rst:15 #: ../../source/ref-api/flwr.server.History.rst:15 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 #: ../../source/ref-api/flwr.server.Server.rst:15 +#: ../../source/ref-api/flwr.server.ServerApp.rst:15 #: ../../source/ref-api/flwr.server.ServerConfig.rst:15 #: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 -#: ../../source/ref-api/flwr.server.driver.Driver.rst:15 -#: ../../source/ref-api/flwr.server.driver.GrpcDriver.rst:15 #: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 #: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 #: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 #: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 #: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 #: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 @@ -6543,6 +6427,9 @@ msgstr "" #: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 #: 
../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 #: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 msgid "Methods" msgstr "" @@ -6619,9 +6506,12 @@ msgstr "" #: ../../source/ref-api/flwr.client.Client.rst:46 #: ../../source/ref-api/flwr.client.NumPyClient.rst:46 +#: ../../source/ref-api/flwr.common.Array.rst:28 #: ../../source/ref-api/flwr.common.ClientMessage.rst:25 #: ../../source/ref-api/flwr.common.Code.rst:19 +#: ../../source/ref-api/flwr.common.Context.rst:25 #: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 +#: ../../source/ref-api/flwr.common.Error.rst:25 #: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 #: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 #: ../../source/ref-api/flwr.common.EventType.rst:19 @@ -6631,10 +6521,16 @@ msgstr "" #: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 #: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 #: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 +#: ../../source/ref-api/flwr.common.Message.rst:37 +#: ../../source/ref-api/flwr.common.MessageType.rst:25 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 +#: ../../source/ref-api/flwr.common.Metadata.rst:25 #: ../../source/ref-api/flwr.common.Parameters.rst:25 #: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 +#: ../../source/ref-api/flwr.common.RecordSet.rst:25 #: ../../source/ref-api/flwr.common.ServerMessage.rst:25 #: ../../source/ref-api/flwr.common.Status.rst:25 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 #: ../../source/ref-api/flwr.server.ServerConfig.rst:25 msgid "Attributes" msgstr "" @@ -6652,14 +6548,25 @@ msgstr "" #: flwr.client.numpy_client.NumPyClient.fit #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.server.app.start_server +#: flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.parametersrecord.Array flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.register #: flwr.server.client_manager.ClientManager.unregister #: flwr.server.client_manager.SimpleClientManager.register #: flwr.server.client_manager.SimpleClientManager.unregister #: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.app.start_driver flwr.server.driver.driver.Driver +#: flwr.server.compat.app.start_driver flwr.server.driver.driver.Driver +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive #: flwr.server.strategy.bulyan.Bulyan +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit #: flwr.server.strategy.fedadagrad.FedAdagrad @@ 
-6675,7 +6582,10 @@ msgstr "" #: flwr.server.strategy.strategy.Strategy.configure_fit #: flwr.server.strategy.strategy.Strategy.evaluate #: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow +#: flwr.simulation.app.start_simulation +#: flwr.simulation.run_simulation.run_simulation of msgid "Parameters" msgstr "" @@ -6693,13 +6603,17 @@ msgstr "" #: flwr.client.numpy_client.NumPyClient.fit #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.server.app.start_server +#: flwr.common.message.Message.create_reply flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.num_available #: flwr.server.client_manager.ClientManager.register #: flwr.server.client_manager.SimpleClientManager.num_available #: flwr.server.client_manager.SimpleClientManager.register #: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.app.start_driver +#: flwr.server.compat.app.start_driver +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate @@ -6723,13 +6637,17 @@ msgstr "" #: flwr.client.client.Client.get_properties #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.server.app.start_server +#: flwr.common.message.Message.create_reply flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.num_available #: flwr.server.client_manager.ClientManager.register #: flwr.server.client_manager.SimpleClientManager.num_available #: flwr.server.client_manager.SimpleClientManager.register #: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.app.start_driver +#: flwr.server.compat.app.start_driver +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate @@ -6779,23 +6697,38 @@ msgstr "" msgid "ClientApp" msgstr "" -#: flwr.client.client_app.ClientApp:1 flwr.common.typing.ClientMessage:1 +#: flwr.client.client_app.ClientApp:1 flwr.common.constant.MessageType:1 +#: flwr.common.constant.MessageTypeLegacy:1 flwr.common.context.Context:1 +#: flwr.common.message.Error:1 flwr.common.message.Message:1 +#: flwr.common.message.Metadata:1 flwr.common.record.parametersrecord.Array:1 +#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 #: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 #: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 #: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 #: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 #: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 #: 
flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 -#: flwr.common.typing.Status:1 flwr.server.app.ServerConfig:1 -#: flwr.server.driver.driver.Driver:1 -#: flwr.server.driver.grpc_driver.GrpcDriver:1 flwr.server.history.History:1 -#: flwr.server.server.Server:1 of +#: flwr.common.typing.Status:1 flwr.server.driver.driver.Driver:1 +#: flwr.server.history.History:1 flwr.server.server.Server:1 +#: flwr.server.server_app.ServerApp:1 flwr.server.server_config.ServerConfig:1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of msgid "Bases: :py:class:`object`" msgstr "" -#: flwr.client.app.start_client:33 flwr.client.app.start_numpy_client:36 -#: flwr.client.client_app.ClientApp:4 flwr.server.app.start_server:41 -#: flwr.server.driver.app.start_driver:30 of +#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 +#: flwr.client.client_app.ClientApp:4 +#: flwr.client.client_app.ClientApp.evaluate:4 +#: flwr.client.client_app.ClientApp.query:4 +#: flwr.client.client_app.ClientApp.train:4 flwr.server.app.start_server:41 +#: flwr.server.compat.app.start_driver:32 flwr.server.server_app.ServerApp:4 +#: flwr.server.server_app.ServerApp.main:4 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 +#: of msgid "Examples" msgstr "" @@ -6818,6 +6751,33 @@ msgid "" "global attribute `app` that points to an object of type `ClientApp`." msgstr "" +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`evaluate `\\ \\(\\)" +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid "Return a decorator that registers the evaluate fn with the client app." +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`query `\\ \\(\\)" +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.query:1 of +msgid "Return a decorator that registers the query fn with the client app." +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`train `\\ \\(\\)" +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.train:1 of +msgid "Return a decorator that registers the train fn with the client app." +msgstr "" + #: ../../source/ref-api/flwr.client.NumPyClient.rst:2 msgid "NumPyClient" msgstr "" @@ -7015,7 +6975,7 @@ msgid "" msgstr "" #: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 -#: flwr.server.driver.app.start_driver:21 of +#: flwr.server.compat.app.start_driver:21 of msgid "" "The PEM-encoded root certificates as a byte string or a path string. If " "provided, a secure connection using the certificates will be established " @@ -7035,15 +6995,29 @@ msgid "" "(experimental) - 'rest': HTTP (experimental)" msgstr "" -#: flwr.client.app.start_client:34 flwr.client.app.start_numpy_client:37 of +#: flwr.client.app.start_client:31 of +msgid "" +"The maximum number of times the client will try to connect to the server " +"before giving up in case of a connection error. If set to None, there is " +"no limit to the number of tries." 
+msgstr "" + +#: flwr.client.app.start_client:35 of +msgid "" +"The maximum duration before the client stops trying to connect to the " +"server in case of connection error. If set to None, there is no limit to " +"the total time." +msgstr "" + +#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of msgid "Starting a gRPC client with an insecure server connection:" msgstr "" -#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:44 of +#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of msgid "Starting an SSL-enabled gRPC client using system certificates:" msgstr "" -#: flwr.client.app.start_client:52 flwr.client.app.start_numpy_client:52 of +#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of msgid "Starting an SSL-enabled gRPC client using provided certificates:" msgstr "" @@ -7067,73 +7041,82 @@ msgstr "" msgid "common" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.record.conversion_utils.array_from_numpy:1 of +msgid "Create Array from NumPy ndarray." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.bytes_to_ndarray:1 of msgid "Deserialize NumPy ndarray from bytes." msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`configure `\\ \\(identifier\\[\\, " "filename\\, host\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.logger.configure:1 of msgid "Configure logging to file and/or remote log server." msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`event `\\ \\(event\\_type\\[\\, " "event\\_details\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.telemetry.event:1 of msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " "\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 logging.Logger.log:1 +#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 #: of msgid "Log 'msg % args' with the integer severity 'level'." msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.ndarray_to_bytes:1 of msgid "Serialize NumPy ndarray to bytes." msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid ":py:obj:`now `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.date.now:1 of msgid "Construct a datetime from time.time() with time zone set to UTC." 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`ndarrays_to_parameters `\\ " "\\(ndarrays\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.ndarrays_to_parameters:1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 @@ -7141,187 +7124,358 @@ msgstr "" msgid "Convert NumPy ndarrays to parameters object." msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`parameters_to_ndarrays `\\ " "\\(parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.parameters_to_ndarrays:1 of msgid "Convert parameters object to NumPy ndarrays." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.parametersrecord.Array:1 of +msgid "Array type." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`ClientMessage `\\ " "\\(\\[get\\_properties\\_res\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ClientMessage:1 of msgid "ClientMessage is a container used to hold one result message." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`Code `\\ \\(value\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.Code:1 of msgid "Client status codes." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`ConfigsRecord `\\ " +"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +msgid "Configs record." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`Context `\\ \\(state\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.context.Context:1 of +msgid "State of your run." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.DisconnectRes:1 of msgid "DisconnectRes message from client to server." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`EvaluateIns `\\ \\(parameters\\, " "config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.EvaluateIns:1 of msgid "Evaluate instructions for a client." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " "num\\_examples\\, metrics\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.EvaluateRes:1 of msgid "Evaluate response from a client." 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`EventType `\\ \\(value\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.telemetry.EventType:1 of msgid "Types of telemetry events." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.FitIns:1 of msgid "Fit instructions for a client." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`FitRes `\\ \\(status\\, parameters\\, " "num\\_examples\\, metrics\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.FitRes:1 of msgid "Fit response from a client." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.message.Error:1 of +msgid "A dataclass that stores information about an error that occurred." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetParametersIns:1 of msgid "Parameters request for a client." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`GetParametersRes `\\ \\(status\\, " "parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetParametersRes:1 of msgid "Response when asked to return parameters." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetPropertiesIns:1 of msgid "Properties request for a client." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`GetPropertiesRes `\\ \\(status\\, " "properties\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetPropertiesRes:1 of msgid "Properties response from a client." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.message.Message:1 of +msgid "State of your application from the viewpoint of the entity using it." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`MessageType `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.constant.MessageType:1 of +msgid "Message type." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.constant.MessageTypeLegacy:1 of +msgid "Legacy message type." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`Metadata `\\ \\(run\\_id\\, " +"message\\_id\\, src\\_node\\_id\\, ...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.message.Metadata:1 of +msgid "A dataclass holding metadata associated with the current message." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +msgid "Metrics record." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`NDArray `\\" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" "alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " ":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`Parameters `\\ \\(tensors\\, " "tensor\\_type\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.Parameters:1 of msgid "Model parameters." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.parametersrecord.ParametersRecord:1 of +msgid "Parameters record." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ReconnectIns:1 of msgid "ReconnectIns message from server to client." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`RecordSet `\\ " +"\\(\\[parameters\\_records\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.recordset.RecordSet:1 of +msgid "RecordSet stores groups of parameters, metrics and configs." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`ServerMessage `\\ " "\\(\\[get\\_properties\\_ins\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ServerMessage:1 of msgid "ServerMessage is a container used to hold one instruction message." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`Status `\\ \\(code\\, message\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.Status:1 of msgid "Client status." msgstr "" +#: ../../source/ref-api/flwr.common.Array.rst:2 +msgid "Array" +msgstr "" + +#: flwr.common.record.parametersrecord.Array:3 of +msgid "" +"A dataclass containing serialized data from an array-like or tensor-like " +"object along with some metadata about it." 
+msgstr "" + +#: flwr.common.record.parametersrecord.Array:6 of +msgid "" +"A string representing the data type of the serialised object (e.g. " +"`np.float32`)" +msgstr "" + +#: flwr.common.record.parametersrecord.Array:8 of +msgid "" +"A list representing the shape of the unserialized array-like object. This" +" is used to deserialize the data (depending on the serialization method) " +"or simply as a metadata field." +msgstr "" + +#: flwr.common.record.parametersrecord.Array:12 of +msgid "" +"A string indicating the type of serialisation mechanism used to generate " +"the bytes in `data` from an array-like or tensor-like object." +msgstr "" + +#: flwr.common.record.parametersrecord.Array:15 of +msgid "A buffer of bytes containing the data." +msgstr "" + +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +msgid ":py:obj:`numpy `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#: flwr.common.record.parametersrecord.Array.numpy:1 of +msgid "Return the array as a NumPy array." +msgstr "" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`dtype `\\" +msgstr "" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`shape `\\" +msgstr "" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`stype `\\" +msgstr "" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`data `\\" +msgstr "" + #: ../../source/ref-api/flwr.common.ClientMessage.rst:2 msgid "ClientMessage" msgstr "" @@ -7380,6 +7534,104 @@ msgid "" "`\\" msgstr "" +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 +msgid "ConfigsRecord" +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " +":py:class:`float`, :py:class:`str`, :py:class:`bytes`, :py:class:`bool`, " +":py:class:`~typing.List`\\ [:py:class:`int`], :py:class:`~typing.List`\\ " +"[:py:class:`float`], :py:class:`~typing.List`\\ [:py:class:`str`], " +":py:class:`~typing.List`\\ [:py:class:`bytes`], " +":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1 +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "Remove all items from R." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 +#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "Return number of Bytes stored in this object." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 +#: flwr.common.record.typeddict.TypedDict.get:1 of +msgid "d defaults to None." 
+msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 +#: flwr.common.record.typeddict.TypedDict.pop:1 of +msgid "If key is not found, d is returned if given, otherwise KeyError is raised." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 +#: flwr.common.record.typeddict.TypedDict.update:1 of +msgid "Update R from dict/iterable E and F." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of +msgid "This function counts booleans as occupying 1 Byte." +msgstr "" + +#: ../../source/ref-api/flwr.common.Context.rst:2 +msgid "Context" +msgstr "" + +#: flwr.common.context.Context:3 of +msgid "" +"Holds records added by the entity in a given run and that will stay " +"local. This means that the data it holds will never leave the system it's" +" running from. This can be used as an intermediate storage or scratchpad " +"when executing mods. It can also be used as a memory to access at " +"different points during the lifecycle of this entity (e.g. across " +"multiple rounds)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Context.rst:28::1 +msgid ":py:obj:`state `\\" +msgstr "" + #: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 msgid "DisconnectRes" msgstr "" @@ -7388,6 +7640,34 @@ msgstr "" msgid ":py:obj:`reason `\\" msgstr "" +#: ../../source/ref-api/flwr.common.Error.rst:2 +msgid "Error" +msgstr "" + +#: flwr.common.message.Error:3 of +msgid "An identifier for the error." +msgstr "" + +#: flwr.common.message.Error:5 of +msgid "A reason for why the error arose (e.g. an exception stack-trace)" +msgstr "" + +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`code `\\" +msgstr "" + +#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of +msgid "Error code." +msgstr "" + +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`reason `\\" +msgstr "" + +#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of +msgid "Reason reported about the error." +msgstr "" + #: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 msgid "EvaluateIns" msgstr "" @@ -7608,11 +7888,283 @@ msgstr "" msgid ":py:obj:`properties `\\" msgstr "" -#: ../../source/ref-api/flwr.common.NDArray.rst:2 -msgid "NDArray" +#: ../../source/ref-api/flwr.common.Message.rst:2 +msgid "Message" msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 +#: flwr.common.message.Message:3 of +msgid "A dataclass including information about the message to be executed." +msgstr "" + +#: flwr.common.message.Message:5 of +msgid "" +"Holds records either sent by another entity (e.g. sent by the server-side" +" logic to a client, or vice-versa) or that will be sent to it." +msgstr "" + +#: flwr.common.message.Message:8 of +msgid "" +"A dataclass that captures information about an error that took place when" +" processing another message." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid "" +":py:obj:`create_error_reply `\\ " +"\\(error\\, ttl\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_error_reply:1 of +msgid "Construct a reply message indicating an error happened." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid "" +":py:obj:`create_reply `\\ \\(content\\," +" ttl\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_reply:1 of +msgid "Create a reply to this message with specified content and TTL." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_content `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_content:1 of +msgid "Return True if message has content, else False." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_error `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_error:1 of +msgid "Return True if message has an error, else False." +msgstr "" + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`content `\\" +msgstr "" + +#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 +#: of +msgid "The content of this message." +msgstr "" + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`error `\\" +msgstr "" + +#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of +msgid "Error captured by this message." +msgstr "" + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`metadata `\\" +msgstr "" + +#: flwr.common.message.Message.create_error_reply:3 of +msgid "The error that was encountered." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.ttl:1 flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 flwr.common.message.Metadata:16 +#: of +msgid "Time-to-live for this message." +msgstr "" + +#: flwr.common.message.Message.create_reply:3 of +msgid "" +"The method generates a new `Message` as a reply to this message. It " +"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " +"this message and sets 'reply_to_message' to the ID of this message." +msgstr "" + +#: flwr.common.message.Message.create_reply:7 of +msgid "The content for the reply message." +msgstr "" + +#: flwr.common.message.Message.create_reply:12 of +msgid "A new `Message` instance representing the reply." +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:2 +msgid "MessageType" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`EVALUATE `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`QUERY `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`TRAIN `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 +msgid "MessageTypeLegacy" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PARAMETERS `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PROPERTIES `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of +msgid "An identifier for the current run." 
+msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of +msgid "An identifier for the current message." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of +msgid "An identifier for the node sending this message." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1 +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.message.Metadata:9 of +msgid "An identifier for the node receiving this message." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of +msgid "An identifier for the message this message replies to." +msgstr "" + +#: flwr.common.message.Metadata:13 of +msgid "" +"An identifier for grouping messages. In some settings, this is used as " +"the FL round." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of +msgid "A string that encodes the action to be executed on the receiving end." +msgstr "" + +#: flwr.common.message.Metadata:21 of +msgid "" +"An identifier that can be used when loading a particular data partition " +"for a ClientApp. Making use of this identifier is more relevant when " +"conducting simulations." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`dst_node_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`group_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.group_id:1 of +msgid "An identifier for grouping messages." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`message_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`message_type `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`partition_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.partition_id:1 of +msgid "An identifier telling which data partition a ClientApp should use." 
+msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`reply_to_message `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`run_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`src_node_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`ttl `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 +msgid "MetricsRecord" +msgstr "" + +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " +":py:class:`float`, :py:class:`~typing.List`\\ [:py:class:`int`], " +":py:class:`~typing.List`\\ [:py:class:`float`]]]" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.NDArray.rst:2 +msgid "NDArray" +msgstr "" + +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 msgid ":py:obj:`tensors `\\" msgstr "" @@ -7620,6 +8172,65 @@ msgstr "" msgid ":py:obj:`tensor_type `\\" msgstr "" +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 +msgid "ParametersRecord" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord:3 of +msgid "" +"A dataclass storing named Arrays in order. This means that it holds " +"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " +"viewed as an equivalent to PyTorch's state_dict, but holding serialised " +"tensors instead." 
+msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of +msgid "" +"Note that a small amount of Bytes might also be included in this counting" +" that correspond to metadata of the serialized object (e.g. of NumPy " +"array) needed for deseralization." +msgstr "" + #: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 msgid "ReconnectIns" msgstr "" @@ -7628,6 +8239,37 @@ msgstr "" msgid ":py:obj:`seconds `\\" msgstr "" +#: ../../source/ref-api/flwr.common.RecordSet.rst:2 +msgid "RecordSet" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`configs_records `\\" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1 +#: flwr.common.RecordSet.configs_records:1::1 of +msgid "Dictionary holding ConfigsRecord instances." +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`metrics_records `\\" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.metrics_records:1 of +msgid "Dictionary holding MetricsRecord instances." +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`parameters_records `\\" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.parameters_records:1 of +msgid "Dictionary holding ParametersRecord instances." +msgstr "" + #: ../../source/ref-api/flwr.common.ServerMessage.rst:2 msgid "ServerMessage" msgstr "" @@ -7664,6 +8306,10 @@ msgstr "" msgid ":py:obj:`message `\\" msgstr "" +#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 +msgid "array\\_from\\_numpy" +msgstr "" + #: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 msgid "bytes\\_to\\_ndarray" msgstr "" @@ -7711,113 +8357,157 @@ msgstr "" msgid "server" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_driver_api `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.run_driver_api:1 of msgid "Run Flower server (Driver API)." msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_fleet_api `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.run_fleet_api:1 of msgid "Run Flower server (Fleet API)." 
msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_server_app `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.app.run_server_app:1 of +#: ../../source/ref-api/flwr.server.rst:27::1 +#: flwr.server.run_serverapp.run_server_app:1 of msgid "Run Flower server app." msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_superlink `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.run_superlink:1 of msgid "Run Flower server (Driver API and Fleet API)." msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 +msgid "" +":py:obj:`start_driver `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:27::1 +#: flwr.server.compat.app.start_driver:1 of +msgid "Start a Flower Driver API server." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:27::1 msgid "" ":py:obj:`start_server `\\ \\(\\*\\[\\, " "server\\_address\\, server\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.start_server:1 of msgid "Start a Flower server using the gRPC transport layer." msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 msgid ":py:obj:`ClientManager `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.client_manager.ClientManager:1 of msgid "Abstract base class for managing Flower clients." msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 +msgid "" +":py:obj:`Driver `\\ " +"\\(\\[driver\\_service\\_address\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.driver.driver.Driver:1 of +msgid "`Driver` class provides an interface to the Driver API." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 msgid ":py:obj:`History `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.history.History:1 of msgid "History class for training and/or evaluation metrics collection." msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 +msgid "" +":py:obj:`LegacyContext `\\ \\(state\\[\\, " +"config\\, strategy\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Legacy Context." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 msgid "" ":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " "strategy\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 +msgid "" +":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " +"strategy\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.server_app.ServerApp:1 of +msgid "Flower ServerApp." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 msgid "" ":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," " round\\_timeout\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 -#: flwr.server.app.ServerConfig:1 of +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.server_config.ServerConfig:1 of msgid "Flower server config." msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.client_manager.SimpleClientManager:1 of msgid "Provides a pool of available clients." msgstr "" -#: ../../source/ref-api/flwr.server.rst:56::1 -msgid ":py:obj:`flwr.server.driver `\\" +#: ../../source/ref-api/flwr.server.rst:60::1 +msgid ":py:obj:`flwr.server.strategy `\\" msgstr "" -#: ../../source/ref-api/flwr.server.rst:56::1 flwr.server.driver:1 -#: of -msgid "Flower driver SDK." +#: ../../source/ref-api/flwr.server.rst:60::1 +#: flwr.server.strategy:1 of +msgid "Contains the strategy abstraction and different implementations." msgstr "" -#: ../../source/ref-api/flwr.server.rst:56::1 -msgid ":py:obj:`flwr.server.strategy `\\" +#: ../../source/ref-api/flwr.server.rst:60::1 +msgid ":py:obj:`flwr.server.workflow `\\" msgstr "" -#: ../../source/ref-api/flwr.server.rst:56::1 -#: flwr.server.strategy:1 of -msgid "Contains the strategy abstraction and different implementations." +#: ../../source/ref-api/flwr.server.rst:60::1 +#: flwr.server.workflow:1 of +msgid "Workflows." msgstr "" #: ../../source/ref-api/flwr.server.ClientManager.rst:2 @@ -7912,54 +8602,258 @@ msgstr "" msgid "This method is idempotent." msgstr "" -#: ../../source/ref-api/flwr.server.History.rst:2 -msgid "History" +#: ../../source/ref-api/flwr.server.Driver.rst:2 +msgid "Driver" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.driver.driver.Driver:3 of msgid "" -":py:obj:`add_loss_centralized " -"`\\ \\(server\\_round\\, " -"loss\\)" +"The IPv4 or IPv6 address of the Driver API server. Defaults to " +"`\"[::]:9091\"`." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1 -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "Add one loss entry (from centralized evaluation)." +#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +msgid "" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order: * CA certificate. * " +"server certificate. * server private key." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of msgid "" -":py:obj:`add_loss_distributed " -"`\\ \\(server\\_round\\, " -"loss\\)" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order:" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_loss_distributed:1 of -msgid "Add one loss entry (from distributed evaluation)." +#: flwr.server.app.start_server:32 flwr.server.driver.driver.Driver:10 of +msgid "CA certificate." 
msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_metrics_centralized " -"`\\ \\(server\\_round\\, " -"metrics\\)" +#: flwr.server.app.start_server:33 flwr.server.driver.driver.Driver:11 of +msgid "server certificate." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_centralized:1 of -msgid "Add metrics entries (from centralized evaluation)." +#: flwr.server.app.start_server:34 flwr.server.driver.driver.Driver:12 of +msgid "server private key." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_metrics_distributed " -"`\\ \\(server\\_round\\, " -"metrics\\)" +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid ":py:obj:`close `\\ \\(\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.driver.driver.Driver.close:1 +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "Disconnect from the SuperLink if connected." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "" +":py:obj:`create_message `\\ " +"\\(content\\, message\\_type\\, ...\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.create_message:1 of +msgid "Create a new message with specified parameters." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid ":py:obj:`get_node_ids `\\ \\(\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.get_node_ids:1 of +msgid "Get node IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.pull_messages:1 of +msgid "Pull messages based on message IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "" +":py:obj:`push_messages `\\ " +"\\(messages\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.push_messages:1 of +msgid "Push messages to specified node IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "" +":py:obj:`send_and_receive `\\ " +"\\(messages\\, \\*\\[\\, timeout\\]\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.send_and_receive:1 of +msgid "Push messages to specified node IDs and pull the reply messages." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:3 of +msgid "" +"This method constructs a new `Message` with given content and metadata. " +"The `run_id` and `src_node_id` will be set automatically." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:6 of +msgid "" +"The content for the new message. This holds records that are to be sent " +"to the destination node." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:9 of +msgid "" +"The type of the message, defining the action to be executed on the " +"receiving end." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:12 of +msgid "The ID of the destination node to which the message is being sent." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:14 of +msgid "" +"The ID of the group to which this message is associated. In some " +"settings, this is used as the FL round." 
+msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:17 of +msgid "" +"Time-to-live for the round trip of this message, i.e., the time from " +"sending this message to receiving a reply. It specifies the duration for " +"which the message and its potential reply are considered valid." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:22 of +msgid "" +"**message** -- A new `Message` instance with the specified content and " +"metadata." +msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:3 of +msgid "" +"This method is used to collect messages from the SuperLink that " +"correspond to a set of given message IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:6 of +msgid "An iterable of message IDs for which reply messages are to be retrieved." +msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:9 of +msgid "**messages** -- An iterable of messages received." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:3 of +msgid "" +"This method takes an iterable of messages and sends each message to the " +"node specified in `dst_node_id`." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:6 +#: flwr.server.driver.driver.Driver.send_and_receive:7 of +msgid "An iterable of messages to be sent." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:9 of +msgid "" +"**message_ids** -- An iterable of IDs for the messages that were sent, " +"which can be used to pull replies." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:3 of +msgid "" +"This method sends a list of messages to their destination node IDs and " +"then waits for the replies. It continues to pull replies until either all" +" replies are received or the specified timeout duration is exceeded." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:9 of +msgid "" +"The timeout duration in seconds. If specified, the method will wait for " +"replies for this duration. If `None`, there is no time limit and the " +"method will wait until replies for all messages are received." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:14 of +msgid "**replies** -- An iterable of reply messages received from the SuperLink." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:18 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:53 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:60 +#: of +msgid "Notes" +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:19 of +msgid "" +"This method uses `push_messages` to send the messages and `pull_messages`" +" to collect the replies. If `timeout` is set, the method may not return " +"replies for all sent messages. A message remains valid until its TTL, " +"which is not affected by `timeout`." +msgstr "" + +#: ../../source/ref-api/flwr.server.History.rst:2 +msgid "History" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1 +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "Add one loss entry (from centralized evaluation)." 
+msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_loss_distributed:1 of +msgid "Add one loss entry (from distributed evaluation)." +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_metrics_centralized " +"`\\ \\(server\\_round\\, " +"metrics\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_centralized:1 of +msgid "Add metrics entries (from centralized evaluation)." +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_metrics_distributed " +"`\\ \\(server\\_round\\, " +"metrics\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 #: flwr.server.history.History.add_metrics_distributed:1 of msgid "Add metrics entries (from distributed evaluation)." msgstr "" @@ -7976,6 +8870,34 @@ msgstr "" msgid "Add metrics entries (from distributed fit)." msgstr "" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 +msgid "LegacyContext" +msgstr "" + +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Bases: :py:class:`~flwr.common.context.Context`" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +msgid ":py:obj:`config `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +msgid ":py:obj:`strategy `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +msgid ":py:obj:`client_manager `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +msgid ":py:obj:`history `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +msgid ":py:obj:`state `\\" +msgstr "" + #: flwr.server.server.Server.client_manager:1::1 of msgid ":py:obj:`client_manager `\\ \\(\\)" msgstr "" @@ -8047,11 +8969,32 @@ msgstr "" msgid "Replace server strategy." msgstr "" +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 +msgid "ServerApp" +msgstr "" + +#: flwr.server.server_app.ServerApp:5 of +msgid "Use the `ServerApp` with an existing `Strategy`:" +msgstr "" + +#: flwr.server.server_app.ServerApp:15 of +msgid "Use the `ServerApp` with a custom main function:" +msgstr "" + +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid ":py:obj:`main `\\ \\(\\)" +msgstr "" + +#: flwr.server.server_app.ServerApp.main:1 +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid "Return a decorator that registers the main fn with the server app." +msgstr "" + #: ../../source/ref-api/flwr.server.ServerConfig.rst:2 msgid "ServerConfig" msgstr "" -#: flwr.server.app.ServerConfig:3 of +#: flwr.server.server_config.ServerConfig:3 of msgid "" "All attributes have default values which allows users to configure just " "the ones they care about." 
@@ -8125,488 +9068,381 @@ msgstr "" msgid "**success**" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:2 -msgid "driver" +#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 +msgid "run\\_driver\\_api" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:22::1 -msgid "" -":py:obj:`start_driver `\\ \\(\\*\\[\\, " -"server\\_address\\, server\\, ...\\]\\)" +#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 +msgid "run\\_fleet\\_api" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:22::1 -#: flwr.server.driver.app.start_driver:1 of -msgid "Start a Flower Driver API server." +#: ../../source/ref-api/flwr.server.run_server_app.rst:2 +msgid "run\\_server\\_app" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:30::1 -msgid "" -":py:obj:`Driver `\\ " -"\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#: ../../source/ref-api/flwr.server.run_superlink.rst:2 +msgid "run\\_superlink" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:30::1 -#: flwr.server.driver.driver.Driver:1 of -msgid "`Driver` class provides an interface to the Driver API." +#: ../../source/ref-api/flwr.server.start_driver.rst:2 +msgid "start\\_driver" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:30::1 +#: flwr.server.compat.app.start_driver:3 of msgid "" -":py:obj:`GrpcDriver `\\ " -"\\(\\[driver\\_service\\_address\\, ...\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.driver.rst:30::1 -#: flwr.server.driver.grpc_driver.GrpcDriver:1 of -msgid "`GrpcDriver` provides access to the gRPC Driver API/service." +"The IPv4 or IPv6 address of the Driver API server. Defaults to " +"`\"[::]:8080\"`." msgstr "" -#: ../../source/ref-api/flwr.server.driver.Driver.rst:2 -msgid "Driver" +#: flwr.server.compat.app.start_driver:6 of +msgid "" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_driver` will create one." msgstr "" -#: flwr.server.driver.driver.Driver:3 of +#: flwr.server.app.start_server:9 flwr.server.compat.app.start_driver:10 +#: flwr.simulation.app.start_simulation:28 of msgid "" -"The IPv4 or IPv6 address of the Driver API server. Defaults to " -"`\"[::]:9091\"`." +"Currently supported values are `num_rounds` (int, default: 1) and " +"`round_timeout` in seconds (float, default: None)." msgstr "" -#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +#: flwr.server.app.start_server:12 flwr.server.compat.app.start_driver:13 of msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order: * CA certificate. * " -"server certificate. * server private key." +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`start_server` will use `flwr.server.strategy.FedAvg`." msgstr "" -#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +#: flwr.server.compat.app.start_driver:17 of msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order:" +"An implementation of the class `flwr.server.ClientManager`. If no " +"implementation is provided, then `start_driver` will use " +"`flwr.server.SimpleClientManager`." msgstr "" -#: flwr.server.app.start_server:32 flwr.server.driver.driver.Driver:10 of -msgid "CA certificate." 
+#: flwr.server.compat.app.start_driver:25 of +msgid "The Driver object to use." msgstr "" -#: flwr.server.app.start_server:33 flwr.server.driver.driver.Driver:11 of -msgid "server certificate." +#: flwr.server.app.start_server:37 flwr.server.compat.app.start_driver:28 of +msgid "**hist** -- Object containing training and evaluation metrics." msgstr "" -#: flwr.server.app.start_server:34 flwr.server.driver.driver.Driver:12 of -msgid "server private key." +#: flwr.server.compat.app.start_driver:33 of +msgid "Starting a driver that connects to an insecure server:" msgstr "" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of -msgid ":py:obj:`get_nodes `\\ \\(\\)" +#: flwr.server.compat.app.start_driver:37 of +msgid "Starting a driver that connects to an SSL-enabled server:" msgstr "" -#: flwr.server.driver.driver.Driver.get_nodes:1 -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of -msgid "Get node IDs." +#: ../../source/ref-api/flwr.server.start_server.rst:2 +msgid "start\\_server" msgstr "" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of -msgid "" -":py:obj:`pull_task_res `\\ " -"\\(task\\_ids\\)" +#: flwr.server.app.start_server:3 of +msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." msgstr "" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 -#: flwr.server.driver.driver.Driver.pull_task_res:1 -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.pull_task_res:1 of -msgid "Get task results." +#: flwr.server.app.start_server:5 of +msgid "" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_server` will create one." msgstr "" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of +#: flwr.server.app.start_server:16 of msgid "" -":py:obj:`push_task_ins `\\ " -"\\(task\\_ins\\_list\\)" +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_server` will use " +"`flwr.server.client_manager.SimpleClientManager`." msgstr "" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 -#: flwr.server.driver.driver.Driver.push_task_ins:1 -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.push_task_ins:1 of -msgid "Schedule tasks." +#: flwr.server.app.start_server:21 of +msgid "" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" clients. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower clients need to be started with the same value (see " +"`flwr.client.start_client`), otherwise clients will not know about the " +"increased limit and block larger messages." msgstr "" -#: ../../source/ref-api/flwr.server.driver.GrpcDriver.rst:2 -msgid "GrpcDriver" +#: flwr.server.app.start_server:42 of +msgid "Starting an insecure server:" msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid ":py:obj:`connect `\\ \\(\\)" +#: flwr.server.app.start_server:46 of +msgid "Starting an SSL-enabled server:" msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1 -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid "Connect to the Driver API." 
+#: ../../source/ref-api/flwr.server.strategy.rst:2 +msgid "strategy" msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`create_run `\\ " -"\\(req\\)" +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.create_run:1 of -msgid "Request for run ID." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.bulyan.Bulyan:1 of +msgid "Bulyan strategy." msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid ":py:obj:`disconnect `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.disconnect:1 of -msgid "Disconnect from the Driver API." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid ":py:obj:`get_nodes `\\ \\(req\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.get_nodes:1 of -msgid "Get client IDs." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`pull_task_res `\\ " -"\\(req\\)" +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " +"`\\ " +"\\(...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side adaptive clipping." msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`push_task_ins `\\ " -"\\(req\\)" +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.driver.start_driver.rst:2 -msgid "start\\_driver" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: of +msgid "Strategy wrapper for central DP with server-side adaptive clipping." msgstr "" -#: flwr.server.driver.app.start_driver:3 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"The IPv4 or IPv6 address of the Driver API server. Defaults to " -"`\"[::]:8080\"`." +":py:obj:`DifferentialPrivacyClientSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.driver.app.start_driver:6 of -msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. If no instance is provided, then `start_driver` will create one." 
+#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side fixed clipping." msgstr "" -#: flwr.server.app.start_server:9 flwr.server.driver.app.start_driver:10 -#: flwr.simulation.app.start_simulation:28 of -msgid "" -"Currently supported values are `num_rounds` (int, default: 1) and " -"`round_timeout` in seconds (float, default: None)." -msgstr "" - -#: flwr.server.app.start_server:12 flwr.server.driver.app.start_driver:13 of -msgid "" -"An implementation of the abstract base class " -"`flwr.server.strategy.Strategy`. If no strategy is provided, then " -"`start_server` will use `flwr.server.strategy.FedAvg`." -msgstr "" - -#: flwr.server.driver.app.start_driver:17 of -msgid "" -"An implementation of the class `flwr.server.ClientManager`. If no " -"implementation is provided, then `start_driver` will use " -"`flwr.server.SimpleClientManager`." -msgstr "" - -#: flwr.server.app.start_server:37 flwr.server.driver.app.start_driver:26 of -msgid "**hist** -- Object containing training and evaluation metrics." -msgstr "" - -#: flwr.server.driver.app.start_driver:31 of -msgid "Starting a driver that connects to an insecure server:" -msgstr "" - -#: flwr.server.driver.app.start_driver:35 of -msgid "Starting a driver that connects to an SSL-enabled server:" -msgstr "" - -#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 -msgid "run\\_driver\\_api" -msgstr "" - -#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 -msgid "run\\_fleet\\_api" -msgstr "" - -#: ../../source/ref-api/flwr.server.run_server_app.rst:2 -msgid "run\\_server\\_app" -msgstr "" - -#: ../../source/ref-api/flwr.server.run_superlink.rst:2 -msgid "run\\_superlink" -msgstr "" - -#: ../../source/ref-api/flwr.server.start_server.rst:2 -msgid "start\\_server" -msgstr "" - -#: flwr.server.app.start_server:3 of -msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." -msgstr "" - -#: flwr.server.app.start_server:5 of -msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. If no instance is provided, then `start_server` will create one." -msgstr "" - -#: flwr.server.app.start_server:16 of -msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_server` will use " -"`flwr.server.client_manager.SimpleClientManager`." -msgstr "" - -#: flwr.server.app.start_server:21 of -msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" clients. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower clients need to be started with the same value (see " -"`flwr.client.start_client`), otherwise clients will not know about the " -"increased limit and block larger messages." 
-msgstr "" - -#: flwr.server.app.start_server:42 of -msgid "Starting an insecure server:" -msgstr "" - -#: flwr.server.app.start_server:46 of -msgid "Starting an SSL-enabled server:" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:2 -msgid "strategy" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FaultTolerantFedAvg " -"`\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`DifferentialPrivacyServerSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of -msgid "Configurable fault-tolerant FedAvg strategy implementation." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: of +msgid "Strategy wrapper for central DP with server-side fixed clipping." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedadagrad.FedAdagrad:1 of msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAdam `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedadam.FedAdam:1 of msgid "FedAdam - Adaptive Federated Optimization using Adam." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAvg `\\ \\(\\*\\[\\, " "fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedavg.FedAvg:1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of msgid "Federated Averaging strategy." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -msgid "" -":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of -msgid "Configurable FedXgbNnAvg strategy implementation." -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -msgid "" -":py:obj:`FedXgbBagging `\\ " -"\\(\\[evaluate\\_function\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of -msgid "Configurable FedXgbBagging strategy implementation." -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -msgid "" -":py:obj:`FedXgbCyclic `\\ " -"\\(\\*\\*kwargs\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of -msgid "Configurable FedXgbCyclic strategy implementation." 
-msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAvgAndroid `\\ " "\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedavgm.FedAvgM:1 of msgid "Federated Averaging with Momentum strategy." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedmedian.FedMedian:1 of +msgid "Configurable FedMedian strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedOpt `\\ \\(\\*\\[\\, " "fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedopt.FedOpt:1 of msgid "Federated Optim strategy." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedProx `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedprox.FedProx:1 of msgid "Federated Optimization strategy." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FedYogi `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "FedYogi [Reddi et al., 2020] strategy." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of +msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " -"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Configurable QFedAvg strategy implementation." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +msgid "Configurable FedXgbBagging strategy implementation." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FedMedian `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedmedian.FedMedian:1 of -msgid "Configurable FedMedian strategy implementation." 
+#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of +msgid "Configurable FedXgbCyclic strategy implementation." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FedTrimmedAvg `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of -msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`Krum `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.krum.Krum:1 of -msgid "Krum [Blanchard et al., 2017] strategy." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`Bulyan `\\ \\(\\*\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.bulyan.Bulyan:1 of -msgid "Bulyan strategy." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of +msgid "Configurable fault-tolerant FedAvg strategy implementation." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`DPFedAvgAdaptive `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\)" +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.krum.Krum:1 of +msgid "Krum [Blanchard et al., 2017] strategy." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`DPFedAvgFixed `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of -msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Configurable QFedAvg strategy implementation." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid ":py:obj:`Strategy `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.strategy.Strategy:1 of msgid "Abstract base class for server strategy implementations." msgstr "" @@ -8806,6 +9642,14 @@ msgid "" "parameters\\, ...\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 @@ -8827,6 +9671,14 @@ msgid "" "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 #: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 @@ -8920,6 +9772,10 @@ msgstr "" msgid "Return the sample size and the required number of available clients." 
msgstr "" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 +msgid "DPFedAvgAdaptive" +msgstr "" + #: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" msgstr "" @@ -8937,6 +9793,14 @@ msgid "" "\\(server\\_round\\, results\\, ...\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of @@ -8985,6 +9849,14 @@ msgid "" "\\(server\\_round\\, parameters\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of msgid "Evaluate model parameters using an evaluation function from the strategy." 
@@ -8998,6 +9870,14 @@ msgid "" "\\(client\\_manager\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of msgid "Initialize global model parameters using given strategy." @@ -9031,6 +9911,14 @@ msgid "" "round of federated evaluation." msgstr "" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 +msgid "DPFedAvgFixed" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 #: flwr.server.strategy.fedavg.FedAvg:1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of @@ -9112,9270 +10000,11353 @@ msgid "" "round of federated learning." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 -msgid "FaultTolerantFedAvg" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyClientSideAdaptiveClipping" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 #: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid "Use `adaptiveclipping_mod` modifier at the client side." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 #: of msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " +"happen on the client-side, usually by using the built-in " +"`adaptiveclipping_mod`." 
msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 -#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 -#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 -#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using weighted average." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 +#: of +msgid "The strategy to which DP functionalities will be added by this wrapper." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 #: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "The noise multiplier for the Gaussian mechanism for model updates." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 #: of -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "The number of clients that are sampled on each round." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 #: of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " +"recommends to set to 0.1." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 #: of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." 
msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 #: of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"The learning rate for the clipping norm adaptation. Defaults to 0.2. " +"Andrew et al. recommends to set to 0.2." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 #: of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"The stddev of the noise added to the count of updates currently below the" +" estimate. Andrew et al. recommends to set to `expected_num_records/20`" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 -#: ../../source/ref-changelog.md:839 -msgid "FedAdagrad" -msgstr "" - -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 -#: flwr.server.strategy.fedadam.FedAdam:1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" -msgstr "" - -#: flwr.server.strategy.fedadagrad.FedAdagrad:3 -#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 -#: flwr.server.strategy.fedyogi.FedYogi:3 of -msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" -msgstr "" - -#: flwr.server.strategy.fedadagrad.FedAdagrad:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:23 -#: flwr.server.strategy.fedadam.FedAdam:25 -#: flwr.server.strategy.fedadam.FedAdam:27 -#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 -#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 -#: flwr.server.strategy.fedprox.FedProx:61 -#: flwr.server.strategy.fedprox.FedProx:63 -#: flwr.server.strategy.fedyogi.FedYogi:28 -#: flwr.server.strategy.fedyogi.FedYogi:30 of -msgid "Metrics aggregation function, optional." -msgstr "" - -#: flwr.server.strategy.fedadagrad.FedAdagrad:29 -#: flwr.server.strategy.fedadam.FedAdam:29 -#: flwr.server.strategy.fedopt.FedOpt:29 of -msgid "Server-side learning rate. Defaults to 1e-1." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 +#: of +msgid "Create a strategy:" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:31 -#: flwr.server.strategy.fedadam.FedAdam:31 -#: flwr.server.strategy.fedopt.FedOpt:31 of -msgid "Client-side learning rate. Defaults to 1e-1." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 +#: of +msgid "" +"Wrap the strategy with the " +"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:33 -#: flwr.server.strategy.fedadam.FedAdam:37 -#: flwr.server.strategy.fedopt.FedOpt:37 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 +#: of +msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`aggregate_fit `\\" +":py:obj:`aggregate_fit " +"`\\" " \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 +#: of +msgid "Aggregate training results and update clip norms." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`configure_fit `\\" +":py:obj:`configure_fit " +"`\\" " \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 +msgid "DifferentialPrivacyClientSideFixedClipping" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 +#: of +msgid "Use `fixedclipping_mod` modifier at the client side." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 -msgid "FedAdam" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 +#: of +msgid "" +"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " +"on the client-side, usually by using the built-in `fixedclipping_mod`." msgstr "" -#: flwr.server.strategy.fedadam.FedAdam:33 -#: flwr.server.strategy.fedyogi.FedYogi:36 of -msgid "Momentum parameter. Defaults to 0.9." +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 +#: of +msgid "" +"The noise multiplier for the Gaussian mechanism for model updates. A " +"value of 1.0 or higher is recommended for strong privacy." msgstr "" -#: flwr.server.strategy.fedadam.FedAdam:35 -#: flwr.server.strategy.fedyogi.FedYogi:38 of -msgid "Second moment parameter. Defaults to 0.99." +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 +#: of +msgid "The value of the clipping norm." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 +#: of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " +"wrapper:" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 +#: of +msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 +#: of +msgid "Add noise to the aggregated parameters." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 -msgid "FedAvg" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:3 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of -msgid "Implementation based on https://arxiv.org/abs/1602.05629" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyServerSideAdaptiveClipping" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 #: of msgid "" -"Fraction of clients used during training. In case `min_fit_clients` is " -"larger than `fraction_fit * available_clients`, `min_fit_clients` will " -"still be sampled. Defaults to 1.0." +"The standard deviation of the noise added to the count of updates below " +"the estimate. Andrew et al. recommends to set to " +"`expected_num_records/20`" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 #: of msgid "" -"Fraction of clients used during validation. In case " -"`min_evaluate_clients` is larger than `fraction_evaluate * " -"available_clients`, `min_evaluate_clients` will still be sampled. " -"Defaults to 1.0." 
+"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " +"wrapper" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 +msgid "DifferentialPrivacyServerSideFixedClipping" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 +#: of msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 -msgid "FedAvgAndroid" +"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " +"wrapper" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of msgid "" ":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"`\\" +" 
\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 #: of -msgid "" -":py:obj:`bytes_to_ndarray " -"`\\ \\(tensor\\)" -msgstr "" - -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of -msgid "Deserialize NumPy array from bytes." +msgid "Compute the updates, clip, and pass them for aggregation." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of msgid "" ":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of msgid "" ":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of msgid "" ":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 #: of -msgid "" -":py:obj:`ndarray_to_bytes " -"`\\ \\(ndarray\\)" +msgid "Afterward, add noise to the aggregated parameters." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of -msgid "Serialize NumPy array to bytes." 
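The DifferentialPrivacyServerSideFixedClipping entries above describe a strategy wrapper that clips client updates to a fixed norm and then adds noise to the aggregate on the server. A minimal usage sketch, assuming the wrapper is constructed from the wrapped strategy plus noise and clipping settings (the argument order and the example values below are assumptions, not taken from this patch):

    import flwr as fl
    from flwr.server.strategy import DifferentialPrivacyServerSideFixedClipping, FedAvg

    # Base strategy whose aggregated updates should be protected.
    base_strategy = FedAvg(fraction_fit=1.0, min_fit_clients=10, min_available_clients=10)

    # Wrap it: client updates are clipped to a fixed norm, then noise is added
    # to the aggregated parameters on the server side.
    dp_strategy = DifferentialPrivacyServerSideFixedClipping(
        base_strategy,  # strategy to wrap
        1.0,            # noise multiplier (assumed parameter order)
        10.0,           # fixed clipping norm
        10,             # number of sampled clients per round
    )

    # The wrapped strategy is then used like any other strategy.
    fl.server.start_server(
        server_address="0.0.0.0:8080",
        config=fl.server.ServerConfig(num_rounds=3),
        strategy=dp_strategy,
    )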
+#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 +msgid "FaultTolerantFedAvg" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of msgid "" -":py:obj:`ndarrays_to_parameters " -"`\\ " -"\\(ndarrays\\)" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 +#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 +#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 +#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using weighted average." +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of msgid "" -":py:obj:`parameters_to_ndarrays " -"`\\ " -"\\(parameters\\)" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of -msgid "Convert parameters object to NumPy weights." 
+msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 -msgid "FedAvgM" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:3 of -msgid "Implementation based on https://arxiv.org/abs/1909.06335" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:25 of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"Server-side learning rate used in server-side optimization. Defaults to " -"1.0." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:28 of -msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 +#: ../../source/ref-changelog.md:839 +msgid "FedAdagrad" +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 +#: flwr.server.strategy.fedadam.FedAdam:1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:3 +#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 +#: flwr.server.strategy.fedyogi.FedYogi:3 of +msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:23 +#: flwr.server.strategy.fedadam.FedAdam:25 +#: flwr.server.strategy.fedadam.FedAdam:27 +#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 +#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 +#: flwr.server.strategy.fedprox.FedProx:61 +#: flwr.server.strategy.fedprox.FedProx:63 +#: flwr.server.strategy.fedyogi.FedYogi:28 +#: flwr.server.strategy.fedyogi.FedYogi:30 of +msgid "Metrics aggregation function, optional." +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:29 +#: flwr.server.strategy.fedadam.FedAdam:29 +#: flwr.server.strategy.fedopt.FedOpt:29 of +msgid "Server-side learning rate. Defaults to 1e-1." +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:31 +#: flwr.server.strategy.fedadam.FedAdam:31 +#: flwr.server.strategy.fedopt.FedOpt:31 of +msgid "Client-side learning rate. Defaults to 1e-1." +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:33 +#: flwr.server.strategy.fedadam.FedAdam:37 +#: flwr.server.strategy.fedopt.FedOpt:37 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." 
msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit `\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_evaluation_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_fit_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 -msgid "FedMedian" +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 +msgid "FedAdam" +msgstr "" + +#: flwr.server.strategy.fedadam.FedAdam:33 +#: flwr.server.strategy.fedyogi.FedYogi:36 of +msgid "Momentum parameter. Defaults to 0.9." +msgstr "" + +#: flwr.server.strategy.fedadam.FedAdam:35 +#: flwr.server.strategy.fedyogi.FedYogi:38 of +msgid "Second moment parameter. Defaults to 0.99." msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`aggregate_fit `\\ " +":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of -msgid "Aggregate fit results using median." 
-msgstr "" - #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`configure_fit `\\ " +":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_evaluation_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_fit_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 -msgid "FedOpt" +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 +msgid "FedAvg" msgstr "" -#: flwr.server.strategy.fedopt.FedOpt:33 of -msgid "Momentum parameter. Defaults to 0.0." +#: flwr.server.strategy.fedavg.FedAvg:3 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of +msgid "Implementation based on https://arxiv.org/abs/1602.05629" msgstr "" -#: flwr.server.strategy.fedopt.FedOpt:35 of -msgid "Second moment parameter. Defaults to 0.0." +#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 +#: of +msgid "" +"Fraction of clients used during training. In case `min_fit_clients` is " +"larger than `fraction_fit * available_clients`, `min_fit_clients` will " +"still be sampled. Defaults to 1.0." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 +#: of +msgid "" +"Fraction of clients used during validation. In case " +"`min_evaluate_clients` is larger than `fraction_evaluate * " +"available_clients`, `min_evaluate_clients` will still be sampled. " +"Defaults to 1.0." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg:33 of +msgid "Enable (True) or disable (False) in-place aggregation of model updates." 
msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " +"`\\ \\(server\\_round\\, " "results\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`aggregate_fit `\\ " +":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " +"`\\ \\(server\\_round\\, " "parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`configure_fit `\\ " +":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_evaluation_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`num_fit_clients `\\" +":py:obj:`num_fit_clients `\\" " \\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 -msgid "FedProx" -msgstr "" - -#: flwr.server.strategy.fedprox.FedProx:3 of -msgid "Implementation based on https://arxiv.org/abs/1812.06127" +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 +msgid "FedAvgAndroid" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:5 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"The strategy in itself will not be different than FedAvg, the client " -"needs to be adjusted. A proximal term needs to be added to the loss " -"function during the training:" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:9 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" -"\n" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:12 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"Where $w^t$ are the global parameters and $w$ are the local weights the " -"function will be optimized with." +":py:obj:`bytes_to_ndarray " +"`\\ \\(tensor\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:15 of -msgid "In PyTorch, for example, the loss would go from:" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of +msgid "Deserialize NumPy array from bytes." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:21 of -msgid "To:" -msgstr "" - -#: flwr.server.strategy.fedprox.FedProx:30 of -msgid "" -"With `global_params` being a copy of the parameters before the training " -"takes place." -msgstr "" - -#: flwr.server.strategy.fedprox.FedProx:65 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"The weight of the proximal term used in the optimization. 
0.0 makes this " -"strategy equivalent to FedAvg, and the higher the coefficient, the more " -"regularization will be used (that is, the client parameters will need to " -"be closer to the server parameters during training)." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`ndarray_to_bytes " +"`\\ \\(ndarray\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of +msgid "Serialize NumPy array to bytes." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`ndarrays_to_parameters " +"`\\ " +"\\(ndarrays\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`num_evaluation_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`num_fit_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of -msgid "Sends the proximal factor mu to the clients" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`parameters_to_ndarrays " +"`\\ " +"\\(parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 -msgid "FedTrimmedAvg" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 +#: of +msgid "Convert parameters object to NumPy weights." 
msgstr "" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of -msgid "Implemented based on: https://arxiv.org/abs/1803.01498" +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +msgid "FedAvgM" msgstr "" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of -msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." +#: flwr.server.strategy.fedavgm.FedAvgM:3 of +msgid "Implementation based on https://arxiv.org/abs/1909.06335" +msgstr "" + +#: flwr.server.strategy.fedavgm.FedAvgM:25 of +msgid "" +"Server-side learning rate used in server-side optimization. Defaults to " +"1.0." +msgstr "" + +#: flwr.server.strategy.fedavgm.FedAvgM:28 of +msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`aggregate_fit " -"`\\ " +":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using trimmed average." -msgstr "" - #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`configure_fit " -"`\\ " +":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_evaluation_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_fit_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 -msgid "FedXgbBagging" +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 +msgid "FedMedian" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ " +"`\\ " "\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation metrics using average." 
-msgstr "" - -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`aggregate_fit " -"`\\ " +":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of -msgid "Aggregate fit results using bagging." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of +msgid "Aggregate fit results using median." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`configure_evaluate " -"`\\ " +"`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`configure_fit " -"`\\ " +":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_evaluation_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_fit_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 -msgid "FedXgbCyclic" +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 +msgid "FedOpt" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedopt.FedOpt:33 of +msgid "Momentum parameter. Defaults to 0.0." +msgstr "" + +#: flwr.server.strategy.fedopt.FedOpt:35 of +msgid "Second moment parameter. Defaults to 0.0." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\," -" results\\, failures\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_evaluation_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 -msgid "FedXgbNnAvg" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of -msgid "" -"This strategy is deprecated, but a copy of it is available in Flower " -"Baselines: " -"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 +msgid "FedProx" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: flwr.server.strategy.fedprox.FedProx:3 of +msgid "Implementation based on https://arxiv.org/abs/1812.06127" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedprox.FedProx:5 of msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\, " -"results\\, failures\\)" +"The strategy in itself will not be different than FedAvg, the client " +"needs to be adjusted. 
A proximal term needs to be added to the loss " +"function during the training:" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedprox.FedProx:9 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +"\n" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedprox.FedProx:12 of msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"Where $w^t$ are the global parameters and $w$ are the local weights the " +"function will be optimized with." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.strategy.fedprox.FedProx:15 of +msgid "In PyTorch, for example, the loss would go from:" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.strategy.fedprox.FedProx:21 of +msgid "To:" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedprox.FedProx:30 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"With `global_params` being a copy of the parameters before the training " +"takes place." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedprox.FedProx:65 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 -msgid "FedYogi" -msgstr "" - -#: flwr.server.strategy.fedyogi.FedYogi:32 of -msgid "Server-side learning rate. Defaults to 1e-2." -msgstr "" - -#: flwr.server.strategy.fedyogi.FedYogi:34 of -msgid "Client-side learning rate. Defaults to 0.0316." -msgstr "" - -#: flwr.server.strategy.fedyogi.FedYogi:40 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." +"The weight of the proximal term used in the optimization. 0.0 makes this " +"strategy equivalent to FedAvg, and the higher the coefficient, the more " +"regularization will be used (that is, the client parameters will need to " +"be closer to the server parameters during training)." 
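The FedProx entries above note that the server-side strategy behaves like FedAvg and that the client adds the proximal term (mu / 2) * ||w - w^t||^2 to its local loss. A minimal PyTorch sketch of such a client-side training loop, assuming standard `net`, `trainloader`, `optimizer`, and `criterion` objects that are not defined here:

    import torch

    def train_one_epoch(net, trainloader, optimizer, criterion, proximal_mu, device="cpu"):
        """Run one local epoch with the FedProx proximal term added to the loss."""
        # Freeze a copy of the global parameters w^t before local training starts.
        global_params = [p.detach().clone() for p in net.parameters()]
        net.train()
        for inputs, labels in trainloader:
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            # (mu / 2) * ||w - w^t||^2, summed over all parameter tensors.
            proximal_term = 0.0
            for local_p, global_p in zip(net.parameters(), global_params):
                proximal_term += torch.square(local_p - global_p).sum()
            loss = criterion(net(inputs), labels) + (proximal_mu / 2) * proximal_term
            loss.backward()
            optimizer.step()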
msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," +"`\\ \\(server\\_round\\," " results\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`aggregate_fit `\\ " +":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," +"`\\ \\(server\\_round\\," " parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`configure_fit `\\ " +":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_evaluation_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_fit_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 -msgid "Krum" +#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of +msgid "Sends the proximal factor mu to the clients" msgstr "" -#: flwr.server.strategy.krum.Krum:3 of -msgid "Implementation based on https://arxiv.org/abs/1703.02757" +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 +msgid "FedTrimmedAvg" msgstr "" -#: flwr.server.strategy.krum.Krum:17 of -msgid "" -"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" -" that case classical Krum is applied." +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of +msgid "Implemented based on: https://arxiv.org/abs/1803.01498" +msgstr "" + +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of +msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`aggregate_fit `\\ " +":py:obj:`aggregate_fit " +"`\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of -msgid "Aggregate fit results using Krum." +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using trimmed average." 
msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`configure_fit `\\ " +":py:obj:`configure_fit " +"`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_evaluation_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`num_fit_clients `\\ " +":py:obj:`num_fit_clients " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 -msgid "QFedAvg" +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 +msgid "FedXgbBagging" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation metrics using average." +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`aggregate_fit `\\ " +":py:obj:`aggregate_fit " +"`\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of +msgid "Aggregate fit results using bagging." 
+msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`configure_fit `\\ " +":py:obj:`configure_fit " +"`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`num_evaluation_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`num_fit_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 -msgid "Strategy" +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 +msgid "FedXgbCyclic" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 #: of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ " +"`\\ " "\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation results." -msgstr "" - -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 #: of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" - -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of -msgid "Aggregate training results." 
+":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\," +" results\\, failures\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 #: of msgid "" ":py:obj:`configure_evaluate " -"`\\ " +"`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 #: of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 #: of msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.evaluate:1 of -msgid "Evaluate the current model parameters." -msgstr "" - -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 #: of msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of -msgid "Initialize the (global) model parameters." +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " -"one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of -msgid "Exceptions that occurred while the server was waiting for client updates." +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 +msgid "FedXgbNnAvg" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of msgid "" -"**aggregation_result** -- The aggregated evaluation result. Aggregation " -"typically uses some variant of a weighted average." +"This strategy is deprecated, but a copy of it is available in Flower " +"Baselines: " +"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" -" one of the previously selected clients. 
Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the new global model parameters (i.e., it will replace the " -"previous parameters with the ones returned from this method). If `None` " -"is returned (e.g., because there were only failures and no viable " -"results) then the server will no update the previous model parameters, " -"the updates received in this round are discarded, and the global model " -"parameters remain the same." +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\, " +"results\\, failures\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This function can be used to perform centralized (i.e., server-side) " -"evaluation of model parameters." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:11 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**evaluation_result** -- The evaluation result, usually a Tuple " -"containing loss and a dictionary containing task-specific metrics (e.g., " -"accuracy)." +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the initial global model parameters." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:2 -msgid "simulation" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:17::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\[\\, ...\\]\\)" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:17::1 -#: flwr.simulation.app.start_simulation:1 of -msgid "Start a Ray-based Flower simulation server." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 -msgid "start\\_simulation" +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 +msgid "FedYogi" msgstr "" -#: flwr.simulation.app.start_simulation:3 of -msgid "" -"A function creating client instances. The function must take a single " -"`str` argument called `cid`. It should return a single client instance of" -" type Client. Note that the created client instances are ephemeral and " -"will often be destroyed after a single method invocation. Since client " -"instances are not long-lived, they should not attempt to carry state over" -" method invocations. 
Any state required by the instance (model, dataset, " -"hyperparameters, ...) should be (re-)created in either the call to " -"`client_fn` or the call to any of the client methods (e.g., load " -"evaluation data in the `evaluate` method itself)." +#: flwr.server.strategy.fedyogi.FedYogi:32 of +msgid "Server-side learning rate. Defaults to 1e-2." msgstr "" -#: flwr.simulation.app.start_simulation:13 of +#: flwr.server.strategy.fedyogi.FedYogi:34 of +msgid "Client-side learning rate. Defaults to 0.0316." +msgstr "" + +#: flwr.server.strategy.fedyogi.FedYogi:40 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The total number of clients in this simulation. This must be set if " -"`clients_ids` is not set and vice-versa." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:16 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"List `client_id`s for each client. This is only required if `num_clients`" -" is not set. Setting both `num_clients` and `clients_ids` with " -"`len(clients_ids)` not equal to `num_clients` generates an error." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.simulation.app.start_simulation:20 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"CPU and GPU resources for a single client. Supported keys are `num_cpus` " -"and `num_gpus`. To understand the GPU utilization caused by `num_gpus`, " -"as well as using custom resources, please consult the Ray documentation." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:25 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"An implementation of the abstract base class `flwr.server.Server`. If no " -"instance is provided, then `start_server` will create one." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:31 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"An implementation of the abstract base class `flwr.server.Strategy`. If " -"no strategy is provided, then `start_server` will use " -"`flwr.server.strategy.FedAvg`." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.simulation.app.start_simulation:35 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_simulation` will use " -"`flwr.server.client_manager.SimpleClientManager`." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.simulation.app.start_simulation:39 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args: { \"ignore_reinit_error\": True, " -"\"include_dashboard\": False } An empty dictionary can be used " -"(ray_init_args={}) to prevent any arguments from being passed to " -"ray.init." 
+":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.simulation.app.start_simulation:39 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args:" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.simulation.app.start_simulation:43 of -msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 +msgid "Krum" msgstr "" -#: flwr.simulation.app.start_simulation:45 of -msgid "" -"An empty dictionary can be used (ray_init_args={}) to prevent any " -"arguments from being passed to ray.init." +#: flwr.server.strategy.krum.Krum:3 of +msgid "Implementation based on https://arxiv.org/abs/1703.02757" msgstr "" -#: flwr.simulation.app.start_simulation:48 of +#: flwr.server.strategy.krum.Krum:17 of msgid "" -"Set to True to prevent `ray.shutdown()` in case " -"`ray.is_initialized()=True`." +"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" +" that case classical Krum is applied." msgstr "" -#: flwr.simulation.app.start_simulation:50 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Optionally specify the type of actor to use. The actor object, which " -"persists throughout the simulation, will be the process in charge of " -"running the clients' jobs (i.e. their `fit()` method)." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:54 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"If you want to create your own Actor classes, you might need to pass some" -" input argument. You can use this dictionary for such purpose." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.simulation.app.start_simulation:57 of -msgid "" -"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " -"the VCE to choose in which node the actor is placed. If you are an " -"advanced user needed more control you can use lower-level scheduling " -"strategies to pin actors to specific compute nodes (e.g. via " -"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." -" For all details, please refer to the Ray documentation: " -"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of +msgid "Aggregate fit results using Krum." msgstr "" -#: flwr.simulation.app.start_simulation:66 of -msgid "**hist** -- Object containing metrics from training." 
+#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:1 -msgid "Changelog" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:3 -msgid "Unreleased" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:17 -#: ../../source/ref-changelog.md:110 ../../source/ref-changelog.md:210 -#: ../../source/ref-changelog.md:294 ../../source/ref-changelog.md:358 -#: ../../source/ref-changelog.md:416 ../../source/ref-changelog.md:485 -#: ../../source/ref-changelog.md:614 ../../source/ref-changelog.md:656 -#: ../../source/ref-changelog.md:723 ../../source/ref-changelog.md:789 -#: ../../source/ref-changelog.md:834 ../../source/ref-changelog.md:873 -#: ../../source/ref-changelog.md:906 ../../source/ref-changelog.md:956 -msgid "What's new?" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:80 -#: ../../source/ref-changelog.md:192 ../../source/ref-changelog.md:282 -#: ../../source/ref-changelog.md:346 ../../source/ref-changelog.md:404 -#: ../../source/ref-changelog.md:473 ../../source/ref-changelog.md:535 -#: ../../source/ref-changelog.md:554 ../../source/ref-changelog.md:710 -#: ../../source/ref-changelog.md:781 ../../source/ref-changelog.md:818 -#: ../../source/ref-changelog.md:861 -msgid "Incompatible changes" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:9 -msgid "v1.7.0 (2024-02-05)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:11 ../../source/ref-changelog.md:104 -#: ../../source/ref-changelog.md:204 ../../source/ref-changelog.md:288 -#: ../../source/ref-changelog.md:352 ../../source/ref-changelog.md:410 -#: ../../source/ref-changelog.md:479 ../../source/ref-changelog.md:548 -msgid "Thanks to our contributors" +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 +msgid "QFedAvg" msgstr "" -#: ../../source/ref-changelog.md:13 ../../source/ref-changelog.md:106 -#: ../../source/ref-changelog.md:206 ../../source/ref-changelog.md:290 -#: ../../source/ref-changelog.md:354 ../../source/ref-changelog.md:412 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"We would like to give our special thanks to all the contributors who made" -" the new version of Flower possible (in `git shortlog` order):" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:15 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " -"Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo " -"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " -"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " -"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " -"Shaaban`, `Yan Gao`, `Yasar Abbas` " +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:19 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce stateful clients (experimental)** " -"([#2770](https://github.com/adap/flower/pull/2770), " -"[#2686](https://github.com/adap/flower/pull/2686), " -"[#2696](https://github.com/adap/flower/pull/2696), " -"[#2643](https://github.com/adap/flower/pull/2643), " -"[#2769](https://github.com/adap/flower/pull/2769))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:21 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"Subclasses of `Client` and `NumPyClient` can now store local state that " -"remains on the client. Let's start with the highlight first: this new " -"feature is compatible with both simulated clients (via " -"`start_simulation`) and networked clients (via `start_client`). It's also" -" the first preview of new abstractions like `Context` and `RecordSet`. " -"Clients can access state of type `RecordSet` via `state: RecordSet = " -"self.context.state`. Changes to this `RecordSet` are preserved across " -"different rounds of execution to enable stateful computations in a " -"unified way across simulation and deployment." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:23 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**Improve performance** " -"([#2293](https://github.com/adap/flower/pull/2293))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:25 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" -"place aggregation to reduce memory consumption. The Flower client " -"serialization/deserialization has been rewritten from the ground up, " -"which results in significant speedups, especially when the client-side " -"training time is short." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:27 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**Support Federated Learning with Apple MLX and Flower** " -"([#2693](https://github.com/adap/flower/pull/2693))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:29 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"Flower has official support for federated learning using [Apple " -"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " -"example." 
+":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:31 -msgid "" -"**Introduce new XGBoost cyclic strategy** " -"([#2666](https://github.com/adap/flower/pull/2666), " -"[#2668](https://github.com/adap/flower/pull/2668))" +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 +msgid "Strategy" msgstr "" -#: ../../source/ref-changelog.md:33 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"A new strategy called `FedXgbCyclic` supports a client-by-client style of" -" training (often called cyclic). The `xgboost-comprehensive` code example" -" shows how to use it in a full project. In addition to that, `xgboost-" -"comprehensive` now also supports simulation mode. With this, Flower " -"offers best-in-class XGBoost support." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:35 -msgid "" -"**Support Python 3.11** " -"([#2394](https://github.com/adap/flower/pull/2394))" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation results." msgstr "" -#: ../../source/ref-changelog.md:37 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " -"ensure better support for users using more recent Python versions." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:39 -msgid "" -"**Update gRPC and ProtoBuf dependencies** " -"([#2814](https://github.com/adap/flower/pull/2814))" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of +msgid "Aggregate training results." msgstr "" -#: ../../source/ref-changelog.md:41 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"The `grpcio` and `protobuf` dependencies were updated to their latest " -"versions for improved security and performance." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:43 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce Docker image for Flower server** " -"([#2700](https://github.com/adap/flower/pull/2700), " -"[#2688](https://github.com/adap/flower/pull/2688), " -"[#2705](https://github.com/adap/flower/pull/2705), " -"[#2695](https://github.com/adap/flower/pull/2695), " -"[#2747](https://github.com/adap/flower/pull/2747), " -"[#2746](https://github.com/adap/flower/pull/2746), " -"[#2680](https://github.com/adap/flower/pull/2680), " -"[#2682](https://github.com/adap/flower/pull/2682), " -"[#2701](https://github.com/adap/flower/pull/2701))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:45 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"The Flower server can now be run using an official Docker image. A new " -"how-to guide explains [how to run Flower using " -"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html). An official Flower client Docker image will follow." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:47 -msgid "" -"**Introduce** `flower-via-docker-compose` **example** " -"([#2626](https://github.com/adap/flower/pull/2626))" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.evaluate:1 of +msgid "Evaluate the current model parameters." msgstr "" -#: ../../source/ref-changelog.md:49 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce** `quickstart-sklearn-tabular` **example** " -"([#2719](https://github.com/adap/flower/pull/2719))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:51 -msgid "" -"**Introduce** `custom-metrics` **example** " -"([#1958](https://github.com/adap/flower/pull/1958))" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of +msgid "Initialize the (global) model parameters." msgstr "" -#: ../../source/ref-changelog.md:53 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of msgid "" -"**Update code examples to use Flower Datasets** " -"([#2450](https://github.com/adap/flower/pull/2450), " -"[#2456](https://github.com/adap/flower/pull/2456), " -"[#2318](https://github.com/adap/flower/pull/2318), " -"[#2712](https://github.com/adap/flower/pull/2712))" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " +"one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." msgstr "" -#: ../../source/ref-changelog.md:55 -msgid "" -"Several code examples were updated to use [Flower " -"Datasets](https://flower.ai/docs/datasets/)." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of +msgid "Exceptions that occurred while the server was waiting for client updates." msgstr "" -#: ../../source/ref-changelog.md:57 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of msgid "" -"**General updates to Flower Examples** " -"([#2381](https://github.com/adap/flower/pull/2381), " -"[#2805](https://github.com/adap/flower/pull/2805), " -"[#2782](https://github.com/adap/flower/pull/2782), " -"[#2806](https://github.com/adap/flower/pull/2806), " -"[#2829](https://github.com/adap/flower/pull/2829), " -"[#2825](https://github.com/adap/flower/pull/2825), " -"[#2816](https://github.com/adap/flower/pull/2816), " -"[#2726](https://github.com/adap/flower/pull/2726), " -"[#2659](https://github.com/adap/flower/pull/2659), " -"[#2655](https://github.com/adap/flower/pull/2655))" -msgstr "" - -#: ../../source/ref-changelog.md:59 -msgid "Many Flower code examples received substantial updates." +"**aggregation_result** -- The aggregated evaluation result. Aggregation " +"typically uses some variant of a weighted average." msgstr "" -#: ../../source/ref-changelog.md:61 ../../source/ref-changelog.md:154 -msgid "**Update Flower Baselines**" +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of +msgid "" +"Successful updates from the previously selected and configured clients. 
" +"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" +" one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." msgstr "" -#: ../../source/ref-changelog.md:63 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of msgid "" -"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " -"[#2771](https://github.com/adap/flower/pull/2771))" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the new global model parameters (i.e., it will replace the " +"previous parameters with the ones returned from this method). If `None` " +"is returned (e.g., because there were only failures and no viable " +"results) then the server will no update the previous model parameters, " +"the updates received in this round are discarded, and the global model " +"parameters remain the same." msgstr "" -#: ../../source/ref-changelog.md:64 -msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" +#: flwr.server.strategy.strategy.Strategy.evaluate:3 of +msgid "" +"This function can be used to perform centralized (i.e., server-side) " +"evaluation of model parameters." msgstr "" -#: ../../source/ref-changelog.md:65 -msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" +#: flwr.server.strategy.strategy.Strategy.evaluate:11 of +msgid "" +"**evaluation_result** -- The evaluation result, usually a Tuple " +"containing loss and a dictionary containing task-specific metrics (e.g., " +"accuracy)." msgstr "" -#: ../../source/ref-changelog.md:66 -msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of +msgid "" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the initial global model parameters." msgstr "" -#: ../../source/ref-changelog.md:67 -msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +#: ../../source/ref-api/flwr.server.workflow.rst:2 +msgid "workflow" msgstr "" -#: ../../source/ref-changelog.md:68 -msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +msgid "" +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" msgstr "" -#: ../../source/ref-changelog.md:70 -msgid "" -"**Improve documentation** " -"([#2674](https://github.com/adap/flower/pull/2674), " -"[#2480](https://github.com/adap/flower/pull/2480), " -"[#2826](https://github.com/adap/flower/pull/2826), " -"[#2727](https://github.com/adap/flower/pull/2727), " -"[#2761](https://github.com/adap/flower/pull/2761), " -"[#2900](https://github.com/adap/flower/pull/2900))" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of +msgid "Default workflow in Flower." 
msgstr "" -#: ../../source/ref-changelog.md:72 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"**Improved testing and development infrastructure** " -"([#2797](https://github.com/adap/flower/pull/2797), " -"[#2676](https://github.com/adap/flower/pull/2676), " -"[#2644](https://github.com/adap/flower/pull/2644), " -"[#2656](https://github.com/adap/flower/pull/2656), " -"[#2848](https://github.com/adap/flower/pull/2848), " -"[#2675](https://github.com/adap/flower/pull/2675), " -"[#2735](https://github.com/adap/flower/pull/2735), " -"[#2767](https://github.com/adap/flower/pull/2767), " -"[#2732](https://github.com/adap/flower/pull/2732), " -"[#2744](https://github.com/adap/flower/pull/2744), " -"[#2681](https://github.com/adap/flower/pull/2681), " -"[#2699](https://github.com/adap/flower/pull/2699), " -"[#2745](https://github.com/adap/flower/pull/2745), " -"[#2734](https://github.com/adap/flower/pull/2734), " -"[#2731](https://github.com/adap/flower/pull/2731), " -"[#2652](https://github.com/adap/flower/pull/2652), " -"[#2720](https://github.com/adap/flower/pull/2720), " -"[#2721](https://github.com/adap/flower/pull/2721), " -"[#2717](https://github.com/adap/flower/pull/2717), " -"[#2864](https://github.com/adap/flower/pull/2864), " -"[#2694](https://github.com/adap/flower/pull/2694), " -"[#2709](https://github.com/adap/flower/pull/2709), " -"[#2658](https://github.com/adap/flower/pull/2658), " -"[#2796](https://github.com/adap/flower/pull/2796), " -"[#2692](https://github.com/adap/flower/pull/2692), " -"[#2657](https://github.com/adap/flower/pull/2657), " -"[#2813](https://github.com/adap/flower/pull/2813), " -"[#2661](https://github.com/adap/flower/pull/2661), " -"[#2398](https://github.com/adap/flower/pull/2398))" +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" msgstr "" -#: ../../source/ref-changelog.md:74 -msgid "" -"The Flower testing and development infrastructure has received " -"substantial updates. This makes Flower 1.7 the most tested release ever." +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "The workflow for the SecAgg+ protocol." 
msgstr "" -#: ../../source/ref-changelog.md:76 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"**Update dependencies** " -"([#2753](https://github.com/adap/flower/pull/2753), " -"[#2651](https://github.com/adap/flower/pull/2651), " -"[#2739](https://github.com/adap/flower/pull/2739), " -"[#2837](https://github.com/adap/flower/pull/2837), " -"[#2788](https://github.com/adap/flower/pull/2788), " -"[#2811](https://github.com/adap/flower/pull/2811), " -"[#2774](https://github.com/adap/flower/pull/2774), " -"[#2790](https://github.com/adap/flower/pull/2790), " -"[#2751](https://github.com/adap/flower/pull/2751), " -"[#2850](https://github.com/adap/flower/pull/2850), " -"[#2812](https://github.com/adap/flower/pull/2812), " -"[#2872](https://github.com/adap/flower/pull/2872), " -"[#2736](https://github.com/adap/flower/pull/2736), " -"[#2756](https://github.com/adap/flower/pull/2756), " -"[#2857](https://github.com/adap/flower/pull/2857), " -"[#2757](https://github.com/adap/flower/pull/2757), " -"[#2810](https://github.com/adap/flower/pull/2810), " -"[#2740](https://github.com/adap/flower/pull/2740), " -"[#2789](https://github.com/adap/flower/pull/2789))" +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" msgstr "" -#: ../../source/ref-changelog.md:78 -msgid "" -"**General improvements** " -"([#2803](https://github.com/adap/flower/pull/2803), " -"[#2847](https://github.com/adap/flower/pull/2847), " -"[#2877](https://github.com/adap/flower/pull/2877), " -"[#2690](https://github.com/adap/flower/pull/2690), " -"[#2889](https://github.com/adap/flower/pull/2889), " -"[#2874](https://github.com/adap/flower/pull/2874), " -"[#2819](https://github.com/adap/flower/pull/2819), " -"[#2689](https://github.com/adap/flower/pull/2689), " -"[#2457](https://github.com/adap/flower/pull/2457), " -"[#2870](https://github.com/adap/flower/pull/2870), " -"[#2669](https://github.com/adap/flower/pull/2669), " -"[#2876](https://github.com/adap/flower/pull/2876), " -"[#2885](https://github.com/adap/flower/pull/2885), " -"[#2858](https://github.com/adap/flower/pull/2858), " -"[#2867](https://github.com/adap/flower/pull/2867), " -"[#2351](https://github.com/adap/flower/pull/2351), " -"[#2886](https://github.com/adap/flower/pull/2886), " -"[#2860](https://github.com/adap/flower/pull/2860), " -"[#2828](https://github.com/adap/flower/pull/2828), " -"[#2869](https://github.com/adap/flower/pull/2869), " -"[#2875](https://github.com/adap/flower/pull/2875), " -"[#2733](https://github.com/adap/flower/pull/2733), " -"[#2488](https://github.com/adap/flower/pull/2488), " -"[#2646](https://github.com/adap/flower/pull/2646), " -"[#2879](https://github.com/adap/flower/pull/2879), " -"[#2821](https://github.com/adap/flower/pull/2821), " -"[#2855](https://github.com/adap/flower/pull/2855), " -"[#2800](https://github.com/adap/flower/pull/2800), " -"[#2807](https://github.com/adap/flower/pull/2807), " -"[#2801](https://github.com/adap/flower/pull/2801), " -"[#2804](https://github.com/adap/flower/pull/2804), " -"[#2851](https://github.com/adap/flower/pull/2851), " -"[#2787](https://github.com/adap/flower/pull/2787), " -"[#2852](https://github.com/adap/flower/pull/2852), " -"[#2672](https://github.com/adap/flower/pull/2672), " -"[#2759](https://github.com/adap/flower/pull/2759))" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "The workflow for the SecAgg protocol." 
msgstr "" -#: ../../source/ref-changelog.md:82 -msgid "" -"**Deprecate** `start_numpy_client` " -"([#2563](https://github.com/adap/flower/pull/2563), " -"[#2718](https://github.com/adap/flower/pull/2718))" +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 +msgid "DefaultWorkflow" msgstr "" -#: ../../source/ref-changelog.md:84 -msgid "" -"Until now, clients of type `NumPyClient` needed to be started via " -"`start_numpy_client`. In our efforts to consolidate framework APIs, we " -"have introduced changes, and now all client types should start via " -"`start_client`. To continue using `NumPyClient` clients, you simply need " -"to first call the `.to_client()` method and then pass returned `Client` " -"object to `start_client`. The examples and the documentation have been " -"updated accordingly." +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 +msgid "SecAggPlusWorkflow" msgstr "" -#: ../../source/ref-changelog.md:86 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 +#: of msgid "" -"**Deprecate legacy DP wrappers** " -"([#2749](https://github.com/adap/flower/pull/2749))" +"The SecAgg+ protocol ensures the secure summation of integer vectors " +"owned by multiple parties, without accessing any individual integer " +"vector. This workflow allows the server to compute the weighted average " +"of model parameters across all clients, ensuring individual contributions" +" remain private. This is achieved by clients sending both, a weighting " +"factor and a weighted version of the locally updated parameters, both of " +"which are masked for privacy. Specifically, each client uploads \"[w, w *" +" params]\" with masks, where weighting factor 'w' is the number of " +"examples ('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." msgstr "" -#: ../../source/ref-changelog.md:88 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 +#: of msgid "" -"Legacy DP wrapper classes are deprecated, but still functional. This is " -"in preparation for an all-new pluggable version of differential privacy " -"support in Flower." +"The protocol involves four main stages: - 'setup': Send SecAgg+ " +"configuration to clients and collect their public keys. - 'share keys': " +"Broadcast public keys among clients and collect encrypted secret" msgstr "" -#: ../../source/ref-changelog.md:90 -msgid "" -"**Make optional arg** `--callable` **in** `flower-client` **a required " -"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 +#: of +msgid "key shares." msgstr "" -#: ../../source/ref-changelog.md:92 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:18 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:18 +#: of msgid "" -"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " -"([#2890](https://github.com/adap/flower/pull/2890))" +"'collect masked vectors': Forward encrypted secret key shares to target " +"clients and collect masked model parameters." 
msgstr "" -#: ../../source/ref-changelog.md:94 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:20 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:20 +#: of msgid "" -"**Drop experimental** `Task` **fields** " -"([#2866](https://github.com/adap/flower/pull/2866), " -"[#2865](https://github.com/adap/flower/pull/2865))" +"'unmask': Collect secret key shares to decrypt and aggregate the model " +"parameters." msgstr "" -#: ../../source/ref-changelog.md:96 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:22 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:22 +#: of msgid "" -"Experimental fields `sa`, `legacy_server_message` and " -"`legacy_client_message` were removed from `Task` message. The removed " -"fields are superseded by the new `RecordSet` abstraction." +"Only the aggregated model parameters are exposed and passed to " +"`Strategy.aggregate_fit`, ensuring individual data privacy." msgstr "" -#: ../../source/ref-changelog.md:98 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:25 +#: of msgid "" -"**Retire MXNet examples** " -"([#2724](https://github.com/adap/flower/pull/2724))" +"The number of shares into which each client's private key is split under " +"the SecAgg+ protocol. If specified as a float, it represents the " +"proportion of all selected clients, and the number of shares will be set " +"dynamically in the run time. A private key can be reconstructed from " +"these shares, allowing for the secure aggregation of model updates. Each " +"client sends one share to each of its neighbors while retaining one." msgstr "" -#: ../../source/ref-changelog.md:100 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:25 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:32 +#: of msgid "" -"The development of the MXNet fremework has ended and the project is now " -"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " -"examples won't receive updates." +"The minimum number of shares required to reconstruct a client's private " +"key, or, if specified as a float, it represents the proportion of the " +"total number of shares needed for reconstruction. This threshold ensures " +"privacy by allowing for the recovery of contributions from dropped " +"clients during aggregation, without compromising individual client data." msgstr "" -#: ../../source/ref-changelog.md:102 -msgid "v1.6.0 (2023-11-28)" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:31 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:38 +#: of +msgid "" +"The maximum value of the weight that can be assigned to any single " +"client's update during the weighted average calculation on the server " +"side, e.g., in the FedAvg algorithm." msgstr "" -#: ../../source/ref-changelog.md:108 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:35 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:42 +#: of msgid "" -"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " -"`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " -"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," -" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " -"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " -"`cnxdeveloper`, `k3nfalt` " +"The range within which model parameters are clipped before quantization. " +"This parameter ensures each model parameter is bounded within " +"[-clipping_range, clipping_range], facilitating quantization." msgstr "" -#: ../../source/ref-changelog.md:112 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:39 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:46 +#: of msgid "" -"**Add experimental support for Python 3.12** " -"([#2565](https://github.com/adap/flower/pull/2565))" +"The size of the range into which floating-point model parameters are " +"quantized, mapping each parameter to an integer in [0, " +"quantization_range-1]. This facilitates cryptographic operations on the " +"model updates." msgstr "" -#: ../../source/ref-changelog.md:114 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:43 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:50 +#: of msgid "" -"**Add new XGBoost examples** " -"([#2612](https://github.com/adap/flower/pull/2612), " -"[#2554](https://github.com/adap/flower/pull/2554), " -"[#2617](https://github.com/adap/flower/pull/2617), " -"[#2618](https://github.com/adap/flower/pull/2618), " -"[#2619](https://github.com/adap/flower/pull/2619), " -"[#2567](https://github.com/adap/flower/pull/2567))" +"The range of values from which random mask entries are uniformly sampled " +"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " +"Please use 2**n values for `modulus_range` to prevent overflow issues." msgstr "" -#: ../../source/ref-changelog.md:116 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:47 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:54 +#: of msgid "" -"We have added a new `xgboost-quickstart` example alongside a new " -"`xgboost-comprehensive` example that goes more in-depth." +"The timeout duration in seconds. If specified, the workflow will wait for" +" replies for this duration each time. If `None`, there is no time limit " +"and the workflow will wait until replies for all messages are received." msgstr "" -#: ../../source/ref-changelog.md:118 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 +#: of msgid "" -"**Add Vertical FL example** " -"([#2598](https://github.com/adap/flower/pull/2598))" +"Generally, higher `num_shares` means more robust to dropouts while " +"increasing the computational costs; higher `reconstruction_threshold` " +"means better privacy guarantees but less tolerance to dropouts." msgstr "" -#: ../../source/ref-changelog.md:120 -msgid "" -"We had many questions about Vertical Federated Learning using Flower, so " -"we decided to add an simple example for it on the [Titanic " -"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " -"tutorial (in the README)." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:58 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:64 +#: of +msgid "Too large `max_weight` may compromise the precision of the quantization." 
msgstr "" -#: ../../source/ref-changelog.md:122 -msgid "" -"**Support custom** `ClientManager` **in** `start_driver()` " -"([#2292](https://github.com/adap/flower/pull/2292))" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 +#: of +msgid "`modulus_range` must be 2**n and larger than `quantization_range`." msgstr "" -#: ../../source/ref-changelog.md:124 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 +#: of msgid "" -"**Update REST API to support create and delete nodes** " -"([#2283](https://github.com/adap/flower/pull/2283))" +"When `num_shares` is a float, it is interpreted as the proportion of all " +"selected clients, and hence the number of shares will be determined in " +"the runtime. This allows for dynamic adjustment based on the total number" +" of participating clients." msgstr "" -#: ../../source/ref-changelog.md:126 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:69 +#: of msgid "" -"**Update the Android SDK** " -"([#2187](https://github.com/adap/flower/pull/2187))" +"Similarly, when `reconstruction_threshold` is a float, it is interpreted " +"as the proportion of the number of shares needed for the reconstruction " +"of a private key. This feature enables flexibility in setting the " +"security threshold relative to the number of distributed shares." msgstr "" -#: ../../source/ref-changelog.md:128 -msgid "Add gRPC request-response capability to the Android SDK." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:73 +#: of +msgid "" +"`num_shares`, `reconstruction_threshold`, and the quantization parameters" +" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg+" +" protocol." msgstr "" -#: ../../source/ref-changelog.md:130 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Update the C++ SDK** " -"([#2537](https://github.com/adap/flower/pull/2537), " -"[#2528](https://github.com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +":py:obj:`collect_masked_vectors_stage " +"`\\" +" \\(driver\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:132 -msgid "Add gRPC request-response capability to the C++ SDK." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "Execute the 'collect masked vectors' stage." msgstr "" -#: ../../source/ref-changelog.md:134 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Make HTTPS the new default** " -"([#2591](https://github.com/adap/flower/pull/2591), " -"[#2636](https://github.com/adap/flower/pull/2636))" +":py:obj:`setup_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:136 -msgid "" -"Flower is moving to HTTPS by default. The new `flower-server` requires " -"passing `--certificates`, but users can enable `--insecure` to use HTTP " -"for prototyping. 
The same applies to `flower-client`, which can either " -"use user-provided credentials or gRPC-bundled certificates to connect to " -"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " -"enable insecure HTTP connections." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 +#: of +msgid "Execute the 'setup' stage." msgstr "" -#: ../../source/ref-changelog.md:138 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"For backward compatibility, `start_client()` and `start_numpy_client()` " -"will still start in insecure mode by default. In a future release, " -"insecure connections will require user opt-in by passing `insecure=True`." +":py:obj:`share_keys_stage " +"`\\ " +"\\(driver\\, context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:140 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 +#: of +msgid "Execute the 'share keys' stage." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:142 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: of +msgid "Execute the 'unmask' stage." +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 +msgid "SecAggWorkflow" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of msgid "" -"Using the `client_fn`, Flower clients can interchangeably run as " -"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" -" `start_simulation`) without requiring changes to how the client class is" -" defined and instantiated. The `to_client()` function is introduced to " -"convert a `NumPyClient` to a `Client`." +"Bases: " +":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" msgstr "" -#: ../../source/ref-changelog.md:144 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of msgid "" -"**Add new** `Bulyan` **strategy** " -"([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891))" +"The SecAgg protocol ensures the secure summation of integer vectors owned" +" by multiple parties, without accessing any individual integer vector. " +"This workflow allows the server to compute the weighted average of model " +"parameters across all clients, ensuring individual contributions remain " +"private. This is achieved by clients sending both, a weighting factor and" +" a weighted version of the locally updated parameters, both of which are " +"masked for privacy. 
Specifically, each client uploads \"[w, w * params]\"" +" with masks, where weighting factor 'w' is the number of examples " +"('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." msgstr "" -#: ../../source/ref-changelog.md:146 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 of msgid "" -"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " -"2018](https://arxiv.org/abs/1802.07927)" +"The protocol involves four main stages: - 'setup': Send SecAgg " +"configuration to clients and collect their public keys. - 'share keys': " +"Broadcast public keys among clients and collect encrypted secret" msgstr "" -#: ../../source/ref-changelog.md:148 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 of msgid "" -"**Add new** `XGB Bagging` **strategy** " -"([#2611](https://github.com/adap/flower/pull/2611))" +"Each client's private key is split into N shares under the SecAgg " +"protocol, where N is the number of selected clients." msgstr "" -#: ../../source/ref-changelog.md:150 ../../source/ref-changelog.md:152 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:56 of msgid "" -"**Introduce `WorkloadState`** " -"([#2564](https://github.com/adap/flower/pull/2564), " -"[#2632](https://github.com/adap/flower/pull/2632))" +"Generally, higher `reconstruction_threshold` means better privacy " +"guarantees but less tolerance to dropouts." msgstr "" -#: ../../source/ref-changelog.md:156 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 of msgid "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " -"[#2286](https://github.com/adap/flower/pull/2286), " -"[#2509](https://github.com/adap/flower/pull/2509))" +"When `reconstruction_threshold` is a float, it is interpreted as the " +"proportion of the number of all selected clients needed for the " +"reconstruction of a private key. This feature enables flexibility in " +"setting the security threshold relative to the number of selected " +"clients." msgstr "" -#: ../../source/ref-changelog.md:158 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:64 of msgid "" -"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400))" +"`reconstruction_threshold`, and the quantization parameters " +"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg " +"protocol." 
msgstr "" -#: ../../source/ref-changelog.md:160 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " -"[#2507](https://github.com/adap/flower/pull/2507))" +":py:obj:`collect_masked_vectors_stage " +"`\\ " +"\\(driver\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:162 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" +":py:obj:`setup_stage `\\" +" \\(driver\\, context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:164 -msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`share_keys_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:166 -msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:168 -msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" +#: ../../source/ref-api/flwr.simulation.rst:2 +msgid "simulation" msgstr "" -#: ../../source/ref-changelog.md:170 -msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +#: ../../source/ref-api/flwr.simulation.rst:19::1 +msgid "" +":py:obj:`start_simulation `\\ \\(\\*\\," +" client\\_fn\\[\\, ...\\]\\)" msgstr "" -#: ../../source/ref-changelog.md:172 -msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +#: ../../source/ref-api/flwr.simulation.rst:19::1 +#: flwr.simulation.app.start_simulation:1 of +msgid "Start a Ray-based Flower simulation server." msgstr "" -#: ../../source/ref-changelog.md:174 -msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +#: ../../source/ref-api/flwr.simulation.rst:19::1 +msgid "" +":py:obj:`run_simulation_from_cli " +"`\\ \\(\\)" msgstr "" -#: ../../source/ref-changelog.md:176 -msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +#: ../../source/ref-api/flwr.simulation.rst:19::1 +#: flwr.simulation.run_simulation.run_simulation_from_cli:1 of +msgid "Run Simulation Engine from the CLI." msgstr "" -#: ../../source/ref-changelog.md:178 +#: ../../source/ref-api/flwr.simulation.rst:19::1 msgid "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:180 -msgid "" -"**General updates to Flower Examples** " -"([#2384](https://github.com/adap/flower/pull/2384), " -"[#2425](https://github.com/adap/flower/pull/2425), " -"[#2526](https://github.com/adap/flower/pull/2526), " -"[#2302](https://github.com/adap/flower/pull/2302), " -"[#2545](https://github.com/adap/flower/pull/2545))" +#: ../../source/ref-api/flwr.simulation.rst:19::1 +#: flwr.simulation.run_simulation.run_simulation:1 of +msgid "Run a Flower App using the Simulation Engine." 
msgstr "" -#: ../../source/ref-changelog.md:182 -msgid "" -"**General updates to Flower Baselines** " -"([#2301](https://github.com/adap/flower/pull/2301), " -"[#2305](https://github.com/adap/flower/pull/2305), " -"[#2307](https://github.com/adap/flower/pull/2307), " -"[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435), " -"[#2462](https://github.com/adap/flower/pull/2462), " -"[#2463](https://github.com/adap/flower/pull/2463), " -"[#2461](https://github.com/adap/flower/pull/2461), " -"[#2469](https://github.com/adap/flower/pull/2469), " -"[#2466](https://github.com/adap/flower/pull/2466), " -"[#2471](https://github.com/adap/flower/pull/2471), " -"[#2472](https://github.com/adap/flower/pull/2472), " -"[#2470](https://github.com/adap/flower/pull/2470))" +#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 +msgid "run\\_simulation" msgstr "" -#: ../../source/ref-changelog.md:184 +#: flwr.simulation.run_simulation.run_simulation:3 of msgid "" -"**General updates to the simulation engine** " -"([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448), " -"[#2294](https://github.com/adap/flower/pull/2294))" +"The `ServerApp` to be executed. It will send messages to different " +"`ClientApp` instances running on different (virtual) SuperNodes." msgstr "" -#: ../../source/ref-changelog.md:186 +#: flwr.simulation.run_simulation.run_simulation:6 of msgid "" -"**General updates to Flower SDKs** " -"([#2288](https://github.com/adap/flower/pull/2288), " -"[#2429](https://github.com/adap/flower/pull/2429), " -"[#2555](https://github.com/adap/flower/pull/2555), " -"[#2543](https://github.com/adap/flower/pull/2543), " -"[#2544](https://github.com/adap/flower/pull/2544), " -"[#2597](https://github.com/adap/flower/pull/2597), " -"[#2623](https://github.com/adap/flower/pull/2623))" +"The `ClientApp` to be executed by each of the SuperNodes. It will receive" +" messages sent by the `ServerApp`." 
msgstr "" -#: ../../source/ref-changelog.md:188 +#: flwr.simulation.run_simulation.run_simulation:9 of msgid "" -"**General improvements** " -"([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[#2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[#2317](https://github.com/adap/flower/pull/2317), " -"[#2349](https://github.com/adap/flower/pull/2349), " -"[#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446), " -"[#2561](https://github.com/adap/flower/pull/2561), " -"[#2273](https://github.com/adap/flower/pull/2273), " -"[#2267](https://github.com/adap/flower/pull/2267), " -"[#2274](https://github.com/adap/flower/pull/2274), " -"[#2275](https://github.com/adap/flower/pull/2275), " -"[#2432](https://github.com/adap/flower/pull/2432), " -"[#2251](https://github.com/adap/flower/pull/2251), " -"[#2321](https://github.com/adap/flower/pull/2321), " -"[#1936](https://github.com/adap/flower/pull/1936), " -"[#2408](https://github.com/adap/flower/pull/2408), " -"[#2413](https://github.com/adap/flower/pull/2413), " -"[#2401](https://github.com/adap/flower/pull/2401), " -"[#2531](https://github.com/adap/flower/pull/2531), " -"[#2534](https://github.com/adap/flower/pull/2534), " -"[#2535](https://github.com/adap/flower/pull/2535), " -"[#2521](https://github.com/adap/flower/pull/2521), " -"[#2553](https://github.com/adap/flower/pull/2553), " -"[#2596](https://github.com/adap/flower/pull/2596))" +"Number of nodes that run a ClientApp. They can be sampled by a Driver in " +"the ServerApp and receive a Message describing what the ClientApp should " +"perform." msgstr "" -#: ../../source/ref-changelog.md:190 ../../source/ref-changelog.md:280 -#: ../../source/ref-changelog.md:344 ../../source/ref-changelog.md:398 -#: ../../source/ref-changelog.md:465 -msgid "Flower received many improvements under the hood, too many to list here." +#: flwr.simulation.run_simulation.run_simulation:13 of +msgid "A simulation backend that runs `ClientApp`s." msgstr "" -#: ../../source/ref-changelog.md:194 +#: flwr.simulation.run_simulation.run_simulation:15 of msgid "" -"**Remove support for Python 3.7** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" +"'A dictionary, e.g {\"\": , \"\": } to " +"configure a backend. Values supported in are those included by " +"`flwr.common.typing.ConfigsRecordValues`." msgstr "" -#: ../../source/ref-changelog.md:196 +#: flwr.simulation.run_simulation.run_simulation:19 of msgid "" -"Python 3.7 support was deprecated in Flower 1.5, and this release removes" -" support. Flower now requires Python 3.8." +"A boolean to indicate whether to enable GPU growth on the main thread. " +"This is desirable if you make use of a TensorFlow model on your " +"`ServerApp` while having your `ClientApp` running on the same GPU. " +"Without enabling this, you might encounter an out-of-memory error because" +" TensorFlow, by default, allocates all GPU memory. Read more about how " +"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " +"documentation: https://www.tensorflow.org/api/stable." 
msgstr "" -#: ../../source/ref-changelog.md:198 +#: flwr.simulation.run_simulation.run_simulation:26 of msgid "" -"**Remove experimental argument** `rest` **from** `start_client` " -"([#2324](https://github.com/adap/flower/pull/2324))" +"When diabled, only INFO, WARNING and ERROR log messages will be shown. If" +" enabled, DEBUG-level logs will be displayed." msgstr "" -#: ../../source/ref-changelog.md:200 +#: ../../source/ref-api/flwr.simulation.run_simulation_from_cli.rst:2 +msgid "run\\_simulation\\_from\\_cli" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +msgid "start\\_simulation" +msgstr "" + +#: flwr.simulation.app.start_simulation:3 of msgid "" -"The (still experimental) argument `rest` was removed from `start_client` " -"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " -"experimental REST API instead." +"A function creating client instances. The function must take a single " +"`str` argument called `cid`. It should return a single client instance of" +" type Client. Note that the created client instances are ephemeral and " +"will often be destroyed after a single method invocation. Since client " +"instances are not long-lived, they should not attempt to carry state over" +" method invocations. Any state required by the instance (model, dataset, " +"hyperparameters, ...) should be (re-)created in either the call to " +"`client_fn` or the call to any of the client methods (e.g., load " +"evaluation data in the `evaluate` method itself)." msgstr "" -#: ../../source/ref-changelog.md:202 -msgid "v1.5.0 (2023-08-31)" +#: flwr.simulation.app.start_simulation:13 of +msgid "" +"The total number of clients in this simulation. This must be set if " +"`clients_ids` is not set and vice-versa." msgstr "" -#: ../../source/ref-changelog.md:208 +#: flwr.simulation.app.start_simulation:16 of msgid "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +"List `client_id`s for each client. This is only required if `num_clients`" +" is not set. Setting both `num_clients` and `clients_ids` with " +"`len(clients_ids)` not equal to `num_clients` generates an error." msgstr "" -#: ../../source/ref-changelog.md:212 +#: flwr.simulation.app.start_simulation:20 of msgid "" -"**Introduce new simulation engine** " -"([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +"CPU and GPU resources for a single client. Supported keys are `num_cpus` " +"and `num_gpus`. To understand the GPU utilization caused by `num_gpus`, " +"as well as using custom resources, please consult the Ray documentation." msgstr "" -#: ../../source/ref-changelog.md:214 +#: flwr.simulation.app.start_simulation:25 of msgid "" -"The new simulation engine has been rewritten from the ground up, yet it " -"remains fully backwards compatible. It offers much improved stability and" -" memory handling, especially when working with GPUs. Simulations " -"transparently adapt to different settings to scale simulation in CPU-" -"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." +"An implementation of the abstract base class `flwr.server.Server`. If no " +"instance is provided, then `start_server` will create one." 
msgstr "" -#: ../../source/ref-changelog.md:216 +#: flwr.simulation.app.start_simulation:31 of msgid "" -"Comprehensive documentation includes a new [how-to run " -"simulations](https://flower.ai/docs/framework/how-to-run-" -"simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial " -"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." +"An implementation of the abstract base class `flwr.server.Strategy`. If " +"no strategy is provided, then `start_server` will use " +"`flwr.server.strategy.FedAvg`." msgstr "" -#: ../../source/ref-changelog.md:218 +#: flwr.simulation.app.start_simulation:35 of msgid "" -"**Restructure Flower Docs** " -"([#1824](https://github.com/adap/flower/pull/1824), " -"[#1865](https://github.com/adap/flower/pull/1865), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1887](https://github.com/adap/flower/pull/1887), " -"[#1919](https://github.com/adap/flower/pull/1919), " -"[#1922](https://github.com/adap/flower/pull/1922), " -"[#1920](https://github.com/adap/flower/pull/1920), " -"[#1923](https://github.com/adap/flower/pull/1923), " -"[#1924](https://github.com/adap/flower/pull/1924), " -"[#1962](https://github.com/adap/flower/pull/1962), " -"[#2006](https://github.com/adap/flower/pull/2006), " -"[#2133](https://github.com/adap/flower/pull/2133), " -"[#2203](https://github.com/adap/flower/pull/2203), " -"[#2215](https://github.com/adap/flower/pull/2215), " -"[#2122](https://github.com/adap/flower/pull/2122), " -"[#2223](https://github.com/adap/flower/pull/2223), " -"[#2219](https://github.com/adap/flower/pull/2219), " -"[#2232](https://github.com/adap/flower/pull/2232), " -"[#2233](https://github.com/adap/flower/pull/2233), " -"[#2234](https://github.com/adap/flower/pull/2234), " -"[#2235](https://github.com/adap/flower/pull/2235), " -"[#2237](https://github.com/adap/flower/pull/2237), " -"[#2238](https://github.com/adap/flower/pull/2238), " -"[#2242](https://github.com/adap/flower/pull/2242), " -"[#2231](https://github.com/adap/flower/pull/2231), " -"[#2243](https://github.com/adap/flower/pull/2243), " -"[#2227](https://github.com/adap/flower/pull/2227))" +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_simulation` will use " +"`flwr.server.client_manager.SimpleClientManager`." msgstr "" -#: ../../source/ref-changelog.md:220 +#: flwr.simulation.app.start_simulation:39 of msgid "" -"Much effort went into a completely restructured Flower docs experience. " -"The documentation on [flower.ai/docs](flower.ai/docs) is now divided " -"into Flower Framework, Flower Baselines, Flower Android SDK, Flower iOS " -"SDK, and code example projects." +"Optional dictionary containing arguments for the call to `ray.init`. If " +"ray_init_args is None (the default), Ray will be initialized with the " +"following default args: { \"ignore_reinit_error\": True, " +"\"include_dashboard\": False } An empty dictionary can be used " +"(ray_init_args={}) to prevent any arguments from being passed to " +"ray.init." 
msgstr "" -#: ../../source/ref-changelog.md:222 +#: flwr.simulation.app.start_simulation:39 of msgid "" -"**Introduce Flower Swift SDK** " -"([#1858](https://github.com/adap/flower/pull/1858), " -"[#1897](https://github.com/adap/flower/pull/1897))" +"Optional dictionary containing arguments for the call to `ray.init`. If " +"ray_init_args is None (the default), Ray will be initialized with the " +"following default args:" msgstr "" -#: ../../source/ref-changelog.md:224 +#: flwr.simulation.app.start_simulation:43 of +msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +msgstr "" + +#: flwr.simulation.app.start_simulation:45 of msgid "" -"This is the first preview release of the Flower Swift SDK. Flower support" -" on iOS is improving, and alongside the Swift SDK and code example, there" -" is now also an iOS quickstart tutorial." +"An empty dictionary can be used (ray_init_args={}) to prevent any " +"arguments from being passed to ray.init." msgstr "" -#: ../../source/ref-changelog.md:226 +#: flwr.simulation.app.start_simulation:48 of msgid "" -"**Introduce Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +"Set to True to prevent `ray.shutdown()` in case " +"`ray.is_initialized()=True`." msgstr "" -#: ../../source/ref-changelog.md:228 +#: flwr.simulation.app.start_simulation:50 of msgid "" -"This is the first preview release of the Flower Kotlin SDK. Flower " -"support on Android is improving, and alongside the Kotlin SDK and code " -"example, there is now also an Android quickstart tutorial." +"Optionally specify the type of actor to use. The actor object, which " +"persists throughout the simulation, will be the process in charge of " +"executing a ClientApp wrapping input argument `client_fn`." msgstr "" -#: ../../source/ref-changelog.md:230 +#: flwr.simulation.app.start_simulation:54 of msgid "" -"**Introduce new end-to-end testing infrastructure** " -"([#1842](https://github.com/adap/flower/pull/1842), " -"[#2071](https://github.com/adap/flower/pull/2071), " -"[#2072](https://github.com/adap/flower/pull/2072), " -"[#2068](https://github.com/adap/flower/pull/2068), " -"[#2067](https://github.com/adap/flower/pull/2067), " -"[#2069](https://github.com/adap/flower/pull/2069), " -"[#2073](https://github.com/adap/flower/pull/2073), " -"[#2070](https://github.com/adap/flower/pull/2070), " -"[#2074](https://github.com/adap/flower/pull/2074), " -"[#2082](https://github.com/adap/flower/pull/2082), " -"[#2084](https://github.com/adap/flower/pull/2084), " -"[#2093](https://github.com/adap/flower/pull/2093), " -"[#2109](https://github.com/adap/flower/pull/2109), " -"[#2095](https://github.com/adap/flower/pull/2095), " -"[#2140](https://github.com/adap/flower/pull/2140), " -"[#2137](https://github.com/adap/flower/pull/2137), " -"[#2165](https://github.com/adap/flower/pull/2165))" +"If you want to create your own Actor classes, you might need to pass some" +" input argument. You can use this dictionary for such purpose." msgstr "" -#: ../../source/ref-changelog.md:232 +#: flwr.simulation.app.start_simulation:57 of msgid "" -"A new testing infrastructure ensures that new changes stay compatible " -"with existing framework integrations or strategies." +"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " +"the VCE to choose in which node the actor is placed. If you are an " +"advanced user needed more control you can use lower-level scheduling " +"strategies to pin actors to specific compute nodes (e.g. 
via " +"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." +" For all details, please refer to the Ray documentation: " +"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" msgstr "" -#: ../../source/ref-changelog.md:234 -msgid "**Deprecate Python 3.7**" +#: flwr.simulation.app.start_simulation:66 of +msgid "**hist** -- Object containing metrics from training." msgstr "" -#: ../../source/ref-changelog.md:236 -msgid "" -"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" -" Python 3.7 is now deprecated and will be removed in an upcoming release." +#: ../../source/ref-changelog.md:1 +msgid "Changelog" msgstr "" -#: ../../source/ref-changelog.md:238 -msgid "" -"**Add new** `FedTrimmedAvg` **strategy** " -"([#1769](https://github.com/adap/flower/pull/1769), " -"[#1853](https://github.com/adap/flower/pull/1853))" +#: ../../source/ref-changelog.md:3 +msgid "Unreleased" msgstr "" -#: ../../source/ref-changelog.md:240 -msgid "" -"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " -"2018](https://arxiv.org/abs/1803.01498)." +#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:17 +#: ../../source/ref-changelog.md:110 ../../source/ref-changelog.md:210 +#: ../../source/ref-changelog.md:294 ../../source/ref-changelog.md:358 +#: ../../source/ref-changelog.md:416 ../../source/ref-changelog.md:485 +#: ../../source/ref-changelog.md:614 ../../source/ref-changelog.md:656 +#: ../../source/ref-changelog.md:723 ../../source/ref-changelog.md:789 +#: ../../source/ref-changelog.md:834 ../../source/ref-changelog.md:873 +#: ../../source/ref-changelog.md:906 ../../source/ref-changelog.md:956 +msgid "What's new?" msgstr "" -#: ../../source/ref-changelog.md:242 -msgid "" -"**Introduce start_driver** " -"([#1697](https://github.com/adap/flower/pull/1697))" +#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:80 +#: ../../source/ref-changelog.md:192 ../../source/ref-changelog.md:282 +#: ../../source/ref-changelog.md:346 ../../source/ref-changelog.md:404 +#: ../../source/ref-changelog.md:473 ../../source/ref-changelog.md:535 +#: ../../source/ref-changelog.md:554 ../../source/ref-changelog.md:710 +#: ../../source/ref-changelog.md:781 ../../source/ref-changelog.md:818 +#: ../../source/ref-changelog.md:861 +msgid "Incompatible changes" msgstr "" -#: ../../source/ref-changelog.md:244 -msgid "" -"In addition to `start_server` and using the raw Driver API, there is a " -"new `start_driver` function that allows for running `start_server` " -"scripts as a Flower driver with only a single-line code change. Check out" -" the `mt-pytorch` code example to see a working example using " -"`start_driver`." 
+#: ../../source/ref-changelog.md:9 +msgid "v1.7.0 (2024-02-05)" msgstr "" -#: ../../source/ref-changelog.md:246 -msgid "" -"**Add parameter aggregation to** `mt-pytorch` **code example** " -"([#1785](https://github.com/adap/flower/pull/1785))" +#: ../../source/ref-changelog.md:11 ../../source/ref-changelog.md:104 +#: ../../source/ref-changelog.md:204 ../../source/ref-changelog.md:288 +#: ../../source/ref-changelog.md:352 ../../source/ref-changelog.md:410 +#: ../../source/ref-changelog.md:479 ../../source/ref-changelog.md:548 +msgid "Thanks to our contributors" msgstr "" -#: ../../source/ref-changelog.md:248 +#: ../../source/ref-changelog.md:13 ../../source/ref-changelog.md:106 +#: ../../source/ref-changelog.md:206 ../../source/ref-changelog.md:290 +#: ../../source/ref-changelog.md:354 ../../source/ref-changelog.md:412 msgid "" -"The `mt-pytorch` example shows how to aggregate parameters when writing a" -" driver script. The included `driver.py` and `server.py` have been " -"aligned to demonstrate both the low-level way and the high-level way of " -"building server-side logic." +"We would like to give our special thanks to all the contributors who made" +" the new version of Flower possible (in `git shortlog` order):" msgstr "" -#: ../../source/ref-changelog.md:250 +#: ../../source/ref-changelog.md:15 msgid "" -"**Migrate experimental REST API to Starlette** " -"([2171](https://github.com/adap/flower/pull/2171))" +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " +"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " +"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " +"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " +"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " +"Shaaban`, `Yan Gao`, `Yasar Abbas` " msgstr "" -#: ../../source/ref-changelog.md:252 +#: ../../source/ref-changelog.md:19 msgid "" -"The (experimental) REST API used to be implemented in " -"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" -" use [Starlette](https://www.starlette.io/) directly." +"**Introduce stateful clients (experimental)** " +"([#2770](https://github.com/adap/flower/pull/2770), " +"[#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), " +"[#2643](https://github.com/adap/flower/pull/2643), " +"[#2769](https://github.com/adap/flower/pull/2769))" msgstr "" -#: ../../source/ref-changelog.md:254 +#: ../../source/ref-changelog.md:21 msgid "" -"Please note: The REST request-response API is still experimental and will" -" likely change significantly over time." +"Subclasses of `Client` and `NumPyClient` can now store local state that " +"remains on the client. Let's start with the highlight first: this new " +"feature is compatible with both simulated clients (via " +"`start_simulation`) and networked clients (via `start_client`). It's also" +" the first preview of new abstractions like `Context` and `RecordSet`. " +"Clients can access state of type `RecordSet` via `state: RecordSet = " +"self.context.state`. Changes to this `RecordSet` are preserved across " +"different rounds of execution to enable stateful computations in a " +"unified way across simulation and deployment." 
msgstr "" -#: ../../source/ref-changelog.md:256 +#: ../../source/ref-changelog.md:23 msgid "" -"**Introduce experimental gRPC request-response API** " -"([#1867](https://github.com/adap/flower/pull/1867), " -"[#1901](https://github.com/adap/flower/pull/1901))" +"**Improve performance** " +"([#2293](https://github.com/adap/flower/pull/2293))" msgstr "" -#: ../../source/ref-changelog.md:258 +#: ../../source/ref-changelog.md:25 msgid "" -"In addition to the existing gRPC API (based on bidirectional streaming) " -"and the experimental REST API, there is now a new gRPC API that uses a " -"request-response model to communicate with client nodes." +"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" +"place aggregation to reduce memory consumption. The Flower client " +"serialization/deserialization has been rewritten from the ground up, " +"which results in significant speedups, especially when the client-side " +"training time is short." msgstr "" -#: ../../source/ref-changelog.md:260 +#: ../../source/ref-changelog.md:27 msgid "" -"Please note: The gRPC request-response API is still experimental and will" -" likely change significantly over time." +"**Support Federated Learning with Apple MLX and Flower** " +"([#2693](https://github.com/adap/flower/pull/2693))" msgstr "" -#: ../../source/ref-changelog.md:262 +#: ../../source/ref-changelog.md:29 msgid "" -"**Replace the experimental** `start_client(rest=True)` **with the new** " -"`start_client(transport=\"rest\")` " -"([#1880](https://github.com/adap/flower/pull/1880))" +"Flower has official support for federated learning using [Apple " +"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " +"example." msgstr "" -#: ../../source/ref-changelog.md:264 +#: ../../source/ref-changelog.md:31 msgid "" -"The (experimental) `start_client` argument `rest` was deprecated in " -"favour of a new argument `transport`. `start_client(transport=\"rest\")` " -"will yield the same behaviour as `start_client(rest=True)` did before. " -"All code should migrate to the new argument `transport`. The deprecated " -"argument `rest` will be removed in a future release." +"**Introduce new XGBoost cyclic strategy** " +"([#2666](https://github.com/adap/flower/pull/2666), " +"[#2668](https://github.com/adap/flower/pull/2668))" msgstr "" -#: ../../source/ref-changelog.md:266 +#: ../../source/ref-changelog.md:33 msgid "" -"**Add a new gRPC option** " -"([#2197](https://github.com/adap/flower/pull/2197))" +"A new strategy called `FedXgbCyclic` supports a client-by-client style of" +" training (often called cyclic). The `xgboost-comprehensive` code example" +" shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower " +"offers best-in-class XGBoost support." msgstr "" -#: ../../source/ref-changelog.md:268 +#: ../../source/ref-changelog.md:35 msgid "" -"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" -" option set to 0 by default. This prevents the clients from sending " -"keepalive pings when there is no outstanding stream." +"**Support Python 3.11** " +"([#2394](https://github.com/adap/flower/pull/2394))" msgstr "" -#: ../../source/ref-changelog.md:270 +#: ../../source/ref-changelog.md:37 msgid "" -"**Improve example notebooks** " -"([#2005](https://github.com/adap/flower/pull/2005))" +"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " +"ensure better support for users using more recent Python versions." 
msgstr "" -#: ../../source/ref-changelog.md:272 -msgid "There's a new 30min Federated Learning PyTorch tutorial!" +#: ../../source/ref-changelog.md:39 +msgid "" +"**Update gRPC and ProtoBuf dependencies** " +"([#2814](https://github.com/adap/flower/pull/2814))" msgstr "" -#: ../../source/ref-changelog.md:274 +#: ../../source/ref-changelog.md:41 msgid "" -"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +"The `grpcio` and `protobuf` dependencies were updated to their latest " +"versions for improved security and performance." msgstr "" -#: ../../source/ref-changelog.md:276 +#: ../../source/ref-changelog.md:43 msgid "" -"Many examples have received significant updates, including simplified " -"advanced-tensorflow and advanced-pytorch examples, improved macOS " -"compatibility of TensorFlow examples, and code examples for simulation. A" -" major upgrade is that all code examples now have a `requirements.txt` " -"(in addition to `pyproject.toml`)." +"**Introduce Docker image for Flower server** " +"([#2700](https://github.com/adap/flower/pull/2700), " +"[#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), " +"[#2695](https://github.com/adap/flower/pull/2695), " +"[#2747](https://github.com/adap/flower/pull/2747), " +"[#2746](https://github.com/adap/flower/pull/2746), " +"[#2680](https://github.com/adap/flower/pull/2680), " +"[#2682](https://github.com/adap/flower/pull/2682), " +"[#2701](https://github.com/adap/flower/pull/2701))" msgstr "" -#: ../../source/ref-changelog.md:278 +#: ../../source/ref-changelog.md:45 msgid "" -"**General improvements** " -"([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +"The Flower server can now be run using an official Docker image. A new " +"how-to guide explains [how to run Flower using " +"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html). An official Flower client Docker image will follow." 
msgstr "" -#: ../../source/ref-changelog.md:284 ../../source/ref-changelog.md:348 -#: ../../source/ref-changelog.md:406 ../../source/ref-changelog.md:475 -#: ../../source/ref-changelog.md:537 -msgid "None" +#: ../../source/ref-changelog.md:47 +msgid "" +"**Introduce** `flower-via-docker-compose` **example** " +"([#2626](https://github.com/adap/flower/pull/2626))" msgstr "" -#: ../../source/ref-changelog.md:286 -msgid "v1.4.0 (2023-04-21)" +#: ../../source/ref-changelog.md:49 +msgid "" +"**Introduce** `quickstart-sklearn-tabular` **example** " +"([#2719](https://github.com/adap/flower/pull/2719))" msgstr "" -#: ../../source/ref-changelog.md:292 +#: ../../source/ref-changelog.md:51 msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +"**Introduce** `custom-metrics` **example** " +"([#1958](https://github.com/adap/flower/pull/1958))" msgstr "" -#: ../../source/ref-changelog.md:296 +#: ../../source/ref-changelog.md:53 msgid "" -"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " -"example)** ([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" +"**Update code examples to use Flower Datasets** " +"([#2450](https://github.com/adap/flower/pull/2450), " +"[#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), " +"[#2712](https://github.com/adap/flower/pull/2712))" msgstr "" -#: ../../source/ref-changelog.md:298 +#: ../../source/ref-changelog.md:55 msgid "" -"XGBoost is a tree-based ensemble machine learning algorithm that uses " -"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" -" " -"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" and a [code " -"example](https://github.com/adap/flower/tree/main/examples/xgboost-quickstart)" -" that demonstrates the usage of this new strategy in an XGBoost project." +"Several code examples were updated to use [Flower " +"Datasets](https://flower.ai/docs/datasets/)." 
msgstr "" -#: ../../source/ref-changelog.md:300 +#: ../../source/ref-changelog.md:57 msgid "" -"**Introduce iOS SDK (preview)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +"**General updates to Flower Examples** " +"([#2381](https://github.com/adap/flower/pull/2381), " +"[#2805](https://github.com/adap/flower/pull/2805), " +"[#2782](https://github.com/adap/flower/pull/2782), " +"[#2806](https://github.com/adap/flower/pull/2806), " +"[#2829](https://github.com/adap/flower/pull/2829), " +"[#2825](https://github.com/adap/flower/pull/2825), " +"[#2816](https://github.com/adap/flower/pull/2816), " +"[#2726](https://github.com/adap/flower/pull/2726), " +"[#2659](https://github.com/adap/flower/pull/2659), " +"[#2655](https://github.com/adap/flower/pull/2655))" msgstr "" -#: ../../source/ref-changelog.md:302 -msgid "" -"This is a major update for anyone wanting to implement Federated Learning" -" on iOS mobile devices. We now have a swift iOS SDK present under " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" that will facilitate greatly the app creating process. To showcase its " -"use, the [iOS " -"example](https://github.com/adap/flower/tree/main/examples/ios) has also " -"been updated!" +#: ../../source/ref-changelog.md:59 +msgid "Many Flower code examples received substantial updates." msgstr "" -#: ../../source/ref-changelog.md:304 -msgid "" -"**Introduce new \"What is Federated Learning?\" tutorial** " -"([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721))" +#: ../../source/ref-changelog.md:61 ../../source/ref-changelog.md:154 +msgid "**Update Flower Baselines**" msgstr "" -#: ../../source/ref-changelog.md:306 +#: ../../source/ref-changelog.md:63 msgid "" -"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" -"what-is-federated-learning.html) in our documentation explains the basics" -" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" -" Learning to start their journey with Flower. Forward it to anyone who's " -"interested in Federated Learning!" +"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " +"[#2771](https://github.com/adap/flower/pull/2771))" msgstr "" -#: ../../source/ref-changelog.md:308 -msgid "" -"**Introduce new Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" +#: ../../source/ref-changelog.md:64 +msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" msgstr "" -#: ../../source/ref-changelog.md:310 -msgid "" -"This new baseline replicates the MNIST+CNN task from the paper [Federated" -" Optimization in Heterogeneous Networks (Li et al., " -"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," -" which aims at making convergence more robust in heterogeneous settings." 
+#: ../../source/ref-changelog.md:65 +msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" msgstr "" -#: ../../source/ref-changelog.md:312 -msgid "" -"**Introduce new Flower Baseline: FedAvg FEMNIST** " -"([#1655](https://github.com/adap/flower/pull/1655))" +#: ../../source/ref-changelog.md:66 +msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" msgstr "" -#: ../../source/ref-changelog.md:314 -msgid "" -"This new baseline replicates an experiment evaluating the performance of " -"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " -"Benchmark for Federated Settings (Caldas et al., " -"2018)](https://arxiv.org/abs/1812.01097)." +#: ../../source/ref-changelog.md:67 +msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" msgstr "" -#: ../../source/ref-changelog.md:316 -msgid "" -"**Introduce (experimental) REST API** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +#: ../../source/ref-changelog.md:68 +msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" msgstr "" -#: ../../source/ref-changelog.md:318 +#: ../../source/ref-changelog.md:70 msgid "" -"A new REST API has been introduced as an alternative to the gRPC-based " -"communication stack. In this initial version, the REST API only supports " -"anonymous clients." +"**Improve documentation** " +"([#2674](https://github.com/adap/flower/pull/2674), " +"[#2480](https://github.com/adap/flower/pull/2480), " +"[#2826](https://github.com/adap/flower/pull/2826), " +"[#2727](https://github.com/adap/flower/pull/2727), " +"[#2761](https://github.com/adap/flower/pull/2761), " +"[#2900](https://github.com/adap/flower/pull/2900))" msgstr "" -#: ../../source/ref-changelog.md:320 +#: ../../source/ref-changelog.md:72 msgid "" -"Please note: The REST API is still experimental and will likely change " -"significantly over time." 
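As a concrete companion to the `start_client(transport="rest")` entries quoted above, here is a hedged sketch of opting into the experimental REST transport; the server address, port, and the placeholder client class are assumptions, not values stated in this patch.

```python
import flwr as fl


class FlowerClient(fl.client.NumPyClient):
    """Placeholder client; a real client would also implement fit/evaluate."""

    def get_parameters(self, config):
        return []


# Previously: fl.client.start_client(..., rest=True)  # deprecated `rest` argument
# Now: select the transport explicitly via `transport="rest"`.
fl.client.start_client(
    server_address="127.0.0.1:9093",  # assumed REST endpoint of a running Flower server
    client=FlowerClient().to_client(),
    transport="rest",
)
```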
+"**Improved testing and development infrastructure** " +"([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" msgstr "" -#: ../../source/ref-changelog.md:322 +#: ../../source/ref-changelog.md:74 msgid "" -"**Improve the (experimental) Driver API** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +"The Flower testing and development infrastructure has received " +"substantial updates. This makes Flower 1.7 the most tested release ever." msgstr "" -#: ../../source/ref-changelog.md:324 +#: ../../source/ref-changelog.md:76 msgid "" -"The Driver API is still an experimental feature, but this release " -"introduces some major upgrades. One of the main improvements is the " -"introduction of an SQLite database to store server state on disk (instead" -" of in-memory). Another improvement is that tasks (instructions or " -"results) that have been delivered will now be deleted. This greatly " -"improves the memory efficiency of a long-running Flower server." 
+"**Update dependencies** " +"([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), " +"[#2739](https://github.com/adap/flower/pull/2739), " +"[#2837](https://github.com/adap/flower/pull/2837), " +"[#2788](https://github.com/adap/flower/pull/2788), " +"[#2811](https://github.com/adap/flower/pull/2811), " +"[#2774](https://github.com/adap/flower/pull/2774), " +"[#2790](https://github.com/adap/flower/pull/2790), " +"[#2751](https://github.com/adap/flower/pull/2751), " +"[#2850](https://github.com/adap/flower/pull/2850), " +"[#2812](https://github.com/adap/flower/pull/2812), " +"[#2872](https://github.com/adap/flower/pull/2872), " +"[#2736](https://github.com/adap/flower/pull/2736), " +"[#2756](https://github.com/adap/flower/pull/2756), " +"[#2857](https://github.com/adap/flower/pull/2857), " +"[#2757](https://github.com/adap/flower/pull/2757), " +"[#2810](https://github.com/adap/flower/pull/2810), " +"[#2740](https://github.com/adap/flower/pull/2740), " +"[#2789](https://github.com/adap/flower/pull/2789))" msgstr "" -#: ../../source/ref-changelog.md:326 +#: ../../source/ref-changelog.md:78 msgid "" -"**Fix spilling issues related to Ray during simulations** " -"([#1698](https://github.com/adap/flower/pull/1698))" +"**General improvements** " +"([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" msgstr "" -#: ../../source/ref-changelog.md:328 +#: ../../source/ref-changelog.md:82 msgid "" -"While running long simulations, `ray` was sometimes spilling huge amounts" -" of data that would make the training unable to continue. This is now " -"fixed! 
🎉" +"**Deprecate** `start_numpy_client` " +"([#2563](https://github.com/adap/flower/pull/2563), " +"[#2718](https://github.com/adap/flower/pull/2718))" msgstr "" -#: ../../source/ref-changelog.md:330 +#: ../../source/ref-changelog.md:84 msgid "" -"**Add new example using** `TabNet` **and Flower** " -"([#1725](https://github.com/adap/flower/pull/1725))" +"Until now, clients of type `NumPyClient` needed to be started via " +"`start_numpy_client`. In our efforts to consolidate framework APIs, we " +"have introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need " +"to first call the `.to_client()` method and then pass returned `Client` " +"object to `start_client`. The examples and the documentation have been " +"updated accordingly." msgstr "" -#: ../../source/ref-changelog.md:332 +#: ../../source/ref-changelog.md:86 msgid "" -"TabNet is a powerful and flexible framework for training machine learning" -" models on tabular data. We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-tabnet)." +"**Deprecate legacy DP wrappers** " +"([#2749](https://github.com/adap/flower/pull/2749))" msgstr "" -#: ../../source/ref-changelog.md:334 +#: ../../source/ref-changelog.md:88 msgid "" -"**Add new how-to guide for monitoring simulations** " -"([#1649](https://github.com/adap/flower/pull/1649))" +"Legacy DP wrapper classes are deprecated, but still functional. This is " +"in preparation for an all-new pluggable version of differential privacy " +"support in Flower." msgstr "" -#: ../../source/ref-changelog.md:336 +#: ../../source/ref-changelog.md:90 msgid "" -"We now have a documentation guide to help users monitor their performance" -" during simulations." +"**Make optional arg** `--callable` **in** `flower-client` **a required " +"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" msgstr "" -#: ../../source/ref-changelog.md:338 +#: ../../source/ref-changelog.md:92 msgid "" -"**Add training metrics to** `History` **object during simulations** " -"([#1696](https://github.com/adap/flower/pull/1696))" +"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " +"([#2890](https://github.com/adap/flower/pull/2890))" msgstr "" -#: ../../source/ref-changelog.md:340 +#: ../../source/ref-changelog.md:94 msgid "" -"The `fit_metrics_aggregation_fn` can be used to aggregate training " -"metrics, but previous releases did not save the results in the `History` " -"object. This is now the case!" 
+"**Drop experimental** `Task` **fields** " +"([#2866](https://github.com/adap/flower/pull/2866), " +"[#2865](https://github.com/adap/flower/pull/2865))" msgstr "" -#: ../../source/ref-changelog.md:342 +#: ../../source/ref-changelog.md:96 msgid "" -"**General improvements** " -"([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/adap/flower/pull/1705), " -"[#1708](https://github.com/adap/flower/pull/1708), " -"[#1711](https://github.com/adap/flower/pull/1711), " -"[#1713](https://github.com/adap/flower/pull/1713), " -"[#1714](https://github.com/adap/flower/pull/1714), " -"[#1718](https://github.com/adap/flower/pull/1718), " -"[#1716](https://github.com/adap/flower/pull/1716), " -"[#1723](https://github.com/adap/flower/pull/1723), " -"[#1735](https://github.com/adap/flower/pull/1735), " -"[#1678](https://github.com/adap/flower/pull/1678), " -"[#1750](https://github.com/adap/flower/pull/1750), " -"[#1753](https://github.com/adap/flower/pull/1753), " -"[#1736](https://github.com/adap/flower/pull/1736), " -"[#1766](https://github.com/adap/flower/pull/1766), " -"[#1760](https://github.com/adap/flower/pull/1760), " -"[#1775](https://github.com/adap/flower/pull/1775), " -"[#1776](https://github.com/adap/flower/pull/1776), " -"[#1777](https://github.com/adap/flower/pull/1777), " -"[#1779](https://github.com/adap/flower/pull/1779), " -"[#1784](https://github.com/adap/flower/pull/1784), " -"[#1773](https://github.com/adap/flower/pull/1773), " -"[#1755](https://github.com/adap/flower/pull/1755), " -"[#1789](https://github.com/adap/flower/pull/1789), " -"[#1788](https://github.com/adap/flower/pull/1788), " -"[#1798](https://github.com/adap/flower/pull/1798), " -"[#1799](https://github.com/adap/flower/pull/1799), " -"[#1739](https://github.com/adap/flower/pull/1739), " -"[#1800](https://github.com/adap/flower/pull/1800), " -"[#1804](https://github.com/adap/flower/pull/1804), " -"[#1805](https://github.com/adap/flower/pull/1805))" +"Experimental fields `sa`, `legacy_server_message` and " +"`legacy_client_message` were removed from `Task` message. The removed " +"fields are superseded by the new `RecordSet` abstraction." msgstr "" -#: ../../source/ref-changelog.md:350 -msgid "v1.3.0 (2023-02-06)" +#: ../../source/ref-changelog.md:98 +msgid "" +"**Retire MXNet examples** " +"([#2724](https://github.com/adap/flower/pull/2724))" msgstr "" -#: ../../source/ref-changelog.md:356 +#: ../../source/ref-changelog.md:100 msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. 
Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`"
+"The development of the MXNet framework has ended and the project is now "
+"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet "
+"examples won't receive updates."
 msgstr ""

-#: ../../source/ref-changelog.md:360
-msgid ""
-"**Add support for** `workload_id` **and** `group_id` **in Driver API** "
-"([#1595](https://github.com/adap/flower/pull/1595))"
+#: ../../source/ref-changelog.md:102
+msgid "v1.6.0 (2023-11-28)"
 msgstr ""

-#: ../../source/ref-changelog.md:362
+#: ../../source/ref-changelog.md:108
 msgid ""
-"The (experimental) Driver API now supports a `workload_id` that can be "
-"used to identify which workload a task belongs to. It also supports a new"
-" `group_id` that can be used, for example, to indicate the current "
-"training round. Both the `workload_id` and `group_id` enable client nodes"
-" to decide whether they want to handle a task or not."
+"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, "
+"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel "
+"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`,"
+" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, "
+"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, "
+"`cnxdeveloper`, `k3nfalt` "
 msgstr ""

-#: ../../source/ref-changelog.md:364
+#: ../../source/ref-changelog.md:112
 msgid ""
-"**Make Driver API and Fleet API address configurable** "
-"([#1637](https://github.com/adap/flower/pull/1637))"
+"**Add experimental support for Python 3.12** "
+"([#2565](https://github.com/adap/flower/pull/2565))"
 msgstr ""

-#: ../../source/ref-changelog.md:366
+#: ../../source/ref-changelog.md:114
 msgid ""
-"The (experimental) long-running Flower server (Driver API and Fleet API) "
-"can now configure the server address of both Driver API (via `--driver-"
-"api-address`) and Fleet API (via `--fleet-api-address`) when starting:"
+"**Add new XGBoost examples** "
+"([#2612](https://github.com/adap/flower/pull/2612), "
+"[#2554](https://github.com/adap/flower/pull/2554), "
+"[#2617](https://github.com/adap/flower/pull/2617), "
+"[#2618](https://github.com/adap/flower/pull/2618), "
+"[#2619](https://github.com/adap/flower/pull/2619), "
+"[#2567](https://github.com/adap/flower/pull/2567))"
 msgstr ""

-#: ../../source/ref-changelog.md:368
+#: ../../source/ref-changelog.md:116
 msgid ""
-"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address "
-"\"0.0.0.0:8086\"`"
+"We have added a new `xgboost-quickstart` example alongside a new "
+"`xgboost-comprehensive` example that goes more in-depth."
 msgstr ""

-#: ../../source/ref-changelog.md:370
-msgid "Both IPv4 and IPv6 addresses are supported."
+#: ../../source/ref-changelog.md:118
+msgid ""
+"**Add Vertical FL example** "
+"([#2598](https://github.com/adap/flower/pull/2598))"
 msgstr ""

-#: ../../source/ref-changelog.md:372
+#: ../../source/ref-changelog.md:120
 msgid ""
-"**Add new example of Federated Learning using fastai and Flower** "
-"([#1598](https://github.com/adap/flower/pull/1598))"
+"We had many questions about Vertical Federated Learning using Flower, so "
+"we decided to add a simple example for it on the [Titanic "
+"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a "
+"tutorial (in the README)."
msgstr "" -#: ../../source/ref-changelog.md:374 +#: ../../source/ref-changelog.md:122 msgid "" -"A new code example (`quickstart-fastai`) demonstrates federated learning " -"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples/quickstart-fastai)." +"**Support custom** `ClientManager` **in** `start_driver()` " +"([#2292](https://github.com/adap/flower/pull/2292))" msgstr "" -#: ../../source/ref-changelog.md:376 +#: ../../source/ref-changelog.md:124 msgid "" -"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" -" versions of Android** " -"([#1603](https://github.com/adap/flower/pull/1603))" +"**Update REST API to support create and delete nodes** " +"([#2283](https://github.com/adap/flower/pull/2283))" msgstr "" -#: ../../source/ref-changelog.md:378 +#: ../../source/ref-changelog.md:126 msgid "" -"The Android code example has received a substantial update: the project " -"is compatible with Flower 1.0 (and later), the UI received a full " -"refresh, and the project is updated to be compatible with newer Android " -"tooling." +"**Update the Android SDK** " +"([#2187](https://github.com/adap/flower/pull/2187))" msgstr "" -#: ../../source/ref-changelog.md:380 -msgid "" -"**Add new `FedProx` strategy** " -"([#1619](https://github.com/adap/flower/pull/1619))" +#: ../../source/ref-changelog.md:128 +msgid "Add gRPC request-response capability to the Android SDK." msgstr "" -#: ../../source/ref-changelog.md:382 +#: ../../source/ref-changelog.md:130 msgid "" -"This " -"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" -" is almost identical to " -"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," -" but helps users replicate what is described in this " -"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " -"parameter called `proximal_mu` to regularize the local models with " -"respect to the global models." +"**Update the C++ SDK** " +"([#2537](https://github.com/adap/flower/pull/2537), " +"[#2528](https://github.com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" msgstr "" -#: ../../source/ref-changelog.md:384 -msgid "" -"**Add new metrics to telemetry events** " -"([#1640](https://github.com/adap/flower/pull/1640))" +#: ../../source/ref-changelog.md:132 +msgid "Add gRPC request-response capability to the C++ SDK." msgstr "" -#: ../../source/ref-changelog.md:386 +#: ../../source/ref-changelog.md:134 msgid "" -"An updated event structure allows, for example, the clustering of events " -"within the same workload." +"**Make HTTPS the new default** " +"([#2591](https://github.com/adap/flower/pull/2591), " +"[#2636](https://github.com/adap/flower/pull/2636))" msgstr "" -#: ../../source/ref-changelog.md:388 +#: ../../source/ref-changelog.md:136 msgid "" -"**Add new custom strategy tutorial section** " -"[#1623](https://github.com/adap/flower/pull/1623)" +"Flower is moving to HTTPS by default. The new `flower-server` requires " +"passing `--certificates`, but users can enable `--insecure` to use HTTP " +"for prototyping. The same applies to `flower-client`, which can either " +"use user-provided credentials or gRPC-bundled certificates to connect to " +"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " +"enable insecure HTTP connections." 
msgstr "" -#: ../../source/ref-changelog.md:390 +#: ../../source/ref-changelog.md:138 msgid "" -"The Flower tutorial now has a new section that covers implementing a " -"custom strategy from scratch: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +"For backward compatibility, `start_client()` and `start_numpy_client()` " +"will still start in insecure mode by default. In a future release, " +"insecure connections will require user opt-in by passing `insecure=True`." msgstr "" -#: ../../source/ref-changelog.md:392 +#: ../../source/ref-changelog.md:140 msgid "" -"**Add new custom serialization tutorial section** " -"([#1622](https://github.com/adap/flower/pull/1622))" +"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" msgstr "" -#: ../../source/ref-changelog.md:394 +#: ../../source/ref-changelog.md:142 msgid "" -"The Flower tutorial now has a new section that covers custom " -"serialization: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-customize-the-client-pytorch.ipynb)" +"Using the `client_fn`, Flower clients can interchangeably run as " +"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" +" `start_simulation`) without requiring changes to how the client class is" +" defined and instantiated. The `to_client()` function is introduced to " +"convert a `NumPyClient` to a `Client`." msgstr "" -#: ../../source/ref-changelog.md:396 +#: ../../source/ref-changelog.md:144 msgid "" -"**General improvements** " -"([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github.com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github.com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/adap/flower/pull/1600), " -"[#1601](https://github.com/adap/flower/pull/1601), " -"[#1597](https://github.com/adap/flower/pull/1597), " -"[#1595](https://github.com/adap/flower/pull/1595), " -"[#1591](https://github.com/adap/flower/pull/1591), " -"[#1588](https://github.com/adap/flower/pull/1588), " -"[#1589](https://github.com/adap/flower/pull/1589), " -"[#1587](https://github.com/adap/flower/pull/1587), " -"[#1573](https://github.com/adap/flower/pull/1573), " -"[#1581](https://github.com/adap/flower/pull/1581), " -"[#1578](https://github.com/adap/flower/pull/1578), " -"[#1574](https://github.com/adap/flower/pull/1574), " -"[#1572](https://github.com/adap/flower/pull/1572), " 
-"[#1586](https://github.com/adap/flower/pull/1586))" +"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" msgstr "" -#: ../../source/ref-changelog.md:400 +#: ../../source/ref-changelog.md:146 msgid "" -"**Updated documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" msgstr "" -#: ../../source/ref-changelog.md:402 ../../source/ref-changelog.md:469 +#: ../../source/ref-changelog.md:148 msgid "" -"As usual, the documentation has improved quite a bit. It is another step " -"in our effort to make the Flower documentation the best documentation of " -"any project. Stay tuned and as always, feel free to provide feedback!" -msgstr "" - -#: ../../source/ref-changelog.md:408 -msgid "v1.2.0 (2023-01-13)" +"**Add new** `XGB Bagging` **strategy** " +"([#2611](https://github.com/adap/flower/pull/2611))" msgstr "" -#: ../../source/ref-changelog.md:414 +#: ../../source/ref-changelog.md:150 ../../source/ref-changelog.md:152 msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." -" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +"**Introduce `WorkloadState`** " +"([#2564](https://github.com/adap/flower/pull/2564), " +"[#2632](https://github.com/adap/flower/pull/2632))" msgstr "" -#: ../../source/ref-changelog.md:418 +#: ../../source/ref-changelog.md:156 msgid "" -"**Introduce new Flower Baseline: FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" msgstr "" -#: ../../source/ref-changelog.md:420 +#: ../../source/ref-changelog.md:158 msgid "" -"Over the coming weeks, we will be releasing a number of new reference " -"implementations useful especially to FL newcomers. They will typically " -"revisit well known papers from the literature, and be suitable for " -"integration in your own application or for experimentation, in order to " -"deepen your knowledge of FL in general. Today's release is the first in " -"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" -"pack-fedavg-mnist-cnn/)" +"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400))" msgstr "" -#: ../../source/ref-changelog.md:422 +#: ../../source/ref-changelog.md:160 msgid "" -"**Improve GPU support in simulations** " -"([#1555](https://github.com/adap/flower/pull/1555))" +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" msgstr "" -#: ../../source/ref-changelog.md:424 +#: ../../source/ref-changelog.md:162 msgid "" -"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" -" to improve GPU support. The update includes some of the hard-earned " -"lessons from scaling simulations in GPU cluster environments. 
New " -"defaults make running GPU-based simulations substantially more robust." +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" msgstr "" -#: ../../source/ref-changelog.md:426 -msgid "" -"**Improve GPU support in Jupyter Notebook tutorials** " -"([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" +#: ../../source/ref-changelog.md:164 +msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" msgstr "" -#: ../../source/ref-changelog.md:428 -msgid "" -"Some users reported that Jupyter Notebooks have not always been easy to " -"use on GPU instances. We listened and made improvements to all of our " -"Jupyter notebooks! Check out the updated notebooks here:" +#: ../../source/ref-changelog.md:166 +msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" msgstr "" -#: ../../source/ref-changelog.md:430 -msgid "" -"[An Introduction to Federated Learning](https://flower.ai/docs/framework" -"/tutorial-get-started-with-flower-pytorch.html)" +#: ../../source/ref-changelog.md:168 +msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" msgstr "" -#: ../../source/ref-changelog.md:431 -msgid "" -"[Strategies in Federated Learning](https://flower.ai/docs/framework" -"/tutorial-use-a-federated-learning-strategy-pytorch.html)" +#: ../../source/ref-changelog.md:170 +msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" msgstr "" -#: ../../source/ref-changelog.md:432 -msgid "" -"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" -"-strategy-from-scratch-pytorch.html)" +#: ../../source/ref-changelog.md:172 +msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" msgstr "" -#: ../../source/ref-changelog.md:433 -msgid "" -"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" -"customize-the-client-pytorch.html)" +#: ../../source/ref-changelog.md:174 +msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" msgstr "" -#: ../../source/ref-changelog.md:435 -msgid "" -"**Introduce optional telemetry** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" +#: ../../source/ref-changelog.md:176 +msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" msgstr "" -#: ../../source/ref-changelog.md:437 +#: ../../source/ref-changelog.md:178 msgid "" -"After a [request for " -"feedback](https://github.com/adap/flower/issues/1534) from the community," -" the Flower open-source project introduces optional collection of " -"*anonymous* usage metrics to make well-informed decisions to improve " -"Flower. Doing this enables the Flower team to understand how Flower is " -"used and what challenges users might face." +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" msgstr "" -#: ../../source/ref-changelog.md:439 +#: ../../source/ref-changelog.md:180 msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users who do not want to share anonymous usage metrics. " -"[Read more.](https://flower.ai/docs/telemetry.html)." 
+"**General updates to Flower Examples** " +"([#2384](https://github.com/adap/flower/pull/2384), " +"[#2425](https://github.com/adap/flower/pull/2425), " +"[#2526](https://github.com/adap/flower/pull/2526), " +"[#2302](https://github.com/adap/flower/pull/2302), " +"[#2545](https://github.com/adap/flower/pull/2545))" msgstr "" -#: ../../source/ref-changelog.md:441 +#: ../../source/ref-changelog.md:182 msgid "" -"**Introduce (experimental) Driver API** " -"([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +"**General updates to Flower Baselines** " +"([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435), " +"[#2462](https://github.com/adap/flower/pull/2462), " +"[#2463](https://github.com/adap/flower/pull/2463), " +"[#2461](https://github.com/adap/flower/pull/2461), " +"[#2469](https://github.com/adap/flower/pull/2469), " +"[#2466](https://github.com/adap/flower/pull/2466), " +"[#2471](https://github.com/adap/flower/pull/2471), " +"[#2472](https://github.com/adap/flower/pull/2472), " +"[#2470](https://github.com/adap/flower/pull/2470))" msgstr "" -#: ../../source/ref-changelog.md:443 +#: ../../source/ref-changelog.md:184 msgid "" -"Flower now has a new (experimental) Driver API which will enable fully " -"programmable, async, and multi-tenant Federated Learning and Federated " -"Analytics applications. Phew, that's a lot! Going forward, the Driver API" -" will be the abstraction that many upcoming features will be built on - " -"and you can start building those things now, too." +"**General updates to the simulation engine** " +"([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448), " +"[#2294](https://github.com/adap/flower/pull/2294))" msgstr "" -#: ../../source/ref-changelog.md:445 +#: ../../source/ref-changelog.md:186 msgid "" -"The Driver API also enables a new execution mode in which the server runs" -" indefinitely. Multiple individual workloads can run concurrently and " -"start and stop their execution independent of the server. This is " -"especially useful for users who want to deploy Flower in production." +"**General updates to Flower SDKs** " +"([#2288](https://github.com/adap/flower/pull/2288), " +"[#2429](https://github.com/adap/flower/pull/2429), " +"[#2555](https://github.com/adap/flower/pull/2555), " +"[#2543](https://github.com/adap/flower/pull/2543), " +"[#2544](https://github.com/adap/flower/pull/2544), " +"[#2597](https://github.com/adap/flower/pull/2597), " +"[#2623](https://github.com/adap/flower/pull/2623))" msgstr "" -#: ../../source/ref-changelog.md:447 +#: ../../source/ref-changelog.md:188 msgid "" -"To learn more, check out the `mt-pytorch` code example. We look forward " -"to you feedback!" 
+"**General improvements** " +"([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" msgstr "" -#: ../../source/ref-changelog.md:449 -msgid "" -"Please note: *The Driver API is still experimental and will likely change" -" significantly over time.*" +#: ../../source/ref-changelog.md:190 ../../source/ref-changelog.md:280 +#: ../../source/ref-changelog.md:344 ../../source/ref-changelog.md:398 +#: ../../source/ref-changelog.md:465 +msgid "Flower received many improvements under the hood, too many to list here." msgstr "" -#: ../../source/ref-changelog.md:451 +#: ../../source/ref-changelog.md:194 msgid "" -"**Add new Federated Analytics with Pandas example** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" +"**Remove support for Python 3.7** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" msgstr "" -#: ../../source/ref-changelog.md:453 +#: ../../source/ref-changelog.md:196 msgid "" -"A new code example (`quickstart-pandas`) demonstrates federated analytics" -" with Pandas and Flower. You can find it here: " -"[quickstart-pandas](https://github.com/adap/flower/tree/main/examples/quickstart-pandas)." +"Python 3.7 support was deprecated in Flower 1.5, and this release removes" +" support. Flower now requires Python 3.8." 
msgstr "" -#: ../../source/ref-changelog.md:455 +#: ../../source/ref-changelog.md:198 msgid "" -"**Add new strategies: Krum and MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" +"**Remove experimental argument** `rest` **from** `start_client` " +"([#2324](https://github.com/adap/flower/pull/2324))" msgstr "" -#: ../../source/ref-changelog.md:457 +#: ../../source/ref-changelog.md:200 msgid "" -"Edoardo, a computer science student at the Sapienza University of Rome, " -"contributed a new `Krum` strategy that enables users to easily use Krum " -"and MultiKrum in their workloads." +"The (still experimental) argument `rest` was removed from `start_client` " +"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " +"experimental REST API instead." msgstr "" -#: ../../source/ref-changelog.md:459 -msgid "" -"**Update C++ example to be compatible with Flower v1.2.0** " -"([#1495](https://github.com/adap/flower/pull/1495))" +#: ../../source/ref-changelog.md:202 +msgid "v1.5.0 (2023-08-31)" msgstr "" -#: ../../source/ref-changelog.md:461 +#: ../../source/ref-changelog.md:208 msgid "" -"The C++ code example has received a substantial update to make it " -"compatible with the latest version of Flower." +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " msgstr "" -#: ../../source/ref-changelog.md:463 +#: ../../source/ref-changelog.md:212 msgid "" -"**General improvements** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" +"**Introduce new simulation engine** " +"([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" msgstr "" -#: ../../source/ref-changelog.md:467 +#: ../../source/ref-changelog.md:214 msgid "" -"**Updated documentation** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +"The new simulation engine has been rewritten from the ground up, yet it " +"remains fully backwards compatible. It offers much improved stability and" +" memory handling, especially when working with GPUs. 
Simulations " +"transparently adapt to different settings to scale simulation in CPU-" +"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." msgstr "" -#: ../../source/ref-changelog.md:471 +#: ../../source/ref-changelog.md:216 msgid "" -"One highlight is the new [first time contributor " -"guide](https://flower.ai/docs/first-time-contributors.html): if you've " -"never contributed on GitHub before, this is the perfect place to start!" +"Comprehensive documentation includes a new [how-to run " +"simulations](https://flower.ai/docs/framework/how-to-run-" +"simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." msgstr "" -#: ../../source/ref-changelog.md:477 -msgid "v1.1.0 (2022-10-31)" +#: ../../source/ref-changelog.md:218 +msgid "" +"**Restructure Flower Docs** " +"([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" msgstr "" -#: ../../source/ref-changelog.md:481 +#: ../../source/ref-changelog.md:220 msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made the new version of Flower possible (in `git shortlog` order):" +"Much effort went into a completely restructured Flower docs experience. " +"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " +"divided into Flower Framework, Flower Baselines, Flower Android SDK, " +"Flower iOS SDK, and code example projects." msgstr "" -#: ../../source/ref-changelog.md:483 +#: ../../source/ref-changelog.md:222 msgid "" -"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" +"**Introduce Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" msgstr "" -#: ../../source/ref-changelog.md:487 +#: ../../source/ref-changelog.md:224 msgid "" -"**Introduce Differential Privacy wrappers (preview)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +"This is the first preview release of the Flower Swift SDK. Flower support" +" on iOS is improving, and alongside the Swift SDK and code example, there" +" is now also an iOS quickstart tutorial." msgstr "" -#: ../../source/ref-changelog.md:489 +#: ../../source/ref-changelog.md:226 msgid "" -"The first (experimental) preview of pluggable Differential Privacy " -"wrappers enables easy configuration and usage of differential privacy " -"(DP). The pluggable DP wrappers enable framework-agnostic **and** " -"strategy-agnostic usage of both client-side DP and server-side DP. Head " -"over to the Flower docs, a new explainer goes into more detail." +"**Introduce Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" msgstr "" -#: ../../source/ref-changelog.md:491 +#: ../../source/ref-changelog.md:228 msgid "" -"**New iOS CoreML code example** " -"([#1289](https://github.com/adap/flower/pull/1289))" +"This is the first preview release of the Flower Kotlin SDK. Flower " +"support on Android is improving, and alongside the Kotlin SDK and code " +"example, there is now also an Android quickstart tutorial." msgstr "" -#: ../../source/ref-changelog.md:493 +#: ../../source/ref-changelog.md:230 msgid "" -"Flower goes iOS! A massive new code example shows how Flower clients can " -"be built for iOS. The code example contains both Flower iOS SDK " -"components that can be used for many tasks, and one task example running " -"on CoreML." +"**Introduce new end-to-end testing infrastructure** " +"([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" msgstr "" -#: ../../source/ref-changelog.md:495 +#: ../../source/ref-changelog.md:232 msgid "" -"**New FedMedian strategy** " -"([#1461](https://github.com/adap/flower/pull/1461))" +"A new testing infrastructure ensures that new changes stay compatible " +"with existing framework integrations or strategies." 
msgstr "" -#: ../../source/ref-changelog.md:497 -msgid "" -"The new `FedMedian` strategy implements Federated Median (FedMedian) by " -"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." +#: ../../source/ref-changelog.md:234 +msgid "**Deprecate Python 3.7**" msgstr "" -#: ../../source/ref-changelog.md:499 +#: ../../source/ref-changelog.md:236 msgid "" -"**Log** `Client` **exceptions in Virtual Client Engine** " -"([#1493](https://github.com/adap/flower/pull/1493))" +"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" +" Python 3.7 is now deprecated and will be removed in an upcoming release." msgstr "" -#: ../../source/ref-changelog.md:501 +#: ../../source/ref-changelog.md:238 msgid "" -"All `Client` exceptions happening in the VCE are now logged by default " -"and not just exposed to the configured `Strategy` (via the `failures` " -"argument)." +"**Add new** `FedTrimmedAvg` **strategy** " +"([#1769](https://github.com/adap/flower/pull/1769), " +"[#1853](https://github.com/adap/flower/pull/1853))" msgstr "" -#: ../../source/ref-changelog.md:503 +#: ../../source/ref-changelog.md:240 msgid "" -"**Improve Virtual Client Engine internals** " -"([#1401](https://github.com/adap/flower/pull/1401), " -"[#1453](https://github.com/adap/flower/pull/1453))" +"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)." msgstr "" -#: ../../source/ref-changelog.md:505 +#: ../../source/ref-changelog.md:242 msgid "" -"Some internals of the Virtual Client Engine have been revamped. The VCE " -"now uses Ray 2.0 under the hood, the value type of the `client_resources`" -" dictionary changed to `float` to allow fractions of resources to be " -"allocated." +"**Introduce start_driver** " +"([#1697](https://github.com/adap/flower/pull/1697))" msgstr "" -#: ../../source/ref-changelog.md:507 +#: ../../source/ref-changelog.md:244 msgid "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " -"Client Engine**" +"In addition to `start_server` and using the raw Driver API, there is a " +"new `start_driver` function that allows for running `start_server` " +"scripts as a Flower driver with only a single-line code change. Check out" +" the `mt-pytorch` code example to see a working example using " +"`start_driver`." msgstr "" -#: ../../source/ref-changelog.md:509 +#: ../../source/ref-changelog.md:246 msgid "" -"The Virtual Client Engine now has full support for optional `Client` (and" -" `NumPyClient`) methods." +"**Add parameter aggregation to** `mt-pytorch` **code example** " +"([#1785](https://github.com/adap/flower/pull/1785))" msgstr "" -#: ../../source/ref-changelog.md:511 +#: ../../source/ref-changelog.md:248 msgid "" -"**Provide type information to packages using** `flwr` " -"([#1377](https://github.com/adap/flower/pull/1377))" +"The `mt-pytorch` example shows how to aggregate parameters when writing a" +" driver script. The included `driver.py` and `server.py` have been " +"aligned to demonstrate both the low-level way and the high-level way of " +"building server-side logic." msgstr "" -#: ../../source/ref-changelog.md:513 +#: ../../source/ref-changelog.md:250 msgid "" -"The package `flwr` is now bundled with a `py.typed` file indicating that " -"the package is typed. This enables typing support for projects or " -"packages that use `flwr` by enabling them to improve their code using " -"static type checkers like `mypy`." 
+"**Migrate experimental REST API to Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" msgstr "" -#: ../../source/ref-changelog.md:515 +#: ../../source/ref-changelog.md:252 msgid "" -"**Updated code example** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"The (experimental) REST API used to be implemented in " +"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" +" use [Starlette](https://www.starlette.io/) directly." msgstr "" -#: ../../source/ref-changelog.md:517 +#: ../../source/ref-changelog.md:254 msgid "" -"The code examples covering scikit-learn and PyTorch Lightning have been " -"updated to work with the latest version of Flower." +"Please note: The REST request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -#: ../../source/ref-changelog.md:519 +#: ../../source/ref-changelog.md:256 msgid "" -"**Updated documentation** " -"([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github.com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" +"**Introduce experimental gRPC request-response API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901))" msgstr "" -#: ../../source/ref-changelog.md:521 +#: ../../source/ref-changelog.md:258 msgid "" -"There have been so many documentation updates that it doesn't even make " -"sense to list them individually." +"In addition to the existing gRPC API (based on bidirectional streaming) " +"and the experimental REST API, there is now a new gRPC API that uses a " +"request-response model to communicate with client nodes." msgstr "" -#: ../../source/ref-changelog.md:523 +#: ../../source/ref-changelog.md:260 msgid "" -"**Restructured documentation** " -"([#1387](https://github.com/adap/flower/pull/1387))" +"Please note: The gRPC request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -#: ../../source/ref-changelog.md:525 +#: ../../source/ref-changelog.md:262 msgid "" -"The documentation has been restructured to make it easier to navigate. " -"This is just the first step in a larger effort to make the Flower " -"documentation the best documentation of any project ever. Stay tuned!" 
+"**Replace the experimental** `start_client(rest=True)` **with the new** " +"`start_client(transport=\"rest\")` " +"([#1880](https://github.com/adap/flower/pull/1880))" msgstr "" -#: ../../source/ref-changelog.md:527 +#: ../../source/ref-changelog.md:264 msgid "" -"**Open in Colab button** " -"([#1389](https://github.com/adap/flower/pull/1389))" +"The (experimental) `start_client` argument `rest` was deprecated in " +"favour of a new argument `transport`. `start_client(transport=\"rest\")` " +"will yield the same behaviour as `start_client(rest=True)` did before. " +"All code should migrate to the new argument `transport`. The deprecated " +"argument `rest` will be removed in a future release." msgstr "" -#: ../../source/ref-changelog.md:529 +#: ../../source/ref-changelog.md:266 msgid "" -"The four parts of the Flower Federated Learning Tutorial now come with a " -"new `Open in Colab` button. No need to install anything on your local " -"machine, you can now use and learn about Flower in your browser, it's " -"only a single click away." +"**Add a new gRPC option** " +"([#2197](https://github.com/adap/flower/pull/2197))" msgstr "" -#: ../../source/ref-changelog.md:531 +#: ../../source/ref-changelog.md:268 msgid "" -"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" +"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" +" option set to 0 by default. This prevents the clients from sending " +"keepalive pings when there is no outstanding stream." msgstr "" -#: ../../source/ref-changelog.md:533 +#: ../../source/ref-changelog.md:270 msgid "" -"The Flower Federated Learning Tutorial has two brand-new parts covering " -"custom strategies (still WIP) and the distinction between `Client` and " -"`NumPyClient`. The existing parts one and two have also been improved " -"(many small changes and fixes)." +"**Improve example notebooks** " +"([#2005](https://github.com/adap/flower/pull/2005))" msgstr "" -#: ../../source/ref-changelog.md:539 -msgid "v1.0.0 (2022-07-28)" +#: ../../source/ref-changelog.md:272 +msgid "There's a new 30min Federated Learning PyTorch tutorial!" msgstr "" -#: ../../source/ref-changelog.md:541 -msgid "Highlights" +#: ../../source/ref-changelog.md:274 +msgid "" +"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" msgstr "" -#: ../../source/ref-changelog.md:543 -msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" +#: ../../source/ref-changelog.md:276 +msgid "" +"Many examples have received significant updates, including simplified " +"advanced-tensorflow and advanced-pytorch examples, improved macOS " +"compatibility of TensorFlow examples, and code examples for simulation. 
A" +" major upgrade is that all code examples now have a `requirements.txt` " +"(in addition to `pyproject.toml`)." msgstr "" -#: ../../source/ref-changelog.md:544 -msgid "All `Client`/`NumPyClient` methods are now optional" +#: ../../source/ref-changelog.md:278 +msgid "" +"**General improvements** " +"([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" msgstr "" -#: ../../source/ref-changelog.md:545 -msgid "Configurable `get_parameters`" +#: ../../source/ref-changelog.md:284 ../../source/ref-changelog.md:348 +#: ../../source/ref-changelog.md:406 ../../source/ref-changelog.md:475 +#: ../../source/ref-changelog.md:537 +msgid "None" msgstr "" -#: ../../source/ref-changelog.md:546 -msgid "" -"Tons of small API cleanups resulting in a more coherent developer " -"experience" +#: ../../source/ref-changelog.md:286 +msgid "v1.4.0 (2023-04-21)" msgstr "" -#: ../../source/ref-changelog.md:550 +#: ../../source/ref-changelog.md:292 msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made Flower 1.0 possible (in reverse [GitHub " -"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" msgstr "" -#: ../../source/ref-changelog.md:552 +#: ../../source/ref-changelog.md:296 msgid "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), " -"[@mrinaald](https://github.com/mrinaald), " -"[@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), " -"[@sancarlim](https://github.com/sancarlim), " -"[@gubertoli](https://github.com/gubertoli), " -"[@Vingt100](https://github.com/Vingt100), " -"[@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), " -"[@jafermarq](https://github.com/jafermarq), " -"[@sisco0](https://github.com/sisco0), " -"[@akhilmathurs](https://github.com/akhilmathurs), " 
-"[@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), " -"[@pedropgusmao](https://github.com/pedropgusmao), " -"[@tanertopal](https://github.com/tanertopal), " -"[@danieljanes](https://github.com/danieljanes)." -msgstr "" - -#: ../../source/ref-changelog.md:556 -msgid "" -"**All arguments must be passed as keyword arguments** " -"([#1338](https://github.com/adap/flower/pull/1338))" +"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " +"example)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" msgstr "" -#: ../../source/ref-changelog.md:558 +#: ../../source/ref-changelog.md:298 msgid "" -"Pass all arguments as keyword arguments, positional arguments are not " -"longer supported. Code that uses positional arguments (e.g., " -"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " -"for each positional argument (e.g., " -"`start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())`)." +"XGBoost is a tree-based ensemble machine learning algorithm that uses " +"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" +" " +"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." msgstr "" -#: ../../source/ref-changelog.md:560 +#: ../../source/ref-changelog.md:300 msgid "" -"**Introduce configuration object** `ServerConfig` **in** `start_server` " -"**and** `start_simulation` " -"([#1317](https://github.com/adap/flower/pull/1317))" +"**Introduce iOS SDK (preview)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" msgstr "" -#: ../../source/ref-changelog.md:562 +#: ../../source/ref-changelog.md:302 msgid "" -"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " -"600.0}`, `start_server` and `start_simulation` now expect a configuration" -" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" -" arguments that as the previous config dict, but it makes writing type-" -"safe code easier and the default parameters values more transparent." +"This is a major update for anyone wanting to implement Federated Learning" +" on iOS mobile devices. We now have a swift iOS SDK present under " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" that will facilitate greatly the app creating process. To showcase its " +"use, the [iOS " +"example](https://github.com/adap/flower/tree/main/examples/ios) has also " +"been updated!" 
msgstr "" -#: ../../source/ref-changelog.md:564 +#: ../../source/ref-changelog.md:304 msgid "" -"**Rename built-in strategy parameters for clarity** " -"([#1334](https://github.com/adap/flower/pull/1334))" +"**Introduce new \"What is Federated Learning?\" tutorial** " +"([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" msgstr "" -#: ../../source/ref-changelog.md:566 +#: ../../source/ref-changelog.md:306 msgid "" -"The following built-in strategy parameters were renamed to improve " -"readability and consistency with other API's:" -msgstr "" - -#: ../../source/ref-changelog.md:568 -msgid "`fraction_eval` --> `fraction_evaluate`" -msgstr "" - -#: ../../source/ref-changelog.md:569 -msgid "`min_eval_clients` --> `min_evaluate_clients`" -msgstr "" - -#: ../../source/ref-changelog.md:570 -msgid "`eval_fn` --> `evaluate_fn`" +"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" +"what-is-federated-learning.html) in our documentation explains the basics" +" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" +" Learning to start their journey with Flower. Forward it to anyone who's " +"interested in Federated Learning!" msgstr "" -#: ../../source/ref-changelog.md:572 +#: ../../source/ref-changelog.md:308 msgid "" -"**Update default arguments of built-in strategies** " -"([#1278](https://github.com/adap/flower/pull/1278))" +"**Introduce new Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" msgstr "" -#: ../../source/ref-changelog.md:574 +#: ../../source/ref-changelog.md:310 msgid "" -"All built-in strategies now use `fraction_fit=1.0` and " -"`fraction_evaluate=1.0`, which means they select *all* currently " -"available clients for training and evaluation. Projects that relied on " -"the previous default values can get the previous behaviour by " -"initializing the strategy in the following way:" -msgstr "" - -#: ../../source/ref-changelog.md:576 -msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +"This new baseline replicates the MNIST+CNN task from the paper [Federated" +" Optimization in Heterogeneous Networks (Li et al., " +"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," +" which aims at making convergence more robust in heterogeneous settings." msgstr "" -#: ../../source/ref-changelog.md:578 +#: ../../source/ref-changelog.md:312 msgid "" -"**Add** `server_round` **to** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"**Introduce new Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" msgstr "" -#: ../../source/ref-changelog.md:580 +#: ../../source/ref-changelog.md:314 msgid "" -"The `Strategy` method `evaluate` now receives the current round of " -"federated learning/evaluation as the first parameter." +"This new baseline replicates an experiment evaluating the performance of " +"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " +"Benchmark for Federated Settings (Caldas et al., " +"2018)](https://arxiv.org/abs/1812.01097)." 
msgstr "" -#: ../../source/ref-changelog.md:582 +#: ../../source/ref-changelog.md:316 msgid "" -"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"**Introduce (experimental) REST API** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" msgstr "" -#: ../../source/ref-changelog.md:584 +#: ../../source/ref-changelog.md:318 msgid "" -"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " -"three parameters: (1) The current round of federated learning/evaluation " -"(`server_round`), (2) the model parameters to evaluate (`parameters`), " -"and (3) a config dictionary (`config`)." +"A new REST API has been introduced as an alternative to the gRPC-based " +"communication stack. In this initial version, the REST API only supports " +"anonymous clients." msgstr "" -#: ../../source/ref-changelog.md:586 +#: ../../source/ref-changelog.md:320 msgid "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"Please note: The REST API is still experimental and will likely change " +"significantly over time." msgstr "" -#: ../../source/ref-changelog.md:588 +#: ../../source/ref-changelog.md:322 msgid "" -"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " -"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " -"current round of federated learning/evaluation as their first parameter. " -"To improve reaability and avoid confusion with *random*, this parameter " -"has been renamed from `rnd` to `server_round`." +"**Improve the (experimental) Driver API** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" msgstr "" -#: ../../source/ref-changelog.md:590 +#: ../../source/ref-changelog.md:324 msgid "" -"**Move** `flwr.dataset` **to** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" -msgstr "" - -#: ../../source/ref-changelog.md:592 -msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." +"The Driver API is still an experimental feature, but this release " +"introduces some major upgrades. One of the main improvements is the " +"introduction of an SQLite database to store server state on disk (instead" +" of in-memory). Another improvement is that tasks (instructions or " +"results) that have been delivered will now be deleted. This greatly " +"improves the memory efficiency of a long-running Flower server." 
msgstr "" -#: ../../source/ref-changelog.md:594 +#: ../../source/ref-changelog.md:326 msgid "" -"**Remove experimental strategies** " -"([#1280](https://github.com/adap/flower/pull/1280))" +"**Fix spilling issues related to Ray during simulations** " +"([#1698](https://github.com/adap/flower/pull/1698))" msgstr "" -#: ../../source/ref-changelog.md:596 +#: ../../source/ref-changelog.md:328 msgid "" -"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " -"`FedFSv1`)." +"While running long simulations, `ray` was sometimes spilling huge amounts" +" of data that would make the training unable to continue. This is now " +"fixed! 🎉" msgstr "" -#: ../../source/ref-changelog.md:598 +#: ../../source/ref-changelog.md:330 msgid "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**Add new example using** `TabNet` **and Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" msgstr "" -#: ../../source/ref-changelog.md:600 +#: ../../source/ref-changelog.md:332 msgid "" -"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " -"capture what this type is all about." +"TabNet is a powerful and flexible framework for training machine learning" +" models on tabular data. We now have a federated example using Flower: " +"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" +"/quickstart-tabnet)." msgstr "" -#: ../../source/ref-changelog.md:602 +#: ../../source/ref-changelog.md:334 msgid "" -"**Remove antiquated** `force_final_distributed_eval` **from** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**Add new how-to guide for monitoring simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" msgstr "" -#: ../../source/ref-changelog.md:604 +#: ../../source/ref-changelog.md:336 msgid "" -"The `start_server` parameter `force_final_distributed_eval` has long been" -" a historic artefact, in this release it is finally gone for good." +"We now have a documentation guide to help users monitor their performance" +" during simulations." msgstr "" -#: ../../source/ref-changelog.md:606 +#: ../../source/ref-changelog.md:338 msgid "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" +"**Add training metrics to** `History` **object during simulations** " +"([#1696](https://github.com/adap/flower/pull/1696))" msgstr "" -#: ../../source/ref-changelog.md:608 +#: ../../source/ref-changelog.md:340 msgid "" -"The `get_parameters` method now accepts a configuration dictionary, just " -"like `get_properties`, `fit`, and `evaluate`." +"The `fit_metrics_aggregation_fn` can be used to aggregate training " +"metrics, but previous releases did not save the results in the `History` " +"object. This is now the case!" 
msgstr "" -#: ../../source/ref-changelog.md:610 +#: ../../source/ref-changelog.md:342 msgid "" -"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " -"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " +"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" msgstr "" -#: ../../source/ref-changelog.md:612 -msgid "" -"The `start_simulation` function now accepts a configuration dictionary " -"`config` instead of the `num_rounds` integer. This improves the " -"consistency between `start_simulation` and `start_server` and makes " -"transitioning between the two easier." 
+#: ../../source/ref-changelog.md:350 +msgid "v1.3.0 (2023-02-06)" msgstr "" -#: ../../source/ref-changelog.md:616 +#: ../../source/ref-changelog.md:356 msgid "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" msgstr "" -#: ../../source/ref-changelog.md:618 +#: ../../source/ref-changelog.md:360 msgid "" -"The previous Flower release introduced experimental support for Python " -"3.10, this release declares Python 3.10 support as stable." +"**Add support for** `workload_id` **and** `group_id` **in Driver API** " +"([#1595](https://github.com/adap/flower/pull/1595))" msgstr "" -#: ../../source/ref-changelog.md:620 +#: ../../source/ref-changelog.md:362 msgid "" -"**Make all** `Client` **and** `NumPyClient` **methods optional** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" +"The (experimental) Driver API now supports a `workload_id` that can be " +"used to identify which workload a task belongs to. It also supports a new" +" `group_id` that can be used, for example, to indicate the current " +"training round. Both the `workload_id` and `group_id` enable client nodes" +" to decide whether they want to handle a task or not." msgstr "" -#: ../../source/ref-changelog.md:622 +#: ../../source/ref-changelog.md:364 msgid "" -"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " -"`fit`, and `evaluate` are all optional. This enables writing clients that" -" implement, for example, only `fit`, but no other method. No need to " -"implement `evaluate` when using centralized evaluation!" +"**Make Driver API and Fleet API address configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" msgstr "" -#: ../../source/ref-changelog.md:624 +#: ../../source/ref-changelog.md:366 msgid "" -"**Enable passing a** `Server` **instance to** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" +"The (experimental) long-running Flower server (Driver API and Fleet API) " +"can now configure the server address of both Driver API (via `--driver-" +"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" msgstr "" -#: ../../source/ref-changelog.md:626 +#: ../../source/ref-changelog.md:368 msgid "" -"Similar to `start_server`, `start_simulation` now accepts a full `Server`" -" instance. This enables users to heavily customize the execution of " -"eperiments and opens the door to running, for example, async FL using the" -" Virtual Client Engine." +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" msgstr "" -#: ../../source/ref-changelog.md:628 -msgid "" -"**Update code examples** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +#: ../../source/ref-changelog.md:370 +msgid "Both IPv4 and IPv6 addresses are supported." 
msgstr "" -#: ../../source/ref-changelog.md:630 +#: ../../source/ref-changelog.md:372 msgid "" -"Many code examples received small or even large maintenance updates, " -"among them are" -msgstr "" - -#: ../../source/ref-changelog.md:632 -msgid "`scikit-learn`" -msgstr "" - -#: ../../source/ref-changelog.md:633 -msgid "`simulation_pytorch`" -msgstr "" - -#: ../../source/ref-changelog.md:634 -msgid "`quickstart_pytorch`" -msgstr "" - -#: ../../source/ref-changelog.md:635 -msgid "`quickstart_simulation`" -msgstr "" - -#: ../../source/ref-changelog.md:636 -msgid "`quickstart_tensorflow`" -msgstr "" - -#: ../../source/ref-changelog.md:637 -msgid "`advanced_tensorflow`" +"**Add new example of Federated Learning using fastai and Flower** " +"([#1598](https://github.com/adap/flower/pull/1598))" msgstr "" -#: ../../source/ref-changelog.md:639 +#: ../../source/ref-changelog.md:374 msgid "" -"**Remove the obsolete simulation example** " -"([#1328](https://github.com/adap/flower/pull/1328))" +"A new code example (`quickstart-fastai`) demonstrates federated learning " +"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." msgstr "" -#: ../../source/ref-changelog.md:641 +#: ../../source/ref-changelog.md:376 msgid "" -"Removes the obsolete `simulation` example and renames " -"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " -"naming of `simulation_pytorch`" +"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" +" versions of Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" msgstr "" -#: ../../source/ref-changelog.md:643 +#: ../../source/ref-changelog.md:378 msgid "" -"**Update documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +"The Android code example has received a substantial update: the project " +"is compatible with Flower 1.0 (and later), the UI received a full " +"refresh, and the project is updated to be compatible with newer Android " +"tooling." msgstr "" -#: ../../source/ref-changelog.md:645 +#: ../../source/ref-changelog.md:380 msgid "" -"One substantial documentation update fixes multiple smaller rendering " -"issues, makes titles more succinct to improve navigation, removes a " -"deprecated library, updates documentation dependencies, includes the " -"`flwr.common` module in the API reference, includes support for markdown-" -"based documentation, migrates the changelog from `.rst` to `.md`, and " -"fixes a number of smaller details!" 
-msgstr "" - -#: ../../source/ref-changelog.md:647 ../../source/ref-changelog.md:702 -#: ../../source/ref-changelog.md:771 ../../source/ref-changelog.md:810 -msgid "**Minor updates**" +"**Add new `FedProx` strategy** " +"([#1619](https://github.com/adap/flower/pull/1619))" msgstr "" -#: ../../source/ref-changelog.md:649 +#: ../../source/ref-changelog.md:382 msgid "" -"Add round number to fit and evaluate log messages " -"([#1266](https://github.com/adap/flower/pull/1266))" +"This " +"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" is almost identical to " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" but helps users replicate what is described in this " +"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " +"parameter called `proximal_mu` to regularize the local models with " +"respect to the global models." msgstr "" -#: ../../source/ref-changelog.md:650 +#: ../../source/ref-changelog.md:384 msgid "" -"Add secure gRPC connection to the `advanced_tensorflow` code example " -"([#847](https://github.com/adap/flower/pull/847))" +"**Add new metrics to telemetry events** " +"([#1640](https://github.com/adap/flower/pull/1640))" msgstr "" -#: ../../source/ref-changelog.md:651 +#: ../../source/ref-changelog.md:386 msgid "" -"Update developer tooling " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" +"An updated event structure allows, for example, the clustering of events " +"within the same workload." msgstr "" -#: ../../source/ref-changelog.md:652 +#: ../../source/ref-changelog.md:388 msgid "" -"Rename ProtoBuf messages to improve consistency " -"([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" -msgstr "" - -#: ../../source/ref-changelog.md:654 -msgid "v0.19.0 (2022-05-18)" +"**Add new custom strategy tutorial section** " +"[#1623](https://github.com/adap/flower/pull/1623)" msgstr "" -#: ../../source/ref-changelog.md:658 +#: ../../source/ref-changelog.md:390 msgid "" -"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" +"The Flower tutorial now has a new section that covers implementing a " +"custom strategy from scratch: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:660 +#: ../../source/ref-changelog.md:392 msgid "" -"The first preview release of Flower Baselines has arrived! We're " -"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " -"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " -"With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.ai/docs/contributing-" -"baselines.html)." 
+"**Add new custom serialization tutorial section** " +"([#1622](https://github.com/adap/flower/pull/1622))" msgstr "" -#: ../../source/ref-changelog.md:662 +#: ../../source/ref-changelog.md:394 msgid "" -"**C++ client SDK (preview) and code example** " -"([#1111](https://github.com/adap/flower/pull/1111))" +"The Flower tutorial now has a new section that covers custom " +"serialization: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:664 +#: ../../source/ref-changelog.md:396 msgid "" -"Preview support for Flower clients written in C++. The C++ preview " -"includes a Flower client SDK and a quickstart code example that " -"demonstrates a simple C++ client using the SDK." +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github.com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" msgstr "" -#: ../../source/ref-changelog.md:666 +#: ../../source/ref-changelog.md:400 msgid "" -"**Add experimental support for Python 3.10 and Python 3.11** " -"([#1135](https://github.com/adap/flower/pull/1135))" +"**Updated documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" msgstr "" -#: ../../source/ref-changelog.md:668 +#: ../../source/ref-changelog.md:402 ../../source/ref-changelog.md:469 msgid "" -"Python 3.10 is the latest stable release of Python and Python 3.11 is due" -" to be released in October. 
This Flower release adds experimental support" -" for both Python versions." +"As usual, the documentation has improved quite a bit. It is another step " +"in our effort to make the Flower documentation the best documentation of " +"any project. Stay tuned and as always, feel free to provide feedback!" msgstr "" -#: ../../source/ref-changelog.md:670 -msgid "" -"**Aggregate custom metrics through user-provided functions** " -"([#1144](https://github.com/adap/flower/pull/1144))" +#: ../../source/ref-changelog.md:408 +msgid "v1.2.0 (2023-01-13)" msgstr "" -#: ../../source/ref-changelog.md:672 +#: ../../source/ref-changelog.md:414 msgid "" -"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" -" customize the strategy. Built-in strategies support two new arguments, " -"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " -"allow passing custom metric aggregation functions." +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." +" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" msgstr "" -#: ../../source/ref-changelog.md:674 +#: ../../source/ref-changelog.md:418 msgid "" -"**User-configurable round timeout** " -"([#1162](https://github.com/adap/flower/pull/1162))" +"**Introduce new Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" msgstr "" -#: ../../source/ref-changelog.md:676 +#: ../../source/ref-changelog.md:420 msgid "" -"A new configuration value allows the round timeout to be set for " -"`start_server` and `start_simulation`. If the `config` dictionary " -"contains a `round_timeout` key (with a `float` value in seconds), the " -"server will wait *at least* `round_timeout` seconds before it closes the " -"connection." +"Over the coming weeks, we will be releasing a number of new reference " +"implementations useful especially to FL newcomers. They will typically " +"revisit well known papers from the literature, and be suitable for " +"integration in your own application or for experimentation, in order to " +"deepen your knowledge of FL in general. Today's release is the first in " +"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" msgstr "" -#: ../../source/ref-changelog.md:678 +#: ../../source/ref-changelog.md:422 msgid "" -"**Enable both federated evaluation and centralized evaluation to be used " -"at the same time in all built-in strategies** " -"([#1091](https://github.com/adap/flower/pull/1091))" +"**Improve GPU support in simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" msgstr "" -#: ../../source/ref-changelog.md:680 +#: ../../source/ref-changelog.md:424 msgid "" -"Built-in strategies can now perform both federated evaluation (i.e., " -"client-side) and centralized evaluation (i.e., server-side) in the same " -"round. Federated evaluation can be disabled by setting `fraction_eval` to" -" `0.0`." +"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" +" to improve GPU support. The update includes some of the hard-earned " +"lessons from scaling simulations in GPU cluster environments. New " +"defaults make running GPU-based simulations substantially more robust." 
msgstr "" -#: ../../source/ref-changelog.md:682 +#: ../../source/ref-changelog.md:426 msgid "" -"**Two new Jupyter Notebook tutorials** " -"([#1141](https://github.com/adap/flower/pull/1141))" +"**Improve GPU support in Jupyter Notebook tutorials** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" msgstr "" -#: ../../source/ref-changelog.md:684 +#: ../../source/ref-changelog.md:428 msgid "" -"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " -"basic and intermediate Flower features:" +"Some users reported that Jupyter Notebooks have not always been easy to " +"use on GPU instances. We listened and made improvements to all of our " +"Jupyter notebooks! Check out the updated notebooks here:" msgstr "" -#: ../../source/ref-changelog.md:686 +#: ../../source/ref-changelog.md:430 msgid "" -"*An Introduction to Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" +"[An Introduction to Federated Learning](https://flower.ai/docs/framework" +"/tutorial-get-started-with-flower-pytorch.html)" msgstr "" -#: ../../source/ref-changelog.md:688 +#: ../../source/ref-changelog.md:431 msgid "" -"*Using Strategies in Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +"[Strategies in Federated Learning](https://flower.ai/docs/framework" +"/tutorial-use-a-federated-learning-strategy-pytorch.html)" msgstr "" -#: ../../source/ref-changelog.md:690 +#: ../../source/ref-changelog.md:432 msgid "" -"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " -"([#1076](https://github.com/adap/flower/pull/1076))" +"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" +"-strategy-from-scratch-pytorch.html)" msgstr "" -#: ../../source/ref-changelog.md:692 +#: ../../source/ref-changelog.md:433 msgid "" -"The new `FedAvgM` strategy implements Federated Averaging with Server " -"Momentum \\[Hsu et al., 2019\\]." +"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" +"customize-the-client-pytorch.html)" msgstr "" -#: ../../source/ref-changelog.md:694 +#: ../../source/ref-changelog.md:435 msgid "" -"**New advanced PyTorch code example** " -"([#1007](https://github.com/adap/flower/pull/1007))" +"**Introduce optional telemetry** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" msgstr "" -#: ../../source/ref-changelog.md:696 +#: ../../source/ref-changelog.md:437 msgid "" -"A new code example (`advanced_pytorch`) demonstrates advanced Flower " -"concepts with PyTorch." +"After a [request for " +"feedback](https://github.com/adap/flower/issues/1534) from the community," +" the Flower open-source project introduces optional collection of " +"*anonymous* usage metrics to make well-informed decisions to improve " +"Flower. Doing this enables the Flower team to understand how Flower is " +"used and what challenges users might face." 
msgstr "" -#: ../../source/ref-changelog.md:698 +#: ../../source/ref-changelog.md:439 msgid "" -"**New JAX code example** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users who do not want to share anonymous usage metrics. " +"[Read more.](https://flower.ai/docs/telemetry.html)." msgstr "" -#: ../../source/ref-changelog.md:700 +#: ../../source/ref-changelog.md:441 msgid "" -"A new code example (`jax_from_centralized_to_federated`) shows federated " -"learning with JAX and Flower." +"**Introduce (experimental) Driver API** " +"([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" msgstr "" -#: ../../source/ref-changelog.md:704 +#: ../../source/ref-changelog.md:443 msgid "" -"New option to keep Ray running if Ray was already initialized in " -"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +"Flower now has a new (experimental) Driver API which will enable fully " +"programmable, async, and multi-tenant Federated Learning and Federated " +"Analytics applications. Phew, that's a lot! Going forward, the Driver API" +" will be the abstraction that many upcoming features will be built on - " +"and you can start building those things now, too." msgstr "" -#: ../../source/ref-changelog.md:705 +#: ../../source/ref-changelog.md:445 msgid "" -"Add support for custom `ClientManager` as a `start_simulation` parameter " -"([#1171](https://github.com/adap/flower/pull/1171))" +"The Driver API also enables a new execution mode in which the server runs" +" indefinitely. Multiple individual workloads can run concurrently and " +"start and stop their execution independent of the server. This is " +"especially useful for users who want to deploy Flower in production." msgstr "" -#: ../../source/ref-changelog.md:706 +#: ../../source/ref-changelog.md:447 msgid "" -"New documentation for [implementing " -"strategies](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" +"To learn more, check out the `mt-pytorch` code example. We look forward " +"to you feedback!" 
msgstr "" -#: ../../source/ref-changelog.md:707 +#: ../../source/ref-changelog.md:449 msgid "" -"New mobile-friendly documentation theme " -"([#1174](https://github.com/adap/flower/pull/1174))" +"Please note: *The Driver API is still experimental and will likely change" +" significantly over time.*" msgstr "" -#: ../../source/ref-changelog.md:708 +#: ../../source/ref-changelog.md:451 msgid "" -"Limit version range for (optional) `ray` dependency to include only " -"compatible releases (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" +"**Add new Federated Analytics with Pandas example** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" msgstr "" -#: ../../source/ref-changelog.md:712 +#: ../../source/ref-changelog.md:453 msgid "" -"**Remove deprecated support for Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" +"A new code example (`quickstart-pandas`) demonstrates federated analytics" +" with Pandas and Flower. You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." msgstr "" -#: ../../source/ref-changelog.md:713 +#: ../../source/ref-changelog.md:455 msgid "" -"**Remove deprecated KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" +"**Add new strategies: Krum and MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" msgstr "" -#: ../../source/ref-changelog.md:714 +#: ../../source/ref-changelog.md:457 msgid "" -"**Remove deprecated no-op extra installs** " -"([#973](https://github.com/adap/flower/pull/973))" +"Edoardo, a computer science student at the Sapienza University of Rome, " +"contributed a new `Krum` strategy that enables users to easily use Krum " +"and MultiKrum in their workloads." msgstr "" -#: ../../source/ref-changelog.md:715 +#: ../../source/ref-changelog.md:459 msgid "" -"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " -"([#869](https://github.com/adap/flower/pull/869))" +"**Update C++ example to be compatible with Flower v1.2.0** " +"([#1495](https://github.com/adap/flower/pull/1495))" msgstr "" -#: ../../source/ref-changelog.md:716 +#: ../../source/ref-changelog.md:461 msgid "" -"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " -"([#1107](https://github.com/adap/flower/pull/1107))" +"The C++ code example has received a substantial update to make it " +"compatible with the latest version of Flower." 
msgstr "" -#: ../../source/ref-changelog.md:717 +#: ../../source/ref-changelog.md:463 msgid "" -"**Remove deprecated DefaultStrategy strategy** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**General improvements** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" msgstr "" -#: ../../source/ref-changelog.md:718 +#: ../../source/ref-changelog.md:467 msgid "" -"**Remove deprecated support for eval_fn accuracy return value** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**Updated documentation** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" msgstr "" -#: ../../source/ref-changelog.md:719 +#: ../../source/ref-changelog.md:471 msgid "" -"**Remove deprecated support for passing initial parameters as NumPy " -"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" -msgstr "" - -#: ../../source/ref-changelog.md:721 -msgid "v0.18.0 (2022-02-28)" +"One highlight is the new [first time contributor " +"guide](https://flower.ai/docs/first-time-contributors.html): if you've " +"never contributed on GitHub before, this is the perfect place to start!" msgstr "" -#: ../../source/ref-changelog.md:725 -msgid "" -"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " -"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" +#: ../../source/ref-changelog.md:477 +msgid "v1.1.0 (2022-10-31)" msgstr "" -#: ../../source/ref-changelog.md:727 +#: ../../source/ref-changelog.md:481 msgid "" -"Simulations (using the Virtual Client Engine through `start_simulation`) " -"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " -"installing Flower with the `simulation` extra (`pip install " -"flwr[simulation]`)." +"We would like to give our **special thanks** to all the contributors who " +"made the new version of Flower possible (in `git shortlog` order):" msgstr "" -#: ../../source/ref-changelog.md:729 +#: ../../source/ref-changelog.md:483 msgid "" -"**New Jupyter Notebook code example** " -"([#833](https://github.com/adap/flower/pull/833))" +"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" msgstr "" -#: ../../source/ref-changelog.md:731 +#: ../../source/ref-changelog.md:487 msgid "" -"A new code example (`quickstart_simulation`) demonstrates Flower " -"simulations using the Virtual Client Engine through Jupyter Notebook " -"(incl. Google Colab)." +"**Introduce Differential Privacy wrappers (preview)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" msgstr "" -#: ../../source/ref-changelog.md:733 +#: ../../source/ref-changelog.md:489 msgid "" -"**Client properties (feature preview)** " -"([#795](https://github.com/adap/flower/pull/795))" +"The first (experimental) preview of pluggable Differential Privacy " +"wrappers enables easy configuration and usage of differential privacy " +"(DP). The pluggable DP wrappers enable framework-agnostic **and** " +"strategy-agnostic usage of both client-side DP and server-side DP. Head " +"over to the Flower docs, a new explainer goes into more detail." msgstr "" -#: ../../source/ref-changelog.md:735 +#: ../../source/ref-changelog.md:491 msgid "" -"Clients can implement a new method `get_properties` to enable server-side" -" strategies to query client properties." +"**New iOS CoreML code example** " +"([#1289](https://github.com/adap/flower/pull/1289))" msgstr "" -#: ../../source/ref-changelog.md:737 +#: ../../source/ref-changelog.md:493 msgid "" -"**Experimental Android support with TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" +"Flower goes iOS! A massive new code example shows how Flower clients can " +"be built for iOS. The code example contains both Flower iOS SDK " +"components that can be used for many tasks, and one task example running " +"on CoreML." msgstr "" -#: ../../source/ref-changelog.md:739 +#: ../../source/ref-changelog.md:495 msgid "" -"Android support has finally arrived in `main`! Flower is both client-" -"agnostic and framework-agnostic by design. One can integrate arbitrary " -"client platforms and with this release, using Flower on Android has " -"become a lot easier." +"**New FedMedian strategy** " +"([#1461](https://github.com/adap/flower/pull/1461))" msgstr "" -#: ../../source/ref-changelog.md:741 +#: ../../source/ref-changelog.md:497 msgid "" -"The example uses TFLite on the client side, along with a new " -"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " -"still experimental, but they are a first step towards a fully-fledged " -"Android SDK and a unified `FedAvg` implementation that integrated the new" -" functionality from `FedAvgAndroid`." +"The new `FedMedian` strategy implements Federated Median (FedMedian) by " +"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." msgstr "" -#: ../../source/ref-changelog.md:743 +#: ../../source/ref-changelog.md:499 msgid "" -"**Make gRPC keepalive time user-configurable and decrease default " -"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +"**Log** `Client` **exceptions in Virtual Client Engine** " +"([#1493](https://github.com/adap/flower/pull/1493))" msgstr "" -#: ../../source/ref-changelog.md:745 +#: ../../source/ref-changelog.md:501 msgid "" -"The default gRPC keepalive time has been reduced to increase the " -"compatibility of Flower with more cloud environments (for example, " -"Microsoft Azure). 
Users can configure the keepalive time to customize the" -" gRPC stack based on specific requirements." +"All `Client` exceptions happening in the VCE are now logged by default " +"and not just exposed to the configured `Strategy` (via the `failures` " +"argument)." msgstr "" -#: ../../source/ref-changelog.md:747 +#: ../../source/ref-changelog.md:503 msgid "" -"**New differential privacy example using Opacus and PyTorch** " -"([#805](https://github.com/adap/flower/pull/805))" +"**Improve Virtual Client Engine internals** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" msgstr "" -#: ../../source/ref-changelog.md:749 +#: ../../source/ref-changelog.md:505 msgid "" -"A new code example (`opacus`) demonstrates differentially-private " -"federated learning with Opacus, PyTorch, and Flower." +"Some internals of the Virtual Client Engine have been revamped. The VCE " +"now uses Ray 2.0 under the hood, the value type of the `client_resources`" +" dictionary changed to `float` to allow fractions of resources to be " +"allocated." msgstr "" -#: ../../source/ref-changelog.md:751 +#: ../../source/ref-changelog.md:507 msgid "" -"**New Hugging Face Transformers code example** " -"([#863](https://github.com/adap/flower/pull/863))" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" msgstr "" -#: ../../source/ref-changelog.md:753 +#: ../../source/ref-changelog.md:509 msgid "" -"A new code example (`quickstart_huggingface`) demonstrates usage of " -"Hugging Face Transformers with Flower." +"The Virtual Client Engine now has full support for optional `Client` (and" +" `NumPyClient`) methods." msgstr "" -#: ../../source/ref-changelog.md:755 +#: ../../source/ref-changelog.md:511 msgid "" -"**New MLCube code example** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +"**Provide type information to packages using** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" msgstr "" -#: ../../source/ref-changelog.md:757 +#: ../../source/ref-changelog.md:513 msgid "" -"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " -"with Flower." +"The package `flwr` is now bundled with a `py.typed` file indicating that " +"the package is typed. This enables typing support for projects or " +"packages that use `flwr` by enabling them to improve their code using " +"static type checkers like `mypy`." msgstr "" -#: ../../source/ref-changelog.md:759 +#: ../../source/ref-changelog.md:515 msgid "" -"**SSL-enabled server and client** " -"([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +"**Updated code example** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" msgstr "" -#: ../../source/ref-changelog.md:761 +#: ../../source/ref-changelog.md:517 msgid "" -"SSL enables secure encrypted connections between clients and servers. " -"This release open-sources the Flower secure gRPC implementation to make " -"encrypted communication channels accessible to all Flower users." 
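To make the `client_resources` change described above concrete, here is a minimal simulation sketch that assigns fractional GPU shares to each virtual client. `TinyClient` is a placeholder that does no real training, and the resource keys `num_cpus`/`num_gpus` follow the Flower simulation API; the numbers themselves are illustrative only.

```python
# Minimal sketch, assuming Flower is installed with the simulation extra
# (`pip install flwr[simulation]`); the client does no real work.
import numpy as np
import flwr as fl


class TinyClient(fl.client.NumPyClient):
    def get_parameters(self, config):
        return [np.zeros(3)]  # placeholder model weights

    def fit(self, parameters, config):
        return parameters, 1, {}  # no local training, just echo the weights back

    # `evaluate` is intentionally omitted: optional methods are supported in the VCE


def client_fn(cid: str) -> TinyClient:
    return TinyClient()


fl.simulation.start_simulation(
    client_fn=client_fn,
    num_clients=10,
    config=fl.server.ServerConfig(num_rounds=3),
    client_resources={"num_cpus": 1.0, "num_gpus": 0.25},  # floats let four clients share one GPU
)
```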
+"The code examples covering scikit-learn and PyTorch Lightning have been " +"updated to work with the latest version of Flower." msgstr "" -#: ../../source/ref-changelog.md:763 +#: ../../source/ref-changelog.md:519 msgid "" -"**Updated** `FedAdam` **and** `FedYogi` **strategies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +"**Updated documentation** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" msgstr "" -#: ../../source/ref-changelog.md:765 +#: ../../source/ref-changelog.md:521 msgid "" -"`FedAdam` and `FedAdam` match the latest version of the Adaptive " -"Federated Optimization paper." +"There have been so many documentation updates that it doesn't even make " +"sense to list them individually." msgstr "" -#: ../../source/ref-changelog.md:767 +#: ../../source/ref-changelog.md:523 msgid "" -"**Initialize** `start_simulation` **with a list of client IDs** " -"([#860](https://github.com/adap/flower/pull/860))" +"**Restructured documentation** " +"([#1387](https://github.com/adap/flower/pull/1387))" msgstr "" -#: ../../source/ref-changelog.md:769 +#: ../../source/ref-changelog.md:525 msgid "" -"`start_simulation` can now be called with a list of client IDs " -"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " -"`client_fn` whenever a client needs to be initialized, which can make it " -"easier to load data partitions that are not accessible through `int` " -"identifiers." +"The documentation has been restructured to make it easier to navigate. " +"This is just the first step in a larger effort to make the Flower " +"documentation the best documentation of any project ever. Stay tuned!" msgstr "" -#: ../../source/ref-changelog.md:773 +#: ../../source/ref-changelog.md:527 msgid "" -"Update `num_examples` calculation in PyTorch code examples in " -"([#909](https://github.com/adap/flower/pull/909))" +"**Open in Colab button** " +"([#1389](https://github.com/adap/flower/pull/1389))" msgstr "" -#: ../../source/ref-changelog.md:774 +#: ../../source/ref-changelog.md:529 msgid "" -"Expose Flower version through `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" +"The four parts of the Flower Federated Learning Tutorial now come with a " +"new `Open in Colab` button. No need to install anything on your local " +"machine, you can now use and learn about Flower in your browser, it's " +"only a single click away." 
msgstr "" -#: ../../source/ref-changelog.md:775 +#: ../../source/ref-changelog.md:531 msgid "" -"`start_server` in `app.py` now returns a `History` object containing " -"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" msgstr "" -#: ../../source/ref-changelog.md:776 +#: ../../source/ref-changelog.md:533 msgid "" -"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " -"([#978](https://github.com/adap/flower/pull/978))" +"The Flower Federated Learning Tutorial has two brand-new parts covering " +"custom strategies (still WIP) and the distinction between `Client` and " +"`NumPyClient`. The existing parts one and two have also been improved " +"(many small changes and fixes)." msgstr "" -#: ../../source/ref-changelog.md:777 -msgid "" -"Increase sleep time after server start to three seconds in all code " -"examples ([#1086](https://github.com/adap/flower/pull/1086))" +#: ../../source/ref-changelog.md:539 +msgid "v1.0.0 (2022-07-28)" msgstr "" -#: ../../source/ref-changelog.md:778 -msgid "" -"Added a new FAQ section to the documentation " -"([#948](https://github.com/adap/flower/pull/948))" +#: ../../source/ref-changelog.md:541 +msgid "Highlights" msgstr "" -#: ../../source/ref-changelog.md:779 -msgid "" -"And many more under-the-hood changes, library updates, documentation " -"changes, and tooling improvements!" +#: ../../source/ref-changelog.md:543 +msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" msgstr "" -#: ../../source/ref-changelog.md:783 -msgid "" -"**Removed** `flwr_example` **and** `flwr_experimental` **from release " -"build** ([#869](https://github.com/adap/flower/pull/869))" +#: ../../source/ref-changelog.md:544 +msgid "All `Client`/`NumPyClient` methods are now optional" msgstr "" -#: ../../source/ref-changelog.md:785 -msgid "" -"The packages `flwr_example` and `flwr_experimental` have been deprecated " -"since Flower 0.12.0 and they are not longer included in Flower release " -"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " -"an upcoming release." +#: ../../source/ref-changelog.md:545 +msgid "Configurable `get_parameters`" msgstr "" -#: ../../source/ref-changelog.md:787 -msgid "v0.17.0 (2021-09-24)" +#: ../../source/ref-changelog.md:546 +msgid "" +"Tons of small API cleanups resulting in a more coherent developer " +"experience" msgstr "" -#: ../../source/ref-changelog.md:791 +#: ../../source/ref-changelog.md:550 msgid "" -"**Experimental virtual client engine** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" +"We would like to give our **special thanks** to all the contributors who " +"made Flower 1.0 possible (in reverse [GitHub " +"Contributors](https://github.com/adap/flower/graphs/contributors) order):" msgstr "" -#: ../../source/ref-changelog.md:793 +#: ../../source/ref-changelog.md:552 msgid "" -"One of Flower's goals is to enable research at scale. 
This release " -"enables a first (experimental) peek at a major new feature, codenamed the" -" virtual client engine. Virtual clients enable simulations that scale to " -"a (very) large number of clients on a single machine or compute cluster. " -"The easiest way to test the new functionality is to look at the two new " -"code examples called `quickstart_simulation` and `simulation_pytorch`." +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." msgstr "" -#: ../../source/ref-changelog.md:795 +#: ../../source/ref-changelog.md:556 msgid "" -"The feature is still experimental, so there's no stability guarantee for " -"the API. It's also not quite ready for prime time and comes with a few " -"known caveats. However, those who are curious are encouraged to try it " -"out and share their thoughts." +"**All arguments must be passed as keyword arguments** " +"([#1338](https://github.com/adap/flower/pull/1338))" msgstr "" -#: ../../source/ref-changelog.md:797 +#: ../../source/ref-changelog.md:558 msgid "" -"**New built-in strategies** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" +"Pass all arguments as keyword arguments, positional arguments are not " +"longer supported. Code that uses positional arguments (e.g., " +"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " +"for each positional argument (e.g., " +"`start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." msgstr "" -#: ../../source/ref-changelog.md:799 +#: ../../source/ref-changelog.md:560 msgid "" -"FedYogi - Federated learning strategy using Yogi on server-side. 
" -"Implementation based on https://arxiv.org/abs/2003.00295" +"**Introduce configuration object** `ServerConfig` **in** `start_server` " +"**and** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" msgstr "" -#: ../../source/ref-changelog.md:800 +#: ../../source/ref-changelog.md:562 msgid "" -"FedAdam - Federated learning strategy using Adam on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " +"600.0}`, `start_server` and `start_simulation` now expect a configuration" +" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" +" arguments that as the previous config dict, but it makes writing type-" +"safe code easier and the default parameters values more transparent." msgstr "" -#: ../../source/ref-changelog.md:802 +#: ../../source/ref-changelog.md:564 msgid "" -"**New PyTorch Lightning code example** " -"([#617](https://github.com/adap/flower/pull/617))" +"**Rename built-in strategy parameters for clarity** " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/ref-changelog.md:804 +#: ../../source/ref-changelog.md:566 msgid "" -"**New Variational Auto-Encoder code example** " -"([#752](https://github.com/adap/flower/pull/752))" +"The following built-in strategy parameters were renamed to improve " +"readability and consistency with other API's:" msgstr "" -#: ../../source/ref-changelog.md:806 -msgid "" -"**New scikit-learn code example** " -"([#748](https://github.com/adap/flower/pull/748))" +#: ../../source/ref-changelog.md:568 +msgid "`fraction_eval` --> `fraction_evaluate`" msgstr "" -#: ../../source/ref-changelog.md:808 -msgid "" -"**New experimental TensorBoard strategy** " -"([#789](https://github.com/adap/flower/pull/789))" +#: ../../source/ref-changelog.md:569 +msgid "`min_eval_clients` --> `min_evaluate_clients`" msgstr "" -#: ../../source/ref-changelog.md:812 -msgid "" -"Improved advanced TensorFlow code example " -"([#769](https://github.com/adap/flower/pull/769))" +#: ../../source/ref-changelog.md:570 +msgid "`eval_fn` --> `evaluate_fn`" msgstr "" -#: ../../source/ref-changelog.md:813 +#: ../../source/ref-changelog.md:572 msgid "" -"Warning when `min_available_clients` is misconfigured " -"([#830](https://github.com/adap/flower/pull/830))" +"**Update default arguments of built-in strategies** " +"([#1278](https://github.com/adap/flower/pull/1278))" msgstr "" -#: ../../source/ref-changelog.md:814 +#: ../../source/ref-changelog.md:574 msgid "" -"Improved gRPC server docs " -"([#841](https://github.com/adap/flower/pull/841))" +"All built-in strategies now use `fraction_fit=1.0` and " +"`fraction_evaluate=1.0`, which means they select *all* currently " +"available clients for training and evaluation. 
Projects that relied on " +"the previous default values can get the previous behaviour by " +"initializing the strategy in the following way:" msgstr "" -#: ../../source/ref-changelog.md:815 -msgid "" -"Improved error message in `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" +#: ../../source/ref-changelog.md:576 +msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" msgstr "" -#: ../../source/ref-changelog.md:816 +#: ../../source/ref-changelog.md:578 msgid "" -"Improved PyTorch quickstart code example " -"([#852](https://github.com/adap/flower/pull/852))" +"**Add** `server_round` **to** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/ref-changelog.md:820 +#: ../../source/ref-changelog.md:580 msgid "" -"**Disabled final distributed evaluation** " -"([#800](https://github.com/adap/flower/pull/800))" +"The `Strategy` method `evaluate` now receives the current round of " +"federated learning/evaluation as the first parameter." msgstr "" -#: ../../source/ref-changelog.md:822 +#: ../../source/ref-changelog.md:582 msgid "" -"Prior behaviour was to perform a final round of distributed evaluation on" -" all connected clients, which is often not required (e.g., when using " -"server-side evaluation). The prior behaviour can be enabled by passing " -"`force_final_distributed_eval=True` to `start_server`." +"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/ref-changelog.md:824 +#: ../../source/ref-changelog.md:584 msgid "" -"**Renamed q-FedAvg strategy** " -"([#802](https://github.com/adap/flower/pull/802))" +"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " +"three parameters: (1) The current round of federated learning/evaluation " +"(`server_round`), (2) the model parameters to evaluate (`parameters`), " +"and (3) a config dictionary (`config`)." msgstr "" -#: ../../source/ref-changelog.md:826 +#: ../../source/ref-changelog.md:586 msgid "" -"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " -"the notation given in the original paper (q-FFL is the optimization " -"objective, q-FedAvg is the proposed solver). Note the original (now " -"deprecated) `QffedAvg` class is still available for compatibility reasons" -" (it will be removed in a future release)." +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" msgstr "" -#: ../../source/ref-changelog.md:828 +#: ../../source/ref-changelog.md:588 msgid "" -"**Deprecated and renamed code example** `simulation_pytorch` **to** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " +"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " +"current round of federated learning/evaluation as their first parameter. " +"To improve reaability and avoid confusion with *random*, this parameter " +"has been renamed from `rnd` to `server_round`." msgstr "" -#: ../../source/ref-changelog.md:830 +#: ../../source/ref-changelog.md:590 msgid "" -"This example has been replaced by a new example. The new example is based" -" on the experimental virtual client engine, which will become the new " -"default way of doing most types of large-scale simulations in Flower. 
The" -" existing example was kept for reference purposes, but it might be " -"removed in the future." +"**Move** `flwr.dataset` **to** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" msgstr "" -#: ../../source/ref-changelog.md:832 -msgid "v0.16.0 (2021-05-11)" +#: ../../source/ref-changelog.md:592 +msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." msgstr "" -#: ../../source/ref-changelog.md:836 +#: ../../source/ref-changelog.md:594 msgid "" -"**New built-in strategies** " -"([#549](https://github.com/adap/flower/pull/549))" -msgstr "" - -#: ../../source/ref-changelog.md:838 -msgid "(abstract) FedOpt" +"**Remove experimental strategies** " +"([#1280](https://github.com/adap/flower/pull/1280))" msgstr "" -#: ../../source/ref-changelog.md:841 +#: ../../source/ref-changelog.md:596 msgid "" -"**Custom metrics for server and strategies** " -"([#717](https://github.com/adap/flower/pull/717))" +"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " +"`FedFSv1`)." msgstr "" -#: ../../source/ref-changelog.md:843 +#: ../../source/ref-changelog.md:598 msgid "" -"The Flower server is now fully task-agnostic, all remaining instances of " -"task-specific metrics (such as `accuracy`) have been replaced by custom " -"metrics dictionaries. Flower 0.15 introduced the capability to pass a " -"dictionary containing custom metrics from client to server. As of this " -"release, custom metrics replace task-specific metrics on the server." +"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/ref-changelog.md:845 +#: ../../source/ref-changelog.md:600 msgid "" -"Custom metric dictionaries are now used in two user-facing APIs: they are" -" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " -"they enable evaluation functions passed to built-in strategies (via " -"`eval_fn`) to return more than two evaluation metrics. Strategies can " -"even return *aggregated* metrics dictionaries for the server to keep " -"track of." +"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " +"capture what this type is all about." msgstr "" -#: ../../source/ref-changelog.md:847 +#: ../../source/ref-changelog.md:602 msgid "" -"Strategy implementations should migrate their `aggregate_fit` and " -"`aggregate_evaluate` methods to the new return type (e.g., by simply " -"returning an empty `{}`), server-side evaluation functions should migrate" -" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +"**Remove antiquated** `force_final_distributed_eval` **from** " +"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/ref-changelog.md:849 +#: ../../source/ref-changelog.md:604 msgid "" -"Flower 0.15-style return types are deprecated (but still supported), " -"compatibility will be removed in a future release." +"The `start_server` parameter `force_final_distributed_eval` has long been" +" a historic artefact, in this release it is finally gone for good." 
msgstr "" -#: ../../source/ref-changelog.md:851 +#: ../../source/ref-changelog.md:606 msgid "" -"**Migration warnings for deprecated functionality** " -"([#690](https://github.com/adap/flower/pull/690))" +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" msgstr "" -#: ../../source/ref-changelog.md:853 +#: ../../source/ref-changelog.md:608 msgid "" -"Earlier versions of Flower were often migrated to new APIs, while " -"maintaining compatibility with legacy APIs. This release introduces " -"detailed warning messages if usage of deprecated APIs is detected. The " -"new warning messages often provide details on how to migrate to more " -"recent APIs, thus easing the transition from one release to another." +"The `get_parameters` method now accepts a configuration dictionary, just " +"like `get_properties`, `fit`, and `evaluate`." msgstr "" -#: ../../source/ref-changelog.md:855 +#: ../../source/ref-changelog.md:610 msgid "" -"Improved docs and docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" +"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " +"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -#: ../../source/ref-changelog.md:857 -msgid "MXNet example and documentation" +#: ../../source/ref-changelog.md:612 +msgid "" +"The `start_simulation` function now accepts a configuration dictionary " +"`config` instead of the `num_rounds` integer. This improves the " +"consistency between `start_simulation` and `start_server` and makes " +"transitioning between the two easier." msgstr "" -#: ../../source/ref-changelog.md:859 +#: ../../source/ref-changelog.md:616 msgid "" -"FedBN implementation in example PyTorch: From Centralized To Federated " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" msgstr "" -#: ../../source/ref-changelog.md:863 +#: ../../source/ref-changelog.md:618 msgid "" -"**Serialization-agnostic server** " -"([#721](https://github.com/adap/flower/pull/721))" +"The previous Flower release introduced experimental support for Python " +"3.10, this release declares Python 3.10 support as stable." msgstr "" -#: ../../source/ref-changelog.md:865 +#: ../../source/ref-changelog.md:620 msgid "" -"The Flower server is now fully serialization-agnostic. Prior usage of " -"class `Weights` (which represents parameters as deserialized NumPy " -"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " -"`Parameters` objects are fully serialization-agnostic and represents " -"parameters as byte arrays, the `tensor_type` attributes indicates how " -"these byte arrays should be interpreted (e.g., for " -"serialization/deserialization)." +"**Make all** `Client` **and** `NumPyClient` **methods optional** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" msgstr "" -#: ../../source/ref-changelog.md:867 +#: ../../source/ref-changelog.md:622 msgid "" -"Built-in strategies implement this approach by handling serialization and" -" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " -"implementations should update to the slightly changed Strategy method " -"definitions. 
Strategy authors can consult PR " -"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" -" easily migrate to the new format." +"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " +"`fit`, and `evaluate` are all optional. This enables writing clients that" +" implement, for example, only `fit`, but no other method. No need to " +"implement `evaluate` when using centralized evaluation!" msgstr "" -#: ../../source/ref-changelog.md:869 +#: ../../source/ref-changelog.md:624 msgid "" -"Deprecated `flwr.server.Server.evaluate`, use " -"`flwr.server.Server.evaluate_round` instead " -"([#717](https://github.com/adap/flower/pull/717))" +"**Enable passing a** `Server` **instance to** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -#: ../../source/ref-changelog.md:871 -msgid "v0.15.0 (2021-03-12)" +#: ../../source/ref-changelog.md:626 +msgid "" +"Similar to `start_server`, `start_simulation` now accepts a full `Server`" +" instance. This enables users to heavily customize the execution of " +"eperiments and opens the door to running, for example, async FL using the" +" Virtual Client Engine." msgstr "" -#: ../../source/ref-changelog.md:875 +#: ../../source/ref-changelog.md:628 msgid "" -"**Server-side parameter initialization** " -"([#658](https://github.com/adap/flower/pull/658))" +"**Update code examples** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" msgstr "" -#: ../../source/ref-changelog.md:877 +#: ../../source/ref-changelog.md:630 msgid "" -"Model parameters can now be initialized on the server-side. Server-side " -"parameter initialization works via a new `Strategy` method called " -"`initialize_parameters`." +"Many code examples received small or even large maintenance updates, " +"among them are" msgstr "" -#: ../../source/ref-changelog.md:879 -msgid "" -"Built-in strategies support a new constructor argument called " -"`initial_parameters` to set the initial parameters. Built-in strategies " -"will provide these initial parameters to the server on startup and then " -"delete them to free the memory afterwards." +#: ../../source/ref-changelog.md:632 +msgid "`scikit-learn`" msgstr "" -#: ../../source/ref-changelog.md:898 -msgid "" -"If no initial parameters are provided to the strategy, the server will " -"continue to use the current behaviour (namely, it will ask one of the " -"connected clients for its parameters and use these as the initial global " -"parameters)." 
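The server-side parameter initialization entry quoted above maps to the `initial_parameters` constructor argument of the built-in strategies. A short sketch, using placeholder weights and the conversion helper under its Flower 1.x name (the 0.15-era helper operated on the older `Weights` type):

```python
# Sketch: server-side parameter initialization via `initial_parameters`
# (placeholder zero weights stand in for a real model's initial state).
import numpy as np
import flwr as fl
from flwr.server.strategy import FedAvg

initial_ndarrays = [np.zeros((10, 10)), np.zeros(10)]  # placeholder model weights
strategy = FedAvg(
    initial_parameters=fl.common.ndarrays_to_parameters(initial_ndarrays),
)
# Without `initial_parameters`, the server instead asks one connected client
# for its parameters and uses those as the initial global parameters.
```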
+#: ../../source/ref-changelog.md:633 +msgid "`simulation_pytorch`" msgstr "" -#: ../../source/ref-changelog.md:900 -msgid "Deprecations" +#: ../../source/ref-changelog.md:634 +msgid "`quickstart_pytorch`" msgstr "" -#: ../../source/ref-changelog.md:902 -msgid "" -"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " -"`flwr.server.strategy.FedAvg`, which is equivalent)" +#: ../../source/ref-changelog.md:635 +msgid "`quickstart_simulation`" msgstr "" -#: ../../source/ref-changelog.md:904 -msgid "v0.14.0 (2021-02-18)" +#: ../../source/ref-changelog.md:636 +msgid "`quickstart_tensorflow`" msgstr "" -#: ../../source/ref-changelog.md:908 -msgid "" -"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +#: ../../source/ref-changelog.md:637 +msgid "`advanced_tensorflow`" msgstr "" -#: ../../source/ref-changelog.md:910 +#: ../../source/ref-changelog.md:639 msgid "" -"Clients can now return an additional dictionary mapping `str` keys to " -"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " -"This means one can return almost arbitrary values from `fit`/`evaluate` " -"and make use of them on the server side!" +"**Remove the obsolete simulation example** " +"([#1328](https://github.com/adap/flower/pull/1328))" msgstr "" -#: ../../source/ref-changelog.md:912 +#: ../../source/ref-changelog.md:641 msgid "" -"This improvement also allowed for more consistent return types between " -"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " -"dict)` representing the loss, number of examples, and a dictionary " -"holding arbitrary problem-specific values like accuracy." +"Removes the obsolete `simulation` example and renames " +"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " +"naming of `simulation_pytorch`" msgstr "" -#: ../../source/ref-changelog.md:914 +#: ../../source/ref-changelog.md:643 msgid "" -"In case you wondered: this feature is compatible with existing projects, " -"the additional dictionary return value is optional. New code should " -"however migrate to the new return types to be compatible with upcoming " -"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " -"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " -"details." 
+"**Update documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" msgstr "" -#: ../../source/ref-changelog.md:916 +#: ../../source/ref-changelog.md:645 msgid "" -"*Code example:* note the additional dictionary return values in both " -"`FlwrClient.fit` and `FlwrClient.evaluate`:" +"One substantial documentation update fixes multiple smaller rendering " +"issues, makes titles more succinct to improve navigation, removes a " +"deprecated library, updates documentation dependencies, includes the " +"`flwr.common` module in the API reference, includes support for markdown-" +"based documentation, migrates the changelog from `.rst` to `.md`, and " +"fixes a number of smaller details!" msgstr "" -#: ../../source/ref-changelog.md:931 -msgid "" -"**Generalized** `config` **argument in** `Client.fit` **and** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +#: ../../source/ref-changelog.md:647 ../../source/ref-changelog.md:702 +#: ../../source/ref-changelog.md:771 ../../source/ref-changelog.md:810 +msgid "**Minor updates**" msgstr "" -#: ../../source/ref-changelog.md:933 +#: ../../source/ref-changelog.md:649 msgid "" -"The `config` argument used to be of type `Dict[str, str]`, which means " -"that dictionary values were expected to be strings. The new release " -"generalizes this to enable values of the following types: `bool`, " -"`bytes`, `float`, `int`, `str`." +"Add round number to fit and evaluate log messages " +"([#1266](https://github.com/adap/flower/pull/1266))" msgstr "" -#: ../../source/ref-changelog.md:935 +#: ../../source/ref-changelog.md:650 msgid "" -"This means one can now pass almost arbitrary values to `fit`/`evaluate` " -"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" -"side and `int(config[\"epochs\"])` on the client side!" 
+"Add secure gRPC connection to the `advanced_tensorflow` code example " +"([#847](https://github.com/adap/flower/pull/847))" msgstr "" -#: ../../source/ref-changelog.md:937 +#: ../../source/ref-changelog.md:651 msgid "" -"*Code example:* note that the `config` dictionary now contains non-`str` " -"values in both `Client.fit` and `Client.evaluate`:" -msgstr "" - -#: ../../source/ref-changelog.md:954 -msgid "v0.13.0 (2021-01-08)" +"Update developer tooling " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" msgstr "" -#: ../../source/ref-changelog.md:958 +#: ../../source/ref-changelog.md:652 msgid "" -"New example: PyTorch From Centralized To Federated " -"([#549](https://github.com/adap/flower/pull/549))" -msgstr "" - -#: ../../source/ref-changelog.md:959 -msgid "Improved documentation" +"Rename ProtoBuf messages to improve consistency " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/ref-changelog.md:960 -msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" +#: ../../source/ref-changelog.md:654 +msgid "v0.19.0 (2022-05-18)" msgstr "" -#: ../../source/ref-changelog.md:961 -msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +#: ../../source/ref-changelog.md:658 +msgid "" +"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" msgstr "" -#: ../../source/ref-changelog.md:962 +#: ../../source/ref-changelog.md:660 msgid "" -"Updated examples documentation " -"([#549](https://github.com/adap/flower/pull/549))" +"The first preview release of Flower Baselines has arrived! We're " +"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " +"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " +"With this first preview release we're also inviting the community to " +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." msgstr "" -#: ../../source/ref-changelog.md:963 +#: ../../source/ref-changelog.md:662 msgid "" -"Removed obsolete documentation " -"([#548](https://github.com/adap/flower/pull/548))" +"**C++ client SDK (preview) and code example** " +"([#1111](https://github.com/adap/flower/pull/1111))" msgstr "" -#: ../../source/ref-changelog.md:965 -msgid "Bugfix:" +#: ../../source/ref-changelog.md:664 +msgid "" +"Preview support for Flower clients written in C++. The C++ preview " +"includes a Flower client SDK and a quickstart code example that " +"demonstrates a simple C++ client using the SDK." msgstr "" -#: ../../source/ref-changelog.md:967 +#: ../../source/ref-changelog.md:666 msgid "" -"`Server.fit` does not disconnect clients when finished, disconnecting the" -" clients is now handled in `flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))." 
+"**Add experimental support for Python 3.10 and Python 3.11** " +"([#1135](https://github.com/adap/flower/pull/1135))" msgstr "" -#: ../../source/ref-changelog.md:969 -msgid "v0.12.0 (2020-12-07)" +#: ../../source/ref-changelog.md:668 +msgid "" +"Python 3.10 is the latest stable release of Python and Python 3.11 is due" +" to be released in October. This Flower release adds experimental support" +" for both Python versions." msgstr "" -#: ../../source/ref-changelog.md:971 ../../source/ref-changelog.md:987 -msgid "Important changes:" +#: ../../source/ref-changelog.md:670 +msgid "" +"**Aggregate custom metrics through user-provided functions** " +"([#1144](https://github.com/adap/flower/pull/1144))" msgstr "" -#: ../../source/ref-changelog.md:973 +#: ../../source/ref-changelog.md:672 msgid "" -"Added an example for embedded devices " -"([#507](https://github.com/adap/flower/pull/507))" +"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" +" customize the strategy. Built-in strategies support two new arguments, " +"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " +"allow passing custom metric aggregation functions." msgstr "" -#: ../../source/ref-changelog.md:974 +#: ../../source/ref-changelog.md:674 msgid "" -"Added a new NumPyClient (in addition to the existing KerasClient) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" +"**User-configurable round timeout** " +"([#1162](https://github.com/adap/flower/pull/1162))" msgstr "" -#: ../../source/ref-changelog.md:975 +#: ../../source/ref-changelog.md:676 msgid "" -"Deprecated `flwr_example` package and started to migrate examples into " -"the top-level `examples` directory " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" +"A new configuration value allows the round timeout to be set for " +"`start_server` and `start_simulation`. If the `config` dictionary " +"contains a `round_timeout` key (with a `float` value in seconds), the " +"server will wait *at least* `round_timeout` seconds before it closes the " +"connection." msgstr "" -#: ../../source/ref-changelog.md:977 -msgid "v0.11.0 (2020-11-30)" +#: ../../source/ref-changelog.md:678 +msgid "" +"**Enable both federated evaluation and centralized evaluation to be used " +"at the same time in all built-in strategies** " +"([#1091](https://github.com/adap/flower/pull/1091))" msgstr "" -#: ../../source/ref-changelog.md:979 -msgid "Incompatible changes:" +#: ../../source/ref-changelog.md:680 +msgid "" +"Built-in strategies can now perform both federated evaluation (i.e., " +"client-side) and centralized evaluation (i.e., server-side) in the same " +"round. Federated evaluation can be disabled by setting `fraction_eval` to" +" `0.0`." msgstr "" -#: ../../source/ref-changelog.md:981 +#: ../../source/ref-changelog.md:682 msgid "" -"Renamed strategy methods " -"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " -"Flower's public APIs. Other public methods/functions (e.g., every method " -"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " -"which is why we're removing it from the four methods in Strategy. 
To " -"migrate rename the following `Strategy` methods accordingly:" +"**Two new Jupyter Notebook tutorials** " +"([#1141](https://github.com/adap/flower/pull/1141))" msgstr "" -#: ../../source/ref-changelog.md:982 -msgid "`on_configure_evaluate` => `configure_evaluate`" +#: ../../source/ref-changelog.md:684 +msgid "" +"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " +"basic and intermediate Flower features:" msgstr "" -#: ../../source/ref-changelog.md:983 -msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" +#: ../../source/ref-changelog.md:686 +msgid "" +"*An Introduction to Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:984 -msgid "`on_configure_fit` => `configure_fit`" +#: ../../source/ref-changelog.md:688 +msgid "" +"*Using Strategies in Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:985 -msgid "`on_aggregate_fit` => `aggregate_fit`" +#: ../../source/ref-changelog.md:690 +msgid "" +"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " +"([#1076](https://github.com/adap/flower/pull/1076))" msgstr "" -#: ../../source/ref-changelog.md:989 +#: ../../source/ref-changelog.md:692 msgid "" -"Deprecated `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). To migrate use " -"`FedAvg` instead." +"The new `FedAvgM` strategy implements Federated Averaging with Server " +"Momentum \\[Hsu et al., 2019\\]." msgstr "" -#: ../../source/ref-changelog.md:990 +#: ../../source/ref-changelog.md:694 msgid "" -"Simplified examples and baselines " -"([#484](https://github.com/adap/flower/pull/484))." +"**New advanced PyTorch code example** " +"([#1007](https://github.com/adap/flower/pull/1007))" msgstr "" -#: ../../source/ref-changelog.md:991 +#: ../../source/ref-changelog.md:696 msgid "" -"Removed presently unused `on_conclude_round` from strategy interface " -"([#483](https://github.com/adap/flower/pull/483))." +"A new code example (`advanced_pytorch`) demonstrates advanced Flower " +"concepts with PyTorch." msgstr "" -#: ../../source/ref-changelog.md:992 +#: ../../source/ref-changelog.md:698 msgid "" -"Set minimal Python version to 3.6.1 instead of 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." +"**New JAX code example** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" msgstr "" -#: ../../source/ref-changelog.md:993 +#: ../../source/ref-changelog.md:700 msgid "" -"Improved `Strategy` docstrings " -"([#470](https://github.com/adap/flower/pull/470))." +"A new code example (`jax_from_centralized_to_federated`) shows federated " +"learning with JAX and Flower." msgstr "" -#: ../../source/ref-example-projects.rst:2 -msgid "Example projects" +#: ../../source/ref-changelog.md:704 +msgid "" +"New option to keep Ray running if Ray was already initialized in " +"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" msgstr "" -#: ../../source/ref-example-projects.rst:4 +#: ../../source/ref-changelog.md:705 msgid "" -"Flower comes with a number of usage examples. 
The examples demonstrate " -"how Flower can be used to federate different kinds of existing machine " -"learning pipelines, usually leveraging popular machine learning " -"frameworks such as `PyTorch `_ or `TensorFlow " -"`_." +"Add support for custom `ClientManager` as a `start_simulation` parameter " +"([#1171](https://github.com/adap/flower/pull/1171))" msgstr "" -#: ../../source/ref-example-projects.rst:11 +#: ../../source/ref-changelog.md:706 msgid "" -"Flower usage examples used to be bundled with Flower in a package called " -"``flwr_example``. We are migrating those examples to standalone projects " -"to make them easier to use. All new examples are based in the directory " -"`examples `_." +"New documentation for [implementing " +"strategies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" msgstr "" -#: ../../source/ref-example-projects.rst:16 -msgid "The following examples are available as standalone projects." +#: ../../source/ref-changelog.md:707 +msgid "" +"New mobile-friendly documentation theme " +"([#1174](https://github.com/adap/flower/pull/1174))" msgstr "" -#: ../../source/ref-example-projects.rst:20 -msgid "Quickstart TensorFlow/Keras" +#: ../../source/ref-changelog.md:708 +msgid "" +"Limit version range for (optional) `ray` dependency to include only " +"compatible releases (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" msgstr "" -#: ../../source/ref-example-projects.rst:22 +#: ../../source/ref-changelog.md:712 msgid "" -"The TensorFlow/Keras quickstart example shows CIFAR-10 image " -"classification with MobileNetV2:" +"**Remove deprecated support for Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" msgstr "" -#: ../../source/ref-example-projects.rst:25 +#: ../../source/ref-changelog.md:713 msgid "" -"`Quickstart TensorFlow (Code) " -"`_" +"**Remove deprecated KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" msgstr "" -#: ../../source/ref-example-projects.rst:26 +#: ../../source/ref-changelog.md:714 msgid "" -"`Quickstart TensorFlow (Tutorial) `_" +"**Remove deprecated no-op extra installs** " +"([#973](https://github.com/adap/flower/pull/973))" msgstr "" -#: ../../source/ref-example-projects.rst:27 +#: ../../source/ref-changelog.md:715 msgid "" -"`Quickstart TensorFlow (Blog Post) `_" +"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " +"([#869](https://github.com/adap/flower/pull/869))" msgstr "" -#: ../../source/ref-example-projects.rst:31 -#: ../../source/tutorial-quickstart-pytorch.rst:5 -msgid "Quickstart PyTorch" +#: ../../source/ref-changelog.md:716 +msgid "" +"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" msgstr "" -#: ../../source/ref-example-projects.rst:33 +#: ../../source/ref-changelog.md:717 msgid "" -"The PyTorch quickstart example shows CIFAR-10 image classification with a" -" simple Convolutional Neural Network:" +"**Remove deprecated DefaultStrategy strategy** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/ref-example-projects.rst:36 +#: ../../source/ref-changelog.md:718 msgid "" -"`Quickstart PyTorch (Code) " -"`_" +"**Remove deprecated support for eval_fn accuracy return value** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/ref-example-projects.rst:37 +#: 
../../source/ref-changelog.md:719 msgid "" -"`Quickstart PyTorch (Tutorial) `_" +"**Remove deprecated support for passing initial parameters as NumPy " +"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/ref-example-projects.rst:41 -msgid "PyTorch: From Centralized To Federated" +#: ../../source/ref-changelog.md:721 +msgid "v0.18.0 (2022-02-28)" msgstr "" -#: ../../source/ref-example-projects.rst:43 +#: ../../source/ref-changelog.md:725 msgid "" -"This example shows how a regular PyTorch project can be federated using " -"Flower:" +"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " +"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" msgstr "" -#: ../../source/ref-example-projects.rst:45 +#: ../../source/ref-changelog.md:727 msgid "" -"`PyTorch: From Centralized To Federated (Code) " -"`_" +"Simulations (using the Virtual Client Engine through `start_simulation`) " +"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " +"installing Flower with the `simulation` extra (`pip install " +"flwr[simulation]`)." msgstr "" -#: ../../source/ref-example-projects.rst:46 +#: ../../source/ref-changelog.md:729 msgid "" -"`PyTorch: From Centralized To Federated (Tutorial) " -"`_" -msgstr "" - -#: ../../source/ref-example-projects.rst:50 -msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" +"**New Jupyter Notebook code example** " +"([#833](https://github.com/adap/flower/pull/833))" msgstr "" -#: ../../source/ref-example-projects.rst:52 +#: ../../source/ref-changelog.md:731 msgid "" -"This example shows how Flower can be used to build a federated learning " -"system that run across Raspberry Pi and Nvidia Jetson:" +"A new code example (`quickstart_simulation`) demonstrates Flower " +"simulations using the Virtual Client Engine through Jupyter Notebook " +"(incl. Google Colab)." msgstr "" -#: ../../source/ref-example-projects.rst:54 +#: ../../source/ref-changelog.md:733 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " -"`_" +"**Client properties (feature preview)** " +"([#795](https://github.com/adap/flower/pull/795))" msgstr "" -#: ../../source/ref-example-projects.rst:55 +#: ../../source/ref-changelog.md:735 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " -"`_" +"Clients can implement a new method `get_properties` to enable server-side" +" strategies to query client properties." msgstr "" -#: ../../source/ref-example-projects.rst:60 -msgid "Legacy Examples (`flwr_example`)" +#: ../../source/ref-changelog.md:737 +msgid "" +"**Experimental Android support with TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" msgstr "" -#: ../../source/ref-example-projects.rst:63 +#: ../../source/ref-changelog.md:739 msgid "" -"The useage examples in `flwr_example` are deprecated and will be removed " -"in the future. New examples are provided as standalone projects in " -"`examples `_." +"Android support has finally arrived in `main`! Flower is both client-" +"agnostic and framework-agnostic by design. One can integrate arbitrary " +"client platforms and with this release, using Flower on Android has " +"become a lot easier." 
msgstr "" -#: ../../source/ref-example-projects.rst:69 -msgid "Extra Dependencies" +#: ../../source/ref-changelog.md:741 +msgid "" +"The example uses TFLite on the client side, along with a new " +"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " +"still experimental, but they are a first step towards a fully-fledged " +"Android SDK and a unified `FedAvg` implementation that integrated the new" +" functionality from `FedAvgAndroid`." msgstr "" -#: ../../source/ref-example-projects.rst:71 +#: ../../source/ref-changelog.md:743 msgid "" -"The core Flower framework keeps a minimal set of dependencies. The " -"examples demonstrate Flower in the context of different machine learning " -"frameworks, so additional dependencies need to be installed before an " -"example can be run." +"**Make gRPC keepalive time user-configurable and decrease default " +"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" msgstr "" -#: ../../source/ref-example-projects.rst:75 -msgid "For PyTorch examples::" +#: ../../source/ref-changelog.md:745 +msgid "" +"The default gRPC keepalive time has been reduced to increase the " +"compatibility of Flower with more cloud environments (for example, " +"Microsoft Azure). Users can configure the keepalive time to customize the" +" gRPC stack based on specific requirements." msgstr "" -#: ../../source/ref-example-projects.rst:79 -msgid "For TensorFlow examples::" +#: ../../source/ref-changelog.md:747 +msgid "" +"**New differential privacy example using Opacus and PyTorch** " +"([#805](https://github.com/adap/flower/pull/805))" msgstr "" -#: ../../source/ref-example-projects.rst:83 -msgid "For both PyTorch and TensorFlow examples::" +#: ../../source/ref-changelog.md:749 +msgid "" +"A new code example (`opacus`) demonstrates differentially-private " +"federated learning with Opacus, PyTorch, and Flower." msgstr "" -#: ../../source/ref-example-projects.rst:87 +#: ../../source/ref-changelog.md:751 msgid "" -"Please consult :code:`pyproject.toml` for a full list of possible extras " -"(section :code:`[tool.poetry.extras]`)." +"**New Hugging Face Transformers code example** " +"([#863](https://github.com/adap/flower/pull/863))" msgstr "" -#: ../../source/ref-example-projects.rst:92 -msgid "PyTorch Examples" +#: ../../source/ref-changelog.md:753 +msgid "" +"A new code example (`quickstart_huggingface`) demonstrates usage of " +"Hugging Face Transformers with Flower." msgstr "" -#: ../../source/ref-example-projects.rst:94 +#: ../../source/ref-changelog.md:755 msgid "" -"Our PyTorch examples are based on PyTorch 1.7. They should work with " -"other releases as well. So far, we provide the following examples." +"**New MLCube code example** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" msgstr "" -#: ../../source/ref-example-projects.rst:98 -msgid "CIFAR-10 Image Classification" +#: ../../source/ref-changelog.md:757 +msgid "" +"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " +"with Flower." msgstr "" -#: ../../source/ref-example-projects.rst:100 +#: ../../source/ref-changelog.md:759 msgid "" -"`CIFAR-10 and CIFAR-100 `_ " -"are popular RGB image datasets. The Flower CIFAR-10 example uses PyTorch " -"to train a simple CNN classifier in a federated learning setup with two " -"clients." 
+"**SSL-enabled server and client** " +"([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" msgstr "" -#: ../../source/ref-example-projects.rst:104 -#: ../../source/ref-example-projects.rst:121 -#: ../../source/ref-example-projects.rst:146 -msgid "First, start a Flower server:" +#: ../../source/ref-changelog.md:761 +msgid "" +"SSL enables secure encrypted connections between clients and servers. " +"This release open-sources the Flower secure gRPC implementation to make " +"encrypted communication channels accessible to all Flower users." msgstr "" -#: ../../source/ref-example-projects.rst:106 -msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#: ../../source/ref-changelog.md:763 +msgid "" +"**Updated** `FedAdam` **and** `FedYogi` **strategies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" msgstr "" -#: ../../source/ref-example-projects.rst:108 -#: ../../source/ref-example-projects.rst:125 -#: ../../source/ref-example-projects.rst:150 -msgid "Then, start the two clients in a new terminal window:" +#: ../../source/ref-changelog.md:765 +msgid "" +"`FedAdam` and `FedAdam` match the latest version of the Adaptive " +"Federated Optimization paper." msgstr "" -#: ../../source/ref-example-projects.rst:110 -msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#: ../../source/ref-changelog.md:767 +msgid "" +"**Initialize** `start_simulation` **with a list of client IDs** " +"([#860](https://github.com/adap/flower/pull/860))" msgstr "" -#: ../../source/ref-example-projects.rst:112 -msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." +#: ../../source/ref-changelog.md:769 +msgid "" +"`start_simulation` can now be called with a list of client IDs " +"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " +"`client_fn` whenever a client needs to be initialized, which can make it " +"easier to load data partitions that are not accessible through `int` " +"identifiers." msgstr "" -#: ../../source/ref-example-projects.rst:115 -msgid "ImageNet-2012 Image Classification" +#: ../../source/ref-changelog.md:773 +msgid "" +"Update `num_examples` calculation in PyTorch code examples in " +"([#909](https://github.com/adap/flower/pull/909))" msgstr "" -#: ../../source/ref-example-projects.rst:117 +#: ../../source/ref-changelog.md:774 msgid "" -"`ImageNet-2012 `_ is one of the major computer" -" vision datasets. The Flower ImageNet example uses PyTorch to train a " -"ResNet-18 classifier in a federated learning setup with ten clients." 
+"Expose Flower version through `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" msgstr "" -#: ../../source/ref-example-projects.rst:123 -msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#: ../../source/ref-changelog.md:775 +msgid "" +"`start_server` in `app.py` now returns a `History` object containing " +"metrics from training ([#974](https://github.com/adap/flower/pull/974))" msgstr "" -#: ../../source/ref-example-projects.rst:127 -msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#: ../../source/ref-changelog.md:776 +msgid "" +"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" msgstr "" -#: ../../source/ref-example-projects.rst:129 -msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." +#: ../../source/ref-changelog.md:777 +msgid "" +"Increase sleep time after server start to three seconds in all code " +"examples ([#1086](https://github.com/adap/flower/pull/1086))" msgstr "" -#: ../../source/ref-example-projects.rst:133 -msgid "TensorFlow Examples" +#: ../../source/ref-changelog.md:778 +msgid "" +"Added a new FAQ section to the documentation " +"([#948](https://github.com/adap/flower/pull/948))" msgstr "" -#: ../../source/ref-example-projects.rst:135 +#: ../../source/ref-changelog.md:779 msgid "" -"Our TensorFlow examples are based on TensorFlow 2.0 or newer. So far, we " -"provide the following examples." +"And many more under-the-hood changes, library updates, documentation " +"changes, and tooling improvements!" msgstr "" -#: ../../source/ref-example-projects.rst:139 -msgid "Fashion-MNIST Image Classification" +#: ../../source/ref-changelog.md:783 +msgid "" +"**Removed** `flwr_example` **and** `flwr_experimental` **from release " +"build** ([#869](https://github.com/adap/flower/pull/869))" msgstr "" -#: ../../source/ref-example-projects.rst:141 +#: ../../source/ref-changelog.md:785 msgid "" -"`Fashion-MNIST `_ is " -"often used as the \"Hello, world!\" of machine learning. We follow this " -"tradition and provide an example which samples random local datasets from" -" Fashion-MNIST and trains a simple image classification model over those " -"partitions." +"The packages `flwr_example` and `flwr_experimental` have been deprecated " +"since Flower 0.12.0 and they are not longer included in Flower release " +"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " +"an upcoming release." msgstr "" -#: ../../source/ref-example-projects.rst:148 -msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#: ../../source/ref-changelog.md:787 +msgid "v0.17.0 (2021-09-24)" msgstr "" -#: ../../source/ref-example-projects.rst:152 -msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#: ../../source/ref-changelog.md:791 +msgid "" +"**Experimental virtual client engine** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" msgstr "" -#: ../../source/ref-example-projects.rst:154 +#: ../../source/ref-changelog.md:793 msgid "" -"For more details, see " -":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +"One of Flower's goals is to enable research at scale. This release " +"enables a first (experimental) peek at a major new feature, codenamed the" +" virtual client engine. 
Virtual clients enable simulations that scale to " +"a (very) large number of clients on a single machine or compute cluster. " +"The easiest way to test the new functionality is to look at the two new " +"code examples called `quickstart_simulation` and `simulation_pytorch`." msgstr "" -#: ../../source/ref-faq.rst:4 +#: ../../source/ref-changelog.md:795 msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." +"The feature is still experimental, so there's no stability guarantee for " +"the API. It's also not quite ready for prime time and comes with a few " +"known caveats. However, those who are curious are encouraged to try it " +"out and share their thoughts." msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can Flower run on Juptyter Notebooks / Google Colab?" +#: ../../source/ref-changelog.md:797 +msgid "" +"**New built-in strategies** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" msgstr "" -#: ../../source/ref-faq.rst:8 +#: ../../source/ref-changelog.md:799 msgid "" -"Yes, it can! Flower even comes with a few under-the-hood optimizations to" -" make it work even better on Colab. Here's a quickstart example:" +"FedYogi - Federated learning strategy using Yogi on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -#: ../../source/ref-faq.rst:10 +#: ../../source/ref-changelog.md:800 msgid "" -"`Flower simulation PyTorch " -"`_" +"FedAdam - Federated learning strategy using Adam on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -#: ../../source/ref-faq.rst:11 +#: ../../source/ref-changelog.md:802 msgid "" -"`Flower simulation TensorFlow/Keras " -"`_" +"**New PyTorch Lightning code example** " +"([#617](https://github.com/adap/flower/pull/617))" msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" +#: ../../source/ref-changelog.md:804 +msgid "" +"**New Variational Auto-Encoder code example** " +"([#752](https://github.com/adap/flower/pull/752))" msgstr "" -#: ../../source/ref-faq.rst:15 +#: ../../source/ref-changelog.md:806 msgid "" -"Find the `blog post about federated learning on embedded device here " -"`_" -" and the corresponding `GitHub code example " -"`_." +"**New scikit-learn code example** " +"([#748](https://github.com/adap/flower/pull/748))" msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +#: ../../source/ref-changelog.md:808 +msgid "" +"**New experimental TensorBoard strategy** " +"([#789](https://github.com/adap/flower/pull/789))" msgstr "" -#: ../../source/ref-faq.rst:19 +#: ../../source/ref-changelog.md:812 msgid "" -"Yes, it does. Please take a look at our `blog post " -"`_ or check out the code examples:" +"Improved advanced TensorFlow code example " +"([#769](https://github.com/adap/flower/pull/769))" msgstr "" -#: ../../source/ref-faq.rst:21 +#: ../../source/ref-changelog.md:813 msgid "" -"`Android Kotlin example `_" +"Warning when `min_available_clients` is misconfigured " +"([#830](https://github.com/adap/flower/pull/830))" msgstr "" -#: ../../source/ref-faq.rst:22 -msgid "`Android Java example `_" +#: ../../source/ref-changelog.md:814 +msgid "" +"Improved gRPC server docs " +"([#841](https://github.com/adap/flower/pull/841))" msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" 
+#: ../../source/ref-changelog.md:815 +msgid "" +"Improved error message in `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" msgstr "" -#: ../../source/ref-faq.rst:26 +#: ../../source/ref-changelog.md:816 msgid "" -"Yes, of course. A list of available examples using Flower within a " -"blockchain environment is available here:" +"Improved PyTorch quickstart code example " +"([#852](https://github.com/adap/flower/pull/852))" msgstr "" -#: ../../source/ref-faq.rst:28 +#: ../../source/ref-changelog.md:820 msgid "" -"`Flower meets Nevermined GitHub Repository `_." +"**Disabled final distributed evaluation** " +"([#800](https://github.com/adap/flower/pull/800))" msgstr "" -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-changelog.md:822 msgid "" -"`Flower meets Nevermined YouTube video " -"`_." +"Prior behaviour was to perform a final round of distributed evaluation on" +" all connected clients, which is often not required (e.g., when using " +"server-side evaluation). The prior behaviour can be enabled by passing " +"`force_final_distributed_eval=True` to `start_server`." msgstr "" -#: ../../source/ref-faq.rst:30 +#: ../../source/ref-changelog.md:824 msgid "" -"`Flower meets KOSMoS `_." +"**Renamed q-FedAvg strategy** " +"([#802](https://github.com/adap/flower/pull/802))" msgstr "" -#: ../../source/ref-faq.rst:31 +#: ../../source/ref-changelog.md:826 msgid "" -"`Flower meets Talan blog post `_ ." +"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " +"the notation given in the original paper (q-FFL is the optimization " +"objective, q-FedAvg is the proposed solver). Note the original (now " +"deprecated) `QffedAvg` class is still available for compatibility reasons" +" (it will be removed in a future release)." msgstr "" -#: ../../source/ref-faq.rst:32 +#: ../../source/ref-changelog.md:828 msgid "" -"`Flower meets Talan GitHub Repository " -"`_ ." -msgstr "" - -#: ../../source/ref-telemetry.md:1 -msgid "Telemetry" +"**Deprecated and renamed code example** `simulation_pytorch` **to** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" msgstr "" -#: ../../source/ref-telemetry.md:3 +#: ../../source/ref-changelog.md:830 msgid "" -"The Flower open-source project collects **anonymous** usage metrics to " -"make well-informed decisions to improve Flower. Doing this enables the " -"Flower team to understand how Flower is used and what challenges users " -"might face." +"This example has been replaced by a new example. The new example is based" +" on the experimental virtual client engine, which will become the new " +"default way of doing most types of large-scale simulations in Flower. The" +" existing example was kept for reference purposes, but it might be " +"removed in the future." msgstr "" -#: ../../source/ref-telemetry.md:5 -msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users that do not want to share anonymous usage metrics." 
+#: ../../source/ref-changelog.md:832 +msgid "v0.16.0 (2021-05-11)" msgstr "" -#: ../../source/ref-telemetry.md:7 -msgid "Principles" +#: ../../source/ref-changelog.md:836 +msgid "" +"**New built-in strategies** " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/ref-telemetry.md:9 -msgid "We follow strong principles guarding anonymous usage metrics collection:" +#: ../../source/ref-changelog.md:838 +msgid "(abstract) FedOpt" msgstr "" -#: ../../source/ref-telemetry.md:11 +#: ../../source/ref-changelog.md:841 msgid "" -"**Optional:** You will always be able to disable telemetry; read on to " -"learn “[How to opt-out](#how-to-opt-out)”." +"**Custom metrics for server and strategies** " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -#: ../../source/ref-telemetry.md:12 +#: ../../source/ref-changelog.md:843 msgid "" -"**Anonymous:** The reported usage metrics are anonymous and do not " -"contain any personally identifiable information (PII). See “[Collected " -"metrics](#collected-metrics)” to understand what metrics are being " -"reported." +"The Flower server is now fully task-agnostic, all remaining instances of " +"task-specific metrics (such as `accuracy`) have been replaced by custom " +"metrics dictionaries. Flower 0.15 introduced the capability to pass a " +"dictionary containing custom metrics from client to server. As of this " +"release, custom metrics replace task-specific metrics on the server." msgstr "" -#: ../../source/ref-telemetry.md:13 +#: ../../source/ref-changelog.md:845 msgid "" -"**Transparent:** You can easily inspect what anonymous metrics are being " -"reported; see the section “[How to inspect what is being reported](#how-" -"to-inspect-what-is-being-reported)”" +"Custom metric dictionaries are now used in two user-facing APIs: they are" +" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " +"they enable evaluation functions passed to built-in strategies (via " +"`eval_fn`) to return more than two evaluation metrics. Strategies can " +"even return *aggregated* metrics dictionaries for the server to keep " +"track of." msgstr "" -#: ../../source/ref-telemetry.md:14 +#: ../../source/ref-changelog.md:847 msgid "" -"**Open for feedback:** You can always reach out to us if you have " -"feedback; see the section “[How to contact us](#how-to-contact-us)” for " -"details." +"Strategy implementations should migrate their `aggregate_fit` and " +"`aggregate_evaluate` methods to the new return type (e.g., by simply " +"returning an empty `{}`), server-side evaluation functions should migrate" +" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." msgstr "" -#: ../../source/ref-telemetry.md:16 -msgid "How to opt-out" +#: ../../source/ref-changelog.md:849 +msgid "" +"Flower 0.15-style return types are deprecated (but still supported), " +"compatibility will be removed in a future release." msgstr "" -#: ../../source/ref-telemetry.md:18 +#: ../../source/ref-changelog.md:851 msgid "" -"When Flower starts, it will check for an environment variable called " -"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " -"`FLWR_TELEMETRY_ENABLED=0`. 
Assuming you are starting a Flower server or " -"client, simply do so by prepending your command as in:" +"**Migration warnings for deprecated functionality** " +"([#690](https://github.com/adap/flower/pull/690))" msgstr "" -#: ../../source/ref-telemetry.md:24 +#: ../../source/ref-changelog.md:853 msgid "" -"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," -" `.bashrc` (or whatever configuration file applies to your environment) " -"to disable Flower telemetry permanently." +"Earlier versions of Flower were often migrated to new APIs, while " +"maintaining compatibility with legacy APIs. This release introduces " +"detailed warning messages if usage of deprecated APIs is detected. The " +"new warning messages often provide details on how to migrate to more " +"recent APIs, thus easing the transition from one release to another." msgstr "" -#: ../../source/ref-telemetry.md:26 -msgid "Collected metrics" +#: ../../source/ref-changelog.md:855 +msgid "" +"Improved docs and docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" msgstr "" -#: ../../source/ref-telemetry.md:28 -msgid "Flower telemetry collects the following metrics:" +#: ../../source/ref-changelog.md:857 +msgid "MXNet example and documentation" msgstr "" -#: ../../source/ref-telemetry.md:30 +#: ../../source/ref-changelog.md:859 msgid "" -"**Flower version.** Understand which versions of Flower are currently " -"being used. This helps us to decide whether we should invest effort into " -"releasing a patch version for an older version of Flower or instead use " -"the bandwidth to build new features." +"FedBN implementation in example PyTorch: From Centralized To Federated " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" msgstr "" -#: ../../source/ref-telemetry.md:32 +#: ../../source/ref-changelog.md:863 msgid "" -"**Operating system.** Enables us to answer questions such as: *Should we " -"create more guides for Linux, macOS, or Windows?*" +"**Serialization-agnostic server** " +"([#721](https://github.com/adap/flower/pull/721))" msgstr "" -#: ../../source/ref-telemetry.md:34 +#: ../../source/ref-changelog.md:865 msgid "" -"**Python version.** Knowing the Python version helps us, for example, to " -"decide whether we should invest effort into supporting old versions of " -"Python or stop supporting them and start taking advantage of new Python " -"features." +"The Flower server is now fully serialization-agnostic. Prior usage of " +"class `Weights` (which represents parameters as deserialized NumPy " +"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " +"`Parameters` objects are fully serialization-agnostic and represents " +"parameters as byte arrays, the `tensor_type` attributes indicates how " +"these byte arrays should be interpreted (e.g., for " +"serialization/deserialization)." msgstr "" -#: ../../source/ref-telemetry.md:36 +#: ../../source/ref-changelog.md:867 msgid "" -"**Hardware properties.** Understanding the hardware environment that " -"Flower is being used in helps to decide whether we should, for example, " -"put more effort into supporting low-resource environments." +"Built-in strategies implement this approach by handling serialization and" +" deserialization to/from `Weights` internally. 
Custom/3rd-party Strategy " +"implementations should update to the slightly changed Strategy method " +"definitions. Strategy authors can consult PR " +"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" +" easily migrate to the new format." msgstr "" -#: ../../source/ref-telemetry.md:38 +#: ../../source/ref-changelog.md:869 msgid "" -"**Execution mode.** Knowing what execution mode Flower starts in enables " -"us to understand how heavily certain features are being used and better " -"prioritize based on that." +"Deprecated `flwr.server.Server.evaluate`, use " +"`flwr.server.Server.evaluate_round` instead " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -#: ../../source/ref-telemetry.md:40 -msgid "" -"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " -"time a Flower workload starts. This allows us to understand which device " -"types not only start Flower workloads but also successfully complete " -"them." +#: ../../source/ref-changelog.md:871 +msgid "v0.15.0 (2021-03-12)" msgstr "" -#: ../../source/ref-telemetry.md:42 +#: ../../source/ref-changelog.md:875 msgid "" -"**Source.** Flower telemetry tries to store a random source ID in " -"`~/.flwr/source` the first time a telemetry event is generated. The " -"source ID is important to identify whether an issue is recurring or " -"whether an issue is triggered by multiple clusters running concurrently " -"(which often happens in simulation). For example, if a device runs " -"multiple workloads at the same time, and this results in an issue, then, " -"in order to reproduce the issue, multiple workloads must be started at " -"the same time." +"**Server-side parameter initialization** " +"([#658](https://github.com/adap/flower/pull/658))" msgstr "" -#: ../../source/ref-telemetry.md:44 +#: ../../source/ref-changelog.md:877 msgid "" -"You may delete the source ID at any time. If you wish for all events " -"logged under a specific source ID to be deleted, you can send a deletion " -"request mentioning the source ID to `telemetry@flower.ai`. All events " -"related to that source ID will then be permanently deleted." +"Model parameters can now be initialized on the server-side. Server-side " +"parameter initialization works via a new `Strategy` method called " +"`initialize_parameters`." msgstr "" -#: ../../source/ref-telemetry.md:46 +#: ../../source/ref-changelog.md:879 msgid "" -"We will not collect any personally identifiable information. If you think" -" any of the metrics collected could be misused in any way, please [get in" -" touch with us](#how-to-contact-us). We will update this page to reflect " -"any changes to the metrics collected and publish changes in the " -"changelog." +"Built-in strategies support a new constructor argument called " +"`initial_parameters` to set the initial parameters. Built-in strategies " +"will provide these initial parameters to the server on startup and then " +"delete them to free the memory afterwards." msgstr "" -#: ../../source/ref-telemetry.md:48 +#: ../../source/ref-changelog.md:898 msgid "" -"If you think other metrics would be helpful for us to better guide our " -"decisions, please let us know! We will carefully review them; if we are " -"confident that they do not compromise user privacy, we may add them." 
+"If no initial parameters are provided to the strategy, the server will " +"continue to use the current behaviour (namely, it will ask one of the " +"connected clients for its parameters and use these as the initial global " +"parameters)." msgstr "" -#: ../../source/ref-telemetry.md:50 -msgid "How to inspect what is being reported" +#: ../../source/ref-changelog.md:900 +msgid "Deprecations" msgstr "" -#: ../../source/ref-telemetry.md:52 +#: ../../source/ref-changelog.md:902 msgid "" -"We wanted to make it very easy for you to inspect what anonymous usage " -"metrics are reported. You can view all the reported telemetry information" -" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " -"is disabled by default. You may use logging independently from " -"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " -"without sending any metrics." +"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " +"`flwr.server.strategy.FedAvg`, which is equivalent)" msgstr "" -#: ../../source/ref-telemetry.md:58 +#: ../../source/ref-changelog.md:904 +msgid "v0.14.0 (2021-02-18)" +msgstr "" + +#: ../../source/ref-changelog.md:908 msgid "" -"The inspect Flower telemetry without sending any anonymous usage metrics," -" use both environment variables:" +"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" msgstr "" -#: ../../source/ref-telemetry.md:64 -msgid "How to contact us" +#: ../../source/ref-changelog.md:910 +msgid "" +"Clients can now return an additional dictionary mapping `str` keys to " +"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " +"This means one can return almost arbitrary values from `fit`/`evaluate` " +"and make use of them on the server side!" msgstr "" -#: ../../source/ref-telemetry.md:66 +#: ../../source/ref-changelog.md:912 msgid "" -"We want to hear from you. If you have any feedback or ideas on how to " -"improve the way we handle anonymous usage metrics, reach out to us via " -"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " -"(`telemetry@flower.ai`)." +"This improvement also allowed for more consistent return types between " +"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " +"dict)` representing the loss, number of examples, and a dictionary " +"holding arbitrary problem-specific values like accuracy." msgstr "" -#: ../../source/tutorial-quickstart-android.rst:-1 +#: ../../source/ref-changelog.md:914 msgid "" -"Read this Federated Learning quickstart tutorial for creating an Android " -"app using Flower." +"In case you wondered: this feature is compatible with existing projects, " +"the additional dictionary return value is optional. New code should " +"however migrate to the new return types to be compatible with upcoming " +"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " +"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " +"details." 
msgstr "" -#: ../../source/tutorial-quickstart-android.rst:5 -msgid "Quickstart Android" +#: ../../source/ref-changelog.md:916 +msgid "" +"*Code example:* note the additional dictionary return values in both " +"`FlwrClient.fit` and `FlwrClient.evaluate`:" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:10 +#: ../../source/ref-changelog.md:931 msgid "" -"Let's build a federated learning system using TFLite and Flower on " -"Android!" +"**Generalized** `config` **argument in** `Client.fit` **and** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:12 +#: ../../source/ref-changelog.md:933 msgid "" -"Please refer to the `full code example " -"`_ to learn " -"more." +"The `config` argument used to be of type `Dict[str, str]`, which means " +"that dictionary values were expected to be strings. The new release " +"generalizes this to enable values of the following types: `bool`, " +"`bytes`, `float`, `int`, `str`." msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:-1 +#: ../../source/ref-changelog.md:935 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with FastAI to train a vision model on CIFAR-10." +"This means one can now pass almost arbitrary values to `fit`/`evaluate` " +"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" +"side and `int(config[\"epochs\"])` on the client side!" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:5 -msgid "Quickstart fastai" +#: ../../source/ref-changelog.md:937 +msgid "" +"*Code example:* note that the `config` dictionary now contains non-`str` " +"values in both `Client.fit` and `Client.evaluate`:" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:10 -msgid "Let's build a federated learning system using fastai and Flower!" +#: ../../source/ref-changelog.md:954 +msgid "v0.13.0 (2021-01-08)" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:12 +#: ../../source/ref-changelog.md:958 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"New example: PyTorch From Centralized To Federated " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:-1 -msgid "" -"Check out this Federating Learning quickstart tutorial for using Flower " -"with HuggingFace Transformers in order to fine-tune an LLM." +#: ../../source/ref-changelog.md:959 +msgid "Improved documentation" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:5 -msgid "Quickstart 🤗 Transformers" +#: ../../source/ref-changelog.md:960 +msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:10 +#: ../../source/ref-changelog.md:961 +msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +msgstr "" + +#: ../../source/ref-changelog.md:962 msgid "" -"Let's build a federated learning system using Hugging Face Transformers " -"and Flower!" +"Updated examples documentation " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:12 +#: ../../source/ref-changelog.md:963 msgid "" -"We will leverage Hugging Face to federate the training of language models" -" over multiple clients using Flower. More specifically, we will fine-tune" -" a pre-trained Transformer model (distilBERT) for sequence classification" -" over a dataset of IMDB ratings. 
The end goal is to detect if a movie " -"rating is positive or negative." +"Removed obsolete documentation " +"([#548](https://github.com/adap/flower/pull/548))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:18 -msgid "Dependencies" +#: ../../source/ref-changelog.md:965 +msgid "Bugfix:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:20 +#: ../../source/ref-changelog.md:967 msgid "" -"To follow along this tutorial you will need to install the following " -"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, " -":code:`torch`, and :code:`transformers`. This can be done using " -":code:`pip`:" +"`Server.fit` does not disconnect clients when finished, disconnecting the" +" clients is now handled in `flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:30 -msgid "Standard Hugging Face workflow" +#: ../../source/ref-changelog.md:969 +msgid "v0.12.0 (2020-12-07)" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:33 -msgid "Handling the data" +#: ../../source/ref-changelog.md:971 ../../source/ref-changelog.md:987 +msgid "Important changes:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:35 +#: ../../source/ref-changelog.md:973 msgid "" -"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " -"library. We then need to tokenize the data and create :code:`PyTorch` " -"dataloaders, this is all done in the :code:`load_data` function:" +"Added an example for embedded devices " +"([#507](https://github.com/adap/flower/pull/507))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:81 -msgid "Training and testing the model" +#: ../../source/ref-changelog.md:974 +msgid "" +"Added a new NumPyClient (in addition to the existing KerasClient) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:83 +#: ../../source/ref-changelog.md:975 msgid "" -"Once we have a way of creating our trainloader and testloader, we can " -"take care of the training and testing. This is very similar to any " -":code:`PyTorch` training or testing loop:" +"Deprecated `flwr_example` package and started to migrate examples into " +"the top-level `examples` directory " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:121 -msgid "Creating the model itself" +#: ../../source/ref-changelog.md:977 +msgid "v0.11.0 (2020-11-30)" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:123 +#: ../../source/ref-changelog.md:979 +msgid "Incompatible changes:" +msgstr "" + +#: ../../source/ref-changelog.md:981 msgid "" -"To create the model itself, we will just load the pre-trained distillBERT" -" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" +"Renamed strategy methods " +"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " +"Flower's public APIs. Other public methods/functions (e.g., every method " +"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " +"which is why we're removing it from the four methods in Strategy. 
To " +"migrate rename the following `Strategy` methods accordingly:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:136 -msgid "Federating the example" +#: ../../source/ref-changelog.md:982 +msgid "`on_configure_evaluate` => `configure_evaluate`" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:139 -msgid "Creating the IMDBClient" +#: ../../source/ref-changelog.md:983 +msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:141 -msgid "" -"To federate our example to multiple clients, we first need to write our " -"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). " -"This is very easy, as our model is a standard :code:`PyTorch` model:" +#: ../../source/ref-changelog.md:984 +msgid "`on_configure_fit` => `configure_fit`" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:169 +#: ../../source/ref-changelog.md:985 +msgid "`on_aggregate_fit` => `aggregate_fit`" +msgstr "" + +#: ../../source/ref-changelog.md:989 msgid "" -"The :code:`get_parameters` function lets the server get the client's " -"parameters. Inversely, the :code:`set_parameters` function allows the " -"server to send its parameters to the client. Finally, the :code:`fit` " -"function trains the model locally for the client, and the " -":code:`evaluate` function tests the model locally and returns the " -"relevant metrics." +"Deprecated `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). To migrate use " +"`FedAvg` instead." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:175 -msgid "Starting the server" +#: ../../source/ref-changelog.md:990 +msgid "" +"Simplified examples and baselines " +"([#484](https://github.com/adap/flower/pull/484))." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:177 +#: ../../source/ref-changelog.md:991 msgid "" -"Now that we have a way to instantiate clients, we need to create our " -"server in order to aggregate the results. Using Flower, this can be done " -"very easily by first choosing a strategy (here, we are using " -":code:`FedAvg`, which will define the global weights as the average of " -"all the clients' weights at each round) and then using the " -":code:`flwr.server.start_server` function:" +"Removed presently unused `on_conclude_round` from strategy interface " +"([#483](https://github.com/adap/flower/pull/483))." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:205 +#: ../../source/ref-changelog.md:992 msgid "" -"The :code:`weighted_average` function is there to provide a way to " -"aggregate the metrics distributed amongst the clients (basically this " -"allows us to display a nice average accuracy and loss for every round)." +"Set minimal Python version to 3.6.1 instead of 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:209 -msgid "Putting everything together" +#: ../../source/ref-changelog.md:993 +msgid "" +"Improved `Strategy` docstrings " +"([#470](https://github.com/adap/flower/pull/470))." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:211 -msgid "We can now start client instances using:" +#: ../../source/ref-example-projects.rst:2 +msgid "Example projects" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:221 +#: ../../source/ref-example-projects.rst:4 msgid "" -"And they will be able to connect to the server and start the federated " -"training." 
+"Flower comes with a number of usage examples. The examples demonstrate " +"how Flower can be used to federate different kinds of existing machine " +"learning pipelines, usually leveraging popular machine learning " +"frameworks such as `PyTorch `_ or `TensorFlow " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:223 +#: ../../source/ref-example-projects.rst:10 msgid "" -"If you want to check out everything put together, you should check out " -"the full code example: [https://github.com/adap/flower/tree/main/examples" -"/quickstart-" -"huggingface](https://github.com/adap/flower/tree/main/examples" -"/quickstart-huggingface)." +"The following examples are available as standalone projects. Quickstart " +"TensorFlow/Keras ---------------------------" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:227 +#: ../../source/ref-example-projects.rst:14 msgid "" -"Of course, this is a very basic example, and a lot can be added or " -"modified, it was just to showcase how simply we could federate a Hugging " -"Face workflow using Flower." +"The TensorFlow/Keras quickstart example shows CIFAR-10 image " +"classification with MobileNetV2:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:230 +#: ../../source/ref-example-projects.rst:17 msgid "" -"Note that in this example we used :code:`PyTorch`, but we could have very" -" well used :code:`TensorFlow`." +"`Quickstart TensorFlow (Code) " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:-1 +#: ../../source/ref-example-projects.rst:18 +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +msgstr "" + +#: ../../source/ref-example-projects.rst:19 msgid "" -"Read this Federated Learning quickstart tutorial for creating an iOS app " -"using Flower to train a neural network on MNIST." +"`Quickstart TensorFlow (Blog Post) `_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:5 -msgid "Quickstart iOS" +#: ../../source/ref-example-projects.rst:23 +#: ../../source/tutorial-quickstart-pytorch.rst:5 +msgid "Quickstart PyTorch" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:10 +#: ../../source/ref-example-projects.rst:25 msgid "" -"In this tutorial we will learn how to train a Neural Network on MNIST " -"using Flower and CoreML on iOS devices." +"The PyTorch quickstart example shows CIFAR-10 image classification with a" +" simple Convolutional Neural Network:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:12 +#: ../../source/ref-example-projects.rst:28 msgid "" -"First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a `virtualenv " -"`_. For the Flower " -"client implementation in iOS, it is recommended to use Xcode as our IDE." +"`Quickstart PyTorch (Code) " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:15 -msgid "" -"Our example consists of one Python *server* and two iPhone *clients* that" -" all have the same model." +#: ../../source/ref-example-projects.rst:29 +msgid ":doc:`Quickstart PyTorch (Tutorial) `" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:17 -msgid "" -"*Clients* are responsible for generating individual weight updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." 
+#: ../../source/ref-example-projects.rst:33 +msgid "PyTorch: From Centralized To Federated" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:21 +#: ../../source/ref-example-projects.rst:35 msgid "" -"Now that we have a rough idea of what is going on, let's get started to " -"setup our Flower server environment. We first need to install Flower. You" -" can do this by using pip:" +"This example shows how a regular PyTorch project can be federated using " +"Flower:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:27 -msgid "Or Poetry:" +#: ../../source/ref-example-projects.rst:37 +msgid "" +"`PyTorch: From Centralized To Federated (Code) " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:36 +#: ../../source/ref-example-projects.rst:38 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training using CoreML as our local training pipeline and " -"MNIST as our dataset. For simplicity reasons we will use the complete " -"Flower client with CoreML, that has been implemented and stored inside " -"the Swift SDK. The client implementation can be seen below:" +":doc:`PyTorch: From Centralized To Federated (Tutorial) `" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:72 -msgid "" -"Let's create a new application project in Xcode and add :code:`flwr` as a" -" dependency in your project. For our application, we will store the logic" -" of our app in :code:`FLiOSModel.swift` and the UI elements in " -":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" -" in this quickstart. Please refer to the `full code example " -"`_ to learn more " -"about the app." +#: ../../source/ref-example-projects.rst:42 +msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:75 -msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +#: ../../source/ref-example-projects.rst:44 +msgid "" +"This example shows how Flower can be used to build a federated learning " +"system that run across Raspberry Pi and Nvidia Jetson:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:83 +#: ../../source/ref-example-projects.rst:46 msgid "" -"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " -"will be bundled inside the application during deployment to your iOS " -"device. We need to pass the url to access mlmodel and run CoreML machine " -"learning processes, it can be retrieved by calling the function " -":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " -"into :code:`MLBatchProvider` object. The preprocessing is done inside " -":code:`DataLoader.swift`." +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:99 +#: ../../source/ref-example-projects.rst:47 msgid "" -"Since CoreML does not allow the model parameters to be seen before " -"training, and accessing the model parameters during or after the training" -" can only be done by specifying the layer name, we need to know this " -"informations beforehand, through looking at the model specification, " -"which are written as proto files. The implementation can be seen in " -":code:`MLModelInspect`." +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:102 +#: ../../source/ref-faq.rst:4 msgid "" -"After we have all of the necessary informations, let's create our Flower " -"client." 
+"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:117 +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +msgstr "" + +#: ../../source/ref-faq.rst:8 msgid "" -"Then start the Flower gRPC client and start communicating to the server " -"by passing our Flower client to the function :code:`startFlwrGRPC`." +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. Here's a quickstart example:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:124 +#: ../../source/ref-faq.rst:10 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " -"The attribute :code:`hostname` and :code:`port` tells the client which " -"server to connect to. This can be done by entering the hostname and port " -"in the application before clicking the start button to start the " -"federated learning process." +"`Flower simulation PyTorch " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:131 -#: ../../source/tutorial-quickstart-mxnet.rst:228 -#: ../../source/tutorial-quickstart-pytorch.rst:205 -#: ../../source/tutorial-quickstart-tensorflow.rst:100 +#: ../../source/ref-faq.rst:11 msgid "" -"For simple workloads we can start a Flower server and leave all the " -"configuration possibilities at their default values. In a file named " -":code:`server.py`, import Flower and start the server:" +"`Flower simulation TensorFlow/Keras " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:142 -#: ../../source/tutorial-quickstart-mxnet.rst:239 -#: ../../source/tutorial-quickstart-pytorch.rst:216 -#: ../../source/tutorial-quickstart-scikitlearn.rst:215 -#: ../../source/tutorial-quickstart-tensorflow.rst:112 -msgid "Train the model, federated!" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:144 -#: ../../source/tutorial-quickstart-pytorch.rst:218 -#: ../../source/tutorial-quickstart-tensorflow.rst:114 -#: ../../source/tutorial-quickstart-xgboost.rst:525 +#: ../../source/ref-faq.rst:15 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. FL systems usually have a server and " -"multiple clients. We therefore have to start the server first:" +"Find the `blog post about federated learning on embedded device here " +"`_" +" and the corresponding `GitHub code example " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:152 -msgid "" -"Once the server is running we can start the clients in different " -"terminals. Build and run the client through your Xcode, one through Xcode" -" Simulator and the other by deploying it to your iPhone. To see more " -"about how to deploy your app to iPhone or Simulator visit `here " -"`_." +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:156 +#: ../../source/ref-faq.rst:19 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system in your ios device. The full `source code " -"`_ for this " -"example can be found in :code:`examples/ios`." +"Yes, it does. 
Please take a look at our `blog post " +"`_ or check out the code examples:" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:-1 +#: ../../source/ref-faq.rst:21 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Jax to train a linear regression model on a scikit-learn dataset." +"`Android Kotlin example `_" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:5 -msgid "Quickstart JAX" +#: ../../source/ref-faq.rst:22 +msgid "`Android Java example `_" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with MXNet to train a Sequential model on MNIST." +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:5 -msgid "Quickstart MXNet" +#: ../../source/ref-faq.rst:26 +msgid "" +"Yes, of course. A list of available examples using Flower within a " +"blockchain environment is available here:" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:7 +#: ../../source/ref-faq.rst:28 msgid "" -"MXNet is no longer maintained and has been moved into `Attic " -"`_. As a result, we would " -"encourage you to use other ML frameworks alongise Flower, for example, " -"PyTorch. This tutorial might be removed in future versions of Flower." +"`Flower meets Nevermined GitHub Repository `_." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:12 +#: ../../source/ref-faq.rst:29 msgid "" -"In this tutorial, we will learn how to train a :code:`Sequential` model " -"on MNIST using Flower and MXNet." +"`Flower meets Nevermined YouTube video " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:14 -#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +#: ../../source/ref-faq.rst:30 msgid "" -"It is recommended to create a virtual environment and run everything " -"within this `virtualenv `_." +"`Flower meets KOSMoS `_." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:18 -#: ../../source/tutorial-quickstart-scikitlearn.rst:16 +#: ../../source/ref-faq.rst:31 msgid "" -"*Clients* are responsible for generating individual model parameter " -"updates for the model based on their local datasets. These updates are " -"then sent to the *server* which will aggregate them to produce an updated" -" global model. Finally, the *server* sends this improved version of the " -"model back to each *client*. A complete cycle of parameters updates is " -"called a *round*." +"`Flower meets Talan blog post `_ ." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:22 -#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +#: ../../source/ref-faq.rst:32 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running:" +"`Flower meets Talan GitHub Repository " +"`_ ." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:28 -msgid "Since we want to use MXNet, let's go ahead and install it:" +#: ../../source/ref-telemetry.md:1 +msgid "Telemetry" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:38 +#: ../../source/ref-telemetry.md:3 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. Our training " -"procedure and network architecture are based on MXNet´s `Hand-written " -"Digit Recognition tutorial " -"`_." 
+"The Flower open-source project collects **anonymous** usage metrics to " +"make well-informed decisions to improve Flower. Doing this enables the " +"Flower team to understand how Flower is used and what challenges users " +"might face." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:40 +#: ../../source/ref-telemetry.md:5 msgid "" -"In a file called :code:`client.py`, import Flower and MXNet related " -"packages:" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users that do not want to share anonymous usage metrics." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:55 -msgid "In addition, define the device allocation in MXNet with:" +#: ../../source/ref-telemetry.md:7 +msgid "Principles" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:61 -msgid "" -"We use MXNet to load MNIST, a popular image classification dataset of " -"handwritten digits for machine learning. The MXNet utility " -":code:`mx.test_utils.get_mnist()` downloads the training and test data." +#: ../../source/ref-telemetry.md:9 +msgid "We follow strong principles guarding anonymous usage metrics collection:" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:75 +#: ../../source/ref-telemetry.md:11 msgid "" -"Define the training and loss with MXNet. We train the model by looping " -"over the dataset, measure the corresponding loss, and optimize it." +"**Optional:** You will always be able to disable telemetry; read on to " +"learn “[How to opt-out](#how-to-opt-out)”." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:113 +#: ../../source/ref-telemetry.md:12 msgid "" -"Next, we define the validation of our machine learning model. We loop " -"over the test set and measure both loss and accuracy on the test set." +"**Anonymous:** The reported usage metrics are anonymous and do not " +"contain any personally identifiable information (PII). See “[Collected " +"metrics](#collected-metrics)” to understand what metrics are being " +"reported." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:137 +#: ../../source/ref-telemetry.md:13 msgid "" -"After defining the training and testing of a MXNet machine learning " -"model, we use these functions to implement a Flower client." +"**Transparent:** You can easily inspect what anonymous metrics are being " +"reported; see the section “[How to inspect what is being reported](#how-" +"to-inspect-what-is-being-reported)”" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:139 -msgid "Our Flower clients will use a simple :code:`Sequential` model:" +#: ../../source/ref-telemetry.md:14 +msgid "" +"**Open for feedback:** You can always reach out to us if you have " +"feedback; see the section “[How to contact us](#how-to-contact-us)” for " +"details." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:158 -msgid "" -"After loading the dataset with :code:`load_data()` we perform one forward" -" propagation to initialize the model and model parameters with " -":code:`model(init)`. Next, we implement a Flower client." +#: ../../source/ref-telemetry.md:16 +msgid "How to opt-out" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:160 -#: ../../source/tutorial-quickstart-pytorch.rst:144 -#: ../../source/tutorial-quickstart-tensorflow.rst:54 +#: ../../source/ref-telemetry.md:18 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. 
When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to train the neural network we defined earlier)." +"When Flower starts, it will check for an environment variable called " +"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " +"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " +"client, simply do so by prepending your command as in:" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:166 +#: ../../source/ref-telemetry.md:24 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses MXNet. Implementing :code:`NumPyClient` usually means " -"defining the following methods (:code:`set_parameters` is optional " -"though):" +"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," +" `.bashrc` (or whatever configuration file applies to your environment) " +"to disable Flower telemetry permanently." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:172 -#: ../../source/tutorial-quickstart-pytorch.rst:156 -#: ../../source/tutorial-quickstart-scikitlearn.rst:109 -msgid "return the model weight as a list of NumPy ndarrays" +#: ../../source/ref-telemetry.md:26 +msgid "Collected metrics" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:173 -#: ../../source/tutorial-quickstart-pytorch.rst:157 -#: ../../source/tutorial-quickstart-scikitlearn.rst:111 -msgid ":code:`set_parameters` (optional)" +#: ../../source/ref-telemetry.md:28 +msgid "Flower telemetry collects the following metrics:" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:174 -#: ../../source/tutorial-quickstart-pytorch.rst:158 -#: ../../source/tutorial-quickstart-scikitlearn.rst:111 +#: ../../source/ref-telemetry.md:30 msgid "" -"update the local model weights with the parameters received from the " -"server" +"**Flower version.** Understand which versions of Flower are currently " +"being used. This helps us to decide whether we should invest effort into " +"releasing a patch version for an older version of Flower or instead use " +"the bandwidth to build new features." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:176 -#: ../../source/tutorial-quickstart-pytorch.rst:160 -#: ../../source/tutorial-quickstart-scikitlearn.rst:114 -msgid "set the local model weights" +#: ../../source/ref-telemetry.md:32 +msgid "" +"**Operating system.** Enables us to answer questions such as: *Should we " +"create more guides for Linux, macOS, or Windows?*" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:177 -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:115 -msgid "train the local model" +#: ../../source/ref-telemetry.md:34 +msgid "" +"**Python version.** Knowing the Python version helps us, for example, to " +"decide whether we should invest effort into supporting old versions of " +"Python or stop supporting them and start taking advantage of new Python " +"features." 
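To make the opt-out described above concrete, here is a minimal sketch (not taken verbatim from the Flower docs) that sets the variable from Python before Flower is imported and started; it assumes, as stated above, that `FLWR_TELEMETRY_ENABLED` is checked when Flower starts:

    import os

    # Set the variable before Flower is imported/started so that the check
    # described above sees it.
    os.environ["FLWR_TELEMETRY_ENABLED"] = "0"

    import flwr as fl

    # Anything started from here on should not send usage metrics.
    fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3))

Prepending `FLWR_TELEMETRY_ENABLED=0` to the launch command, as the text suggests, achieves the same effect without touching the code.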
msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:178 -#: ../../source/tutorial-quickstart-pytorch.rst:162 -#: ../../source/tutorial-quickstart-scikitlearn.rst:116 -msgid "receive the updated local model weights" +#: ../../source/ref-telemetry.md:36 +msgid "" +"**Hardware properties.** Understanding the hardware environment that " +"Flower is being used in helps to decide whether we should, for example, " +"put more effort into supporting low-resource environments." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:180 -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:118 -msgid "test the local model" +#: ../../source/ref-telemetry.md:38 +msgid "" +"**Execution mode.** Knowing what execution mode Flower starts in enables " +"us to understand how heavily certain features are being used and better " +"prioritize based on that." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:182 -msgid "They can be implemented in the following way:" +#: ../../source/ref-telemetry.md:40 +msgid "" +"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " +"time a Flower workload starts. This allows us to understand which device " +"types not only start Flower workloads but also successfully complete " +"them." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:212 +#: ../../source/ref-telemetry.md:42 msgid "" -"We can now create an instance of our class :code:`MNISTClient` and add " -"one line to actually run this client:" +"**Source.** Flower telemetry tries to store a random source ID in " +"`~/.flwr/source` the first time a telemetry event is generated. The " +"source ID is important to identify whether an issue is recurring or " +"whether an issue is triggered by multiple clusters running concurrently " +"(which often happens in simulation). For example, if a device runs " +"multiple workloads at the same time, and this results in an issue, then, " +"in order to reproduce the issue, multiple workloads must be started at " +"the same time." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:219 +#: ../../source/ref-telemetry.md:44 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()` or " -":code:`fl.client.start_numpy_client()`. The string " -":code:`\"0.0.0.0:8080\"` tells the client which server to connect to. In " -"our case we can run the server and the client on the same machine, " -"therefore we use :code:`\"0.0.0.0:8080\"`. If we run a truly federated " -"workload with the server and clients running on different machines, all " -"that needs to change is the :code:`server_address` we pass to the client." +"You may delete the source ID at any time. If you wish for all events " +"logged under a specific source ID to be deleted, you can send a deletion " +"request mentioning the source ID to `telemetry@flower.ai`. All events " +"related to that source ID will then be permanently deleted." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:241 +#: ../../source/ref-telemetry.md:46 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. Federated learning systems usually have a " -"server and multiple clients. We therefore have to start the server first:" +"We will not collect any personally identifiable information. If you think" +" any of the metrics collected could be misused in any way, please [get in" +" touch with us](#how-to-contact-us). 
We will update this page to reflect " +"any changes to the metrics collected and publish changes in the " +"changelog." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:249 -#: ../../source/tutorial-quickstart-pytorch.rst:226 -#: ../../source/tutorial-quickstart-scikitlearn.rst:224 -#: ../../source/tutorial-quickstart-tensorflow.rst:122 -#: ../../source/tutorial-quickstart-xgboost.rst:533 +#: ../../source/ref-telemetry.md:48 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Open a new terminal and start the first client:" +"If you think other metrics would be helpful for us to better guide our " +"decisions, please let us know! We will carefully review them; if we are " +"confident that they do not compromise user privacy, we may add them." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:256 -#: ../../source/tutorial-quickstart-pytorch.rst:233 -#: ../../source/tutorial-quickstart-scikitlearn.rst:231 -#: ../../source/tutorial-quickstart-tensorflow.rst:129 -#: ../../source/tutorial-quickstart-xgboost.rst:540 -msgid "Open another terminal and start the second client:" +#: ../../source/ref-telemetry.md:50 +msgid "How to inspect what is being reported" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:262 -#: ../../source/tutorial-quickstart-pytorch.rst:239 -#: ../../source/tutorial-quickstart-scikitlearn.rst:237 -#: ../../source/tutorial-quickstart-xgboost.rst:546 +#: ../../source/ref-telemetry.md:52 msgid "" -"Each client will have its own dataset. You should now see how the " -"training does in the very first terminal (the one that started the " -"server):" +"We wanted to make it very easy for you to inspect what anonymous usage " +"metrics are reported. You can view all the reported telemetry information" +" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " +"is disabled by default. You may use logging independently from " +"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " +"without sending any metrics." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:294 +#: ../../source/ref-telemetry.md:58 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples" -"/quickstart-mxnet`." +"The inspect Flower telemetry without sending any anonymous usage metrics," +" use both environment variables:" msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:-1 +#: ../../source/ref-telemetry.md:64 +msgid "How to contact us" +msgstr "" + +#: ../../source/ref-telemetry.md:66 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Pandas to perform Federated Analytics." +"We want to hear from you. If you have any feedback or ideas on how to " +"improve the way we handle anonymous usage metrics, reach out to us via " +"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " +"(`telemetry@flower.ai`)." msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:5 -msgid "Quickstart Pandas" +#: ../../source/tutorial-quickstart-android.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an Android " +"app using Flower." msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:10 -msgid "Let's build a federated analytics system using Pandas and Flower!" 
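For the inspection workflow described above ("use both environment variables"), a small illustrative sketch might look as follows; the exact log output depends on the Flower version:

    import os

    # Do not send any metrics, but log the events that would be reported.
    os.environ["FLWR_TELEMETRY_ENABLED"] = "0"
    os.environ["FLWR_TELEMETRY_LOGGING"] = "1"

    import flwr as fl

    # Any server or client started now prints its telemetry events locally
    # instead of sending them.
    fl.server.start_server(config=fl.server.ServerConfig(num_rounds=1))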
+#: ../../source/tutorial-quickstart-android.rst:5 +msgid "Quickstart Android" msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:12 +#: ../../source/tutorial-quickstart-android.rst:10 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"Let's build a federated learning system using TFLite and Flower on " +"Android!" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:-1 +#: ../../source/tutorial-quickstart-android.rst:12 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch to train a CNN model on MNIST." +"Please refer to the `full code example " +"`_ to learn " +"more." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:13 +#: ../../source/tutorial-quickstart-fastai.rst:-1 msgid "" -"In this tutorial we will learn how to train a Convolutional Neural " -"Network on CIFAR10 using Flower and PyTorch." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with FastAI to train a vision model on CIFAR-10." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:15 -#: ../../source/tutorial-quickstart-xgboost.rst:39 -msgid "" -"First of all, it is recommended to create a virtual environment and run " -"everything within a `virtualenv `_." +#: ../../source/tutorial-quickstart-fastai.rst:5 +msgid "Quickstart fastai" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:29 -msgid "" -"Since we want to use PyTorch to solve a computer vision task, let's go " -"ahead and install PyTorch and the **torchvision** library:" +#: ../../source/tutorial-quickstart-fastai.rst:10 +msgid "Let's build a federated learning system using fastai and Flower!" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:39 +#: ../../source/tutorial-quickstart-fastai.rst:12 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. Our training " -"procedure and network architecture are based on PyTorch's `Deep Learning " -"with PyTorch " -"`_." +"Please refer to the `full code example " +"`_ " +"to learn more." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:41 +#: ../../source/tutorial-quickstart-huggingface.rst:-1 msgid "" -"In a file called :code:`client.py`, import Flower and PyTorch related " -"packages:" +"Check out this Federating Learning quickstart tutorial for using Flower " +"with HuggingFace Transformers in order to fine-tune an LLM." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:56 -msgid "In addition, we define the device allocation in PyTorch with:" +#: ../../source/tutorial-quickstart-huggingface.rst:5 +msgid "Quickstart 🤗 Transformers" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:62 +#: ../../source/tutorial-quickstart-huggingface.rst:10 msgid "" -"We use PyTorch to load CIFAR10, a popular colored image classification " -"dataset for machine learning. The PyTorch :code:`DataLoader()` downloads " -"the training and test data that are then normalized." +"Let's build a federated learning system using Hugging Face Transformers " +"and Flower!" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:78 +#: ../../source/tutorial-quickstart-huggingface.rst:12 msgid "" -"Define the loss and optimizer with PyTorch. The training of the dataset " -"is done by looping over the dataset, measure the corresponding loss and " -"optimize it." +"We will leverage Hugging Face to federate the training of language models" +" over multiple clients using Flower. 
More specifically, we will fine-tune" +" a pre-trained Transformer model (distilBERT) for sequence classification" +" over a dataset of IMDB ratings. The end goal is to detect if a movie " +"rating is positive or negative." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:94 -msgid "" -"Define then the validation of the machine learning network. We loop over" -" the test set and measure the loss and accuracy of the test set." +#: ../../source/tutorial-quickstart-huggingface.rst:18 +msgid "Dependencies" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:113 +#: ../../source/tutorial-quickstart-huggingface.rst:20 msgid "" -"After defining the training and testing of a PyTorch machine learning " -"model, we use the functions for the Flower clients." +"To follow along this tutorial you will need to install the following " +"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, " +":code:`torch`, and :code:`transformers`. This can be done using " +":code:`pip`:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:115 -msgid "" -"The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 " -"Minute Blitz':" +#: ../../source/tutorial-quickstart-huggingface.rst:30 +msgid "Standard Hugging Face workflow" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:142 -msgid "" -"After loading the data set with :code:`load_data()` we define the Flower " -"interface." +#: ../../source/tutorial-quickstart-huggingface.rst:33 +msgid "Handling the data" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/tutorial-quickstart-huggingface.rst:35 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses PyTorch. Implementing :code:`NumPyClient` usually means " -"defining the following methods (:code:`set_parameters` is optional " -"though):" +"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " +"library. We then need to tokenize the data and create :code:`PyTorch` " +"dataloaders, this is all done in the :code:`load_data` function:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:166 -msgid "which can be implemented in the following way:" +#: ../../source/tutorial-quickstart-huggingface.rst:81 +msgid "Training and testing the model" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:189 -#: ../../source/tutorial-quickstart-tensorflow.rst:82 +#: ../../source/tutorial-quickstart-huggingface.rst:83 msgid "" -"We can now create an instance of our class :code:`CifarClient` and add " -"one line to actually run this client:" +"Once we have a way of creating our trainloader and testloader, we can " +"take care of the training and testing. This is very similar to any " +":code:`PyTorch` training or testing loop:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:196 -#: ../../source/tutorial-quickstart-tensorflow.rst:90 -msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"[::]:8080\"` tells " -"the client which server to connect to. In our case we can run the server " -"and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. 
If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +#: ../../source/tutorial-quickstart-huggingface.rst:121 +msgid "Creating the model itself" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:271 +#: ../../source/tutorial-quickstart-huggingface.rst:123 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples" -"/quickstart-pytorch`." +"To create the model itself, we will just load the pre-trained distillBERT" +" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch Lightning to train an Auto Encoder model on MNIST." +#: ../../source/tutorial-quickstart-huggingface.rst:136 +msgid "Federating the example" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 -msgid "Quickstart PyTorch Lightning" +#: ../../source/tutorial-quickstart-huggingface.rst:139 +msgid "Creating the IMDBClient" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 +#: ../../source/tutorial-quickstart-huggingface.rst:141 msgid "" -"Let's build a horizontal federated learning system using PyTorch " -"Lightning and Flower!" +"To federate our example to multiple clients, we first need to write our " +"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). " +"This is very easy, as our model is a standard :code:`PyTorch` model:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +#: ../../source/tutorial-quickstart-huggingface.rst:169 msgid "" -"Please refer to the `full code example " -"`_ to learn more." +"The :code:`get_parameters` function lets the server get the client's " +"parameters. Inversely, the :code:`set_parameters` function allows the " +"server to send its parameters to the client. Finally, the :code:`fit` " +"function trains the model locally for the client, and the " +":code:`evaluate` function tests the model locally and returns the " +"relevant metrics." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with scikit-learn to train a linear regression model." +#: ../../source/tutorial-quickstart-huggingface.rst:175 +msgid "Starting the server" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:5 -msgid "Quickstart scikit-learn" +#: ../../source/tutorial-quickstart-huggingface.rst:177 +msgid "" +"Now that we have a way to instantiate clients, we need to create our " +"server in order to aggregate the results. Using Flower, this can be done " +"very easily by first choosing a strategy (here, we are using " +":code:`FedAvg`, which will define the global weights as the average of " +"all the clients' weights at each round) and then using the " +":code:`flwr.server.start_server` function:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:10 +#: ../../source/tutorial-quickstart-huggingface.rst:205 msgid "" -"In this tutorial, we will learn how to train a :code:`Logistic " -"Regression` model on MNIST using Flower and scikit-learn." 
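A rough sketch of the :code:`NumPyClient` subclass described above for the Hugging Face example is shown below. It assumes :code:`net` is a Hugging Face sequence-classification model and that :code:`trainloader`/:code:`testloader` yield tokenized batches as dictionaries containing a :code:`labels` key; the optimizer and hyper-parameters are illustrative rather than the exact code of the example:

    from collections import OrderedDict

    import flwr as fl
    import torch


    class IMDBClient(fl.client.NumPyClient):
        def __init__(self, net, trainloader, testloader):
            self.net = net                  # assumed: a HF sequence-classification model
            self.trainloader = trainloader  # assumed: yields dicts of tensors incl. "labels"
            self.testloader = testloader

        def get_parameters(self, config):
            # Return the model weights as a list of NumPy ndarrays
            return [val.cpu().numpy() for _, val in self.net.state_dict().items()]

        def set_parameters(self, parameters):
            # Load the weights received from the server into the local model
            params_dict = zip(self.net.state_dict().keys(), parameters)
            state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
            self.net.load_state_dict(state_dict, strict=True)

        def fit(self, parameters, config):
            self.set_parameters(parameters)
            optimizer = torch.optim.AdamW(self.net.parameters(), lr=5e-5)  # illustrative
            self.net.train()
            for batch in self.trainloader:
                optimizer.zero_grad()
                outputs = self.net(**batch)
                outputs.loss.backward()
                optimizer.step()
            return self.get_parameters(config={}), len(self.trainloader.dataset), {}

        def evaluate(self, parameters, config):
            self.set_parameters(parameters)
            self.net.eval()
            loss, correct, total = 0.0, 0, 0
            with torch.no_grad():
                for batch in self.testloader:
                    outputs = self.net(**batch)
                    loss += outputs.loss.item()
                    preds = torch.argmax(outputs.logits, dim=-1)
                    correct += (preds == batch["labels"]).sum().item()
                    total += len(batch["labels"])
            return loss / len(self.testloader), total, {"accuracy": correct / total}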
+"The :code:`weighted_average` function is there to provide a way to " +"aggregate the metrics distributed amongst the clients (basically this " +"allows us to display a nice average accuracy and loss for every round)." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:26 -msgid "Since we want to use scikt-learn, let's go ahead and install it:" +#: ../../source/tutorial-quickstart-huggingface.rst:209 +msgid "Putting everything together" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:32 -msgid "Or simply install all dependencies using Poetry:" +#: ../../source/tutorial-quickstart-huggingface.rst:211 +msgid "We can now start client instances using:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:42 +#: ../../source/tutorial-quickstart-huggingface.rst:221 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. However, before " -"setting up the client and server, we will define all functionalities that" -" we need for our federated learning setup within :code:`utils.py`. The " -":code:`utils.py` contains different functions defining all the machine " -"learning basics:" +"And they will be able to connect to the server and start the federated " +"training." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:45 -msgid ":code:`get_model_parameters()`" +#: ../../source/tutorial-quickstart-huggingface.rst:223 +msgid "" +"If you want to check out everything put together, you should check out " +"the `full code example `_ ." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:46 -msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +#: ../../source/tutorial-quickstart-huggingface.rst:226 +msgid "" +"Of course, this is a very basic example, and a lot can be added or " +"modified, it was just to showcase how simply we could federate a Hugging " +"Face workflow using Flower." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:47 -msgid ":code:`set_model_params()`" +#: ../../source/tutorial-quickstart-huggingface.rst:229 +msgid "" +"Note that in this example we used :code:`PyTorch`, but we could have very" +" well used :code:`TensorFlow`." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:48 -msgid "Sets the parameters of a :code:`sklean` LogisticRegression model" +#: ../../source/tutorial-quickstart-ios.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an iOS app " +"using Flower to train a neural network on MNIST." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:49 -msgid ":code:`set_initial_params()`" +#: ../../source/tutorial-quickstart-ios.rst:5 +msgid "Quickstart iOS" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid "Initializes the model parameters that the Flower server will ask for" +#: ../../source/tutorial-quickstart-ios.rst:10 +msgid "" +"In this tutorial we will learn how to train a Neural Network on MNIST " +"using Flower and CoreML on iOS devices." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:51 -msgid ":code:`load_mnist()`" +#: ../../source/tutorial-quickstart-ios.rst:12 +msgid "" +"First of all, for running the Flower Python server, it is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:52 -msgid "Loads the MNIST dataset using OpenML" +#: ../../source/tutorial-quickstart-ios.rst:15 +msgid "" +"Our example consists of one Python *server* and two iPhone *clients* that" +" all have the same model." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:53 -msgid ":code:`shuffle()`" +#: ../../source/tutorial-quickstart-ios.rst:17 +msgid "" +"*Clients* are responsible for generating individual weight updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:54 -msgid "Shuffles data and its label" +#: ../../source/tutorial-quickstart-ios.rst:21 +msgid "" +"Now that we have a rough idea of what is going on, let's get started to " +"setup our Flower server environment. We first need to install Flower. You" +" can do this by using pip:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:56 -msgid ":code:`partition()`" +#: ../../source/tutorial-quickstart-ios.rst:27 +msgid "Or Poetry:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:56 -msgid "Splits datasets into a number of partitions" +#: ../../source/tutorial-quickstart-ios.rst:34 +#: ../../source/tutorial-quickstart-mxnet.rst:36 +#: ../../source/tutorial-quickstart-pytorch.rst:37 +#: ../../source/tutorial-quickstart-scikitlearn.rst:40 +#: ../../source/tutorial-quickstart-tensorflow.rst:29 +#: ../../source/tutorial-quickstart-xgboost.rst:55 +msgid "Flower Client" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:58 +#: ../../source/tutorial-quickstart-ios.rst:36 msgid "" -"Please check out :code:`utils.py` `here " -"`_ for more details. The pre-defined functions are used in" -" the :code:`client.py` and imported. The :code:`client.py` also requires " -"to import several packages such as Flower and scikit-learn:" -msgstr "" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:73 -msgid "" -"We load the MNIST dataset from `OpenML `_, " -"a popular image classification dataset of handwritten digits for machine " -"learning. The utility :code:`utils.load_mnist()` downloads the training " -"and test data. The training set is split afterwards into 10 partitions " -"with :code:`utils.partition()`." +"Now that we have all our dependencies installed, let's run a simple " +"distributed training using CoreML as our local training pipeline and " +"MNIST as our dataset. For simplicity reasons we will use the complete " +"Flower client with CoreML, that has been implemented and stored inside " +"the Swift SDK. The client implementation can be seen below:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:85 +#: ../../source/tutorial-quickstart-ios.rst:72 msgid "" -"Next, the logistic regression model is defined and initialized with " -":code:`utils.set_initial_params()`." +"Let's create a new application project in Xcode and add :code:`flwr` as a" +" dependency in your project. For our application, we will store the logic" +" of our app in :code:`FLiOSModel.swift` and the UI elements in " +":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" +" in this quickstart. Please refer to the `full code example " +"`_ to learn more " +"about the app." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:97 -msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to fit the logistic regression we defined earlier)." +#: ../../source/tutorial-quickstart-ios.rst:75 +msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:103 +#: ../../source/tutorial-quickstart-ios.rst:83 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses scikit-learn. Implementing :code:`NumPyClient` usually " -"means defining the following methods (:code:`set_parameters` is optional " -"though):" +"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " +"will be bundled inside the application during deployment to your iOS " +"device. We need to pass the url to access mlmodel and run CoreML machine " +"learning processes, it can be retrieved by calling the function " +":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " +"into :code:`MLBatchProvider` object. The preprocessing is done inside " +":code:`DataLoader.swift`." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:112 -msgid "is directly imported with :code:`utils.set_model_params()`" +#: ../../source/tutorial-quickstart-ios.rst:99 +msgid "" +"Since CoreML does not allow the model parameters to be seen before " +"training, and accessing the model parameters during or after the training" +" can only be done by specifying the layer name, we need to know this " +"information beforehand, through looking at the model specification, which" +" are written as proto files. The implementation can be seen in " +":code:`MLModelInspect`." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:120 -msgid "The methods can be implemented in the following way:" +#: ../../source/tutorial-quickstart-ios.rst:102 +msgid "" +"After we have all of the necessary information, let's create our Flower " +"client." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:143 +#: ../../source/tutorial-quickstart-ios.rst:117 msgid "" -"We can now create an instance of our class :code:`MnistClient` and add " -"one line to actually run this client:" +"Then start the Flower gRPC client and start communicating to the server " +"by passing our Flower client to the function :code:`startFlwrGRPC`." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:150 +#: ../../source/tutorial-quickstart-ios.rst:124 msgid "" "That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" -" the client which server to connect to. In our case we can run the server" -" and the client on the same machine, therefore we use " -":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we pass to the client." 
+"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " +"The attribute :code:`hostname` and :code:`port` tells the client which " +"server to connect to. This can be done by entering the hostname and port " +"in the application before clicking the start button to start the " +"federated learning process." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:159 -msgid "" -"The following Flower server is a little bit more advanced and returns an " -"evaluation function for the server-side evaluation. First, we import " -"again all required libraries such as Flower and scikit-learn." +#: ../../source/tutorial-quickstart-ios.rst:129 +#: ../../source/tutorial-quickstart-mxnet.rst:226 +#: ../../source/tutorial-quickstart-pytorch.rst:203 +#: ../../source/tutorial-quickstart-scikitlearn.rst:157 +#: ../../source/tutorial-quickstart-tensorflow.rst:98 +#: ../../source/tutorial-quickstart-xgboost.rst:309 +msgid "Flower Server" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:162 -msgid ":code:`server.py`, import Flower and start the server:" +#: ../../source/tutorial-quickstart-ios.rst:131 +#: ../../source/tutorial-quickstart-mxnet.rst:228 +#: ../../source/tutorial-quickstart-pytorch.rst:205 +#: ../../source/tutorial-quickstart-tensorflow.rst:100 +msgid "" +"For simple workloads we can start a Flower server and leave all the " +"configuration possibilities at their default values. In a file named " +":code:`server.py`, import Flower and start the server:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:173 -msgid "" -"The number of federated learning rounds is set in :code:`fit_round()` and" -" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " -"function is called after each federated learning round and gives you " -"information about loss and accuracy." +#: ../../source/tutorial-quickstart-ios.rst:142 +#: ../../source/tutorial-quickstart-mxnet.rst:239 +#: ../../source/tutorial-quickstart-pytorch.rst:216 +#: ../../source/tutorial-quickstart-scikitlearn.rst:215 +#: ../../source/tutorial-quickstart-tensorflow.rst:112 +msgid "Train the model, federated!" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:198 +#: ../../source/tutorial-quickstart-ios.rst:144 +#: ../../source/tutorial-quickstart-pytorch.rst:218 +#: ../../source/tutorial-quickstart-tensorflow.rst:114 +#: ../../source/tutorial-quickstart-xgboost.rst:525 msgid "" -"The :code:`main` contains the server-side parameter initialization " -":code:`utils.set_initial_params()` as well as the aggregation strategy " -":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " -"federated averaging (or FedAvg), with two clients and evaluation after " -"each federated learning round. The server can be started with the command" -" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." +"With both client and server ready, we can now run everything and see " +"federated learning in action. FL systems usually have a server and " +"multiple clients. We therefore have to start the server first:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:217 +#: ../../source/tutorial-quickstart-ios.rst:152 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. Federated learning systems usually have a " -"server and multiple clients. 
We, therefore, have to start the server " -"first:" +"Once the server is running we can start the clients in different " +"terminals. Build and run the client through your Xcode, one through Xcode" +" Simulator and the other by deploying it to your iPhone. To see more " +"about how to deploy your app to iPhone or Simulator visit `here " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:271 +#: ../../source/tutorial-quickstart-ios.rst:156 msgid "" "Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples/sklearn-logreg-" -"mnist`." +"learning system in your ios device. The full `source code " +"`_ for this " +"example can be found in :code:`examples/ios`." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +#: ../../source/tutorial-quickstart-jax.rst:-1 msgid "" "Check out this Federated Learning quickstart tutorial for using Flower " -"with TensorFlow to train a MobilNetV2 model on CIFAR-10." +"with Jax to train a linear regression model on a scikit-learn dataset." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:5 -msgid "Quickstart TensorFlow" +#: ../../source/tutorial-quickstart-jax.rst:5 +msgid "Quickstart JAX" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:13 -msgid "Let's build a federated learning system in less than 20 lines of code!" +#: ../../source/tutorial-quickstart-mxnet.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with MXNet to train a Sequential model on MNIST." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:15 -msgid "Before Flower can be imported we have to install it:" +#: ../../source/tutorial-quickstart-mxnet.rst:5 +msgid "Quickstart MXNet" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:21 +#: ../../source/tutorial-quickstart-mxnet.rst:7 msgid "" -"Since we want to use the Keras API of TensorFlow (TF), we have to install" -" TF as well:" +"MXNet is no longer maintained and has been moved into `Attic " +"`_. As a result, we would " +"encourage you to use other ML frameworks alongside Flower, for example, " +"PyTorch. This tutorial might be removed in future versions of Flower." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:31 -msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#: ../../source/tutorial-quickstart-mxnet.rst:12 +msgid "" +"In this tutorial, we will learn how to train a :code:`Sequential` model " +"on MNIST using Flower and MXNet." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:38 +#: ../../source/tutorial-quickstart-mxnet.rst:14 +#: ../../source/tutorial-quickstart-scikitlearn.rst:12 msgid "" -"We use the Keras utilities of TF to load CIFAR10, a popular colored image" -" classification dataset for machine learning. The call to " -":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches " -"it locally, and then returns the entire training and test set as NumPy " -"ndarrays." +"It is recommended to create a virtual environment and run everything " +"within this :doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:47 +#: ../../source/tutorial-quickstart-mxnet.rst:16 +#: ../../source/tutorial-quickstart-pytorch.rst:17 +#: ../../source/tutorial-quickstart-scikitlearn.rst:14 msgid "" -"Next, we need a model. 
For the purpose of this tutorial, we use " -"MobilNetV2 with 10 output classes:" +"Our example consists of one *server* and two *clients* all having the " +"same model." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:60 +#: ../../source/tutorial-quickstart-mxnet.rst:18 +#: ../../source/tutorial-quickstart-scikitlearn.rst:16 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses Keras. The :code:`NumPyClient` interface defines three " -"methods which can be implemented in the following way:" +"*Clients* are responsible for generating individual model parameter " +"updates for the model based on their local datasets. These updates are " +"then sent to the *server* which will aggregate them to produce an updated" +" global model. Finally, the *server* sends this improved version of the " +"model back to each *client*. A complete cycle of parameters updates is " +"called a *round*." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:135 -msgid "Each client will have its own dataset." +#: ../../source/tutorial-quickstart-mxnet.rst:22 +#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +msgid "" +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. You can do this by running:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:137 -msgid "" -"You should now see how the training does in the very first terminal (the " -"one that started the server):" +#: ../../source/tutorial-quickstart-mxnet.rst:28 +msgid "Since we want to use MXNet, let's go ahead and install it:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:169 +#: ../../source/tutorial-quickstart-mxnet.rst:38 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this can be found in :code:`examples" -"/quickstart-tensorflow/client.py`." +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. Our training " +"procedure and network architecture are based on MXNet´s `Hand-written " +"Digit Recognition tutorial " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:-1 +#: ../../source/tutorial-quickstart-mxnet.rst:40 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with XGBoost to train classification models on trees." +"In a file called :code:`client.py`, import Flower and MXNet related " +"packages:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:5 -msgid "Quickstart XGBoost" +#: ../../source/tutorial-quickstart-mxnet.rst:55 +msgid "In addition, define the device allocation in MXNet with:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:14 -msgid "Federated XGBoost" +#: ../../source/tutorial-quickstart-mxnet.rst:61 +msgid "" +"We use MXNet to load MNIST, a popular image classification dataset of " +"handwritten digits for machine learning. The MXNet utility " +":code:`mx.test_utils.get_mnist()` downloads the training and test data." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:16 +#: ../../source/tutorial-quickstart-mxnet.rst:75 msgid "" -"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " -"implementation of gradient-boosted decision tree (**GBDT**), that " -"maximises the computational boundaries for boosted tree methods. 
It's " -"primarily designed to enhance both the performance and computational " -"speed of machine learning models. In XGBoost, trees are constructed " -"concurrently, unlike the sequential approach taken by GBDT." +"Define the training and loss with MXNet. We train the model by looping " +"over the dataset, measure the corresponding loss, and optimize it." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:20 +#: ../../source/tutorial-quickstart-mxnet.rst:113 msgid "" -"Often, for tabular data on medium-sized datasets with fewer than 10k " -"training examples, XGBoost surpasses the results of deep learning " -"techniques." +"Next, we define the validation of our machine learning model. We loop " +"over the test set and measure both loss and accuracy on the test set." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:23 -msgid "Why federated XGBoost?" +#: ../../source/tutorial-quickstart-mxnet.rst:137 +msgid "" +"After defining the training and testing of a MXNet machine learning " +"model, we use these functions to implement a Flower client." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:25 +#: ../../source/tutorial-quickstart-mxnet.rst:139 +msgid "Our Flower clients will use a simple :code:`Sequential` model:" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:158 msgid "" -"Indeed, as the demand for data privacy and decentralized learning grows, " -"there's an increasing requirement to implement federated XGBoost systems " -"for specialised applications, like survival analysis and financial fraud " -"detection." +"After loading the dataset with :code:`load_data()` we perform one forward" +" propagation to initialize the model and model parameters with " +":code:`model(init)`. Next, we implement a Flower client." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:27 +#: ../../source/tutorial-quickstart-mxnet.rst:160 +#: ../../source/tutorial-quickstart-pytorch.rst:144 +#: ../../source/tutorial-quickstart-tensorflow.rst:54 msgid "" -"Federated learning ensures that raw data remains on the local device, " -"making it an attractive approach for sensitive domains where data " -"security and privacy are paramount. Given the robustness and efficiency " -"of XGBoost, combining it with federated learning offers a promising " -"solution for these specific challenges." +"The Flower server interacts with clients through an interface called " +":code:`Client`. When the server selects a particular client for training," +" it sends training instructions over the network. The client receives " +"those instructions and calls one of the :code:`Client` methods to run " +"your code (i.e., to train the neural network we defined earlier)." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:30 +#: ../../source/tutorial-quickstart-mxnet.rst:166 msgid "" -"In this tutorial we will learn how to train a federated XGBoost model on " -"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " -"example (`full code xgboost-quickstart " -"`_)" -" with two *clients* and one *server* to demonstrate how federated XGBoost" -" works, and then we dive into a more complex example (`full code xgboost-" -"comprehensive `_) to run various experiments." +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses MXNet. 
Implementing :code:`NumPyClient` usually means " +"defining the following methods (:code:`set_parameters` is optional " +"though):" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:37 -msgid "Environment Setup" +#: ../../source/tutorial-quickstart-mxnet.rst:172 +#: ../../source/tutorial-quickstart-pytorch.rst:156 +#: ../../source/tutorial-quickstart-scikitlearn.rst:109 +msgid "return the model weight as a list of NumPy ndarrays" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:41 -msgid "" -"We first need to install Flower and Flower Datasets. You can do this by " -"running :" +#: ../../source/tutorial-quickstart-mxnet.rst:173 +#: ../../source/tutorial-quickstart-pytorch.rst:157 +#: ../../source/tutorial-quickstart-scikitlearn.rst:111 +msgid ":code:`set_parameters` (optional)" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:47 +#: ../../source/tutorial-quickstart-mxnet.rst:174 +#: ../../source/tutorial-quickstart-pytorch.rst:158 +#: ../../source/tutorial-quickstart-scikitlearn.rst:111 msgid "" -"Since we want to use :code:`xgboost` package to build up XGBoost trees, " -"let's go ahead and install :code:`xgboost`:" +"update the local model weights with the parameters received from the " +"server" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:57 -msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. Now that we have all our " -"dependencies installed, let's run a simple distributed training with two " -"clients and one server." +#: ../../source/tutorial-quickstart-mxnet.rst:176 +#: ../../source/tutorial-quickstart-pytorch.rst:160 +#: ../../source/tutorial-quickstart-scikitlearn.rst:114 +msgid "set the local model weights" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:60 -msgid "" -"In a file called :code:`client.py`, import xgboost, Flower, Flower " -"Datasets and other related functions:" +#: ../../source/tutorial-quickstart-mxnet.rst:177 +#: ../../source/tutorial-quickstart-pytorch.rst:161 +#: ../../source/tutorial-quickstart-scikitlearn.rst:115 +msgid "train the local model" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:87 -msgid "Dataset partition and hyper-parameter selection" +#: ../../source/tutorial-quickstart-mxnet.rst:178 +#: ../../source/tutorial-quickstart-pytorch.rst:162 +#: ../../source/tutorial-quickstart-scikitlearn.rst:116 +msgid "receive the updated local model weights" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:89 -msgid "" -"Prior to local training, we require loading the HIGGS dataset from Flower" -" Datasets and conduct data partitioning for FL:" +#: ../../source/tutorial-quickstart-mxnet.rst:180 +#: ../../source/tutorial-quickstart-pytorch.rst:164 +#: ../../source/tutorial-quickstart-scikitlearn.rst:118 +msgid "test the local model" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:102 -msgid "" -"In this example, we split the dataset into two partitions with uniform " -"distribution (:code:`IidPartitioner(num_partitions=2)`). Then, we load " -"the partition for the given client based on :code:`node_id`:" +#: ../../source/tutorial-quickstart-mxnet.rst:182 +msgid "They can be implemented in the following way:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:121 +#: ../../source/tutorial-quickstart-mxnet.rst:212 msgid "" -"After that, we do train/test splitting on the given partition (client's " -"local data), and transform data format for :code:`xgboost` package." 
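For reference, the "one line" that actually runs such a client (here the :code:`MNISTClient` mentioned in the tutorial) might look like the sketch below; depending on the Flower version, :code:`fl.client.start_numpy_client()` can be used instead of converting the client with :code:`to_client()`:

    import flwr as fl

    # MNISTClient is the NumPyClient subclass described in the tutorial;
    # to_client() converts it into the generic Client type expected here.
    fl.client.start_client(
        server_address="0.0.0.0:8080",
        client=MNISTClient().to_client(),
    )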
+"We can now create an instance of our class :code:`MNISTClient` and add " +"one line to actually run this client:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:134 +#: ../../source/tutorial-quickstart-mxnet.rst:219 msgid "" -"The functions of :code:`train_test_split` and " -":code:`transform_dataset_to_dmatrix` are defined as below:" +"That's it for the client. We only have to implement :code:`Client` or " +":code:`NumPyClient` and call :code:`fl.client.start_client()` or " +":code:`fl.client.start_numpy_client()`. The string " +":code:`\"0.0.0.0:8080\"` tells the client which server to connect to. In " +"our case we can run the server and the client on the same machine, " +"therefore we use :code:`\"0.0.0.0:8080\"`. If we run a truly federated " +"workload with the server and clients running on different machines, all " +"that needs to change is the :code:`server_address` we pass to the client." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:158 -msgid "Finally, we define the hyper-parameters used for XGBoost training." +#: ../../source/tutorial-quickstart-mxnet.rst:241 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. Federated learning systems usually have a " +"server and multiple clients. We therefore have to start the server first:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:174 +#: ../../source/tutorial-quickstart-mxnet.rst:249 +#: ../../source/tutorial-quickstart-pytorch.rst:226 +#: ../../source/tutorial-quickstart-scikitlearn.rst:224 +#: ../../source/tutorial-quickstart-tensorflow.rst:122 +#: ../../source/tutorial-quickstart-xgboost.rst:533 msgid "" -"The :code:`num_local_round` represents the number of iterations for local" -" tree boost. We use CPU for the training in default. One can shift it to " -"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " -"evaluation metric." +"Once the server is running we can start the clients in different " +"terminals. Open a new terminal and start the first client:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:181 -msgid "Flower client definition for XGBoost" +#: ../../source/tutorial-quickstart-mxnet.rst:256 +#: ../../source/tutorial-quickstart-pytorch.rst:233 +#: ../../source/tutorial-quickstart-scikitlearn.rst:231 +#: ../../source/tutorial-quickstart-tensorflow.rst:129 +#: ../../source/tutorial-quickstart-xgboost.rst:540 +msgid "Open another terminal and start the second client:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:183 +#: ../../source/tutorial-quickstart-mxnet.rst:262 +#: ../../source/tutorial-quickstart-pytorch.rst:239 +#: ../../source/tutorial-quickstart-scikitlearn.rst:237 +#: ../../source/tutorial-quickstart-xgboost.rst:546 msgid "" -"After loading the dataset we define the Flower client. We follow the " -"general rule to define :code:`XgbClient` class inherited from " -":code:`fl.client.Client`." +"Each client will have its own dataset. You should now see how the " +"training does in the very first terminal (the one that started the " +"server):" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:193 +#: ../../source/tutorial-quickstart-mxnet.rst:294 msgid "" -"The :code:`self.bst` is used to keep the Booster objects that remain " -"consistent across rounds, allowing them to store predictions from trees " -"integrated in earlier rounds and maintain other essential data structures" -" for training." +"Congratulations! 
You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in :code:`examples" +"/quickstart-mxnet`." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:196 +#: ../../source/tutorial-quickstart-pandas.rst:-1 msgid "" -"Then, we override :code:`get_parameters`, :code:`fit` and " -":code:`evaluate` methods insides :code:`XgbClient` class as follows." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Pandas to perform Federated Analytics." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:210 -msgid "" -"Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. In this case, we do not use " -":code:`get_parameters` and :code:`set_parameters` to initialise model " -"parameters for XGBoost. As a result, let's return an empty tensor in " -":code:`get_parameters` when it is called by the server at the first " -"round." +#: ../../source/tutorial-quickstart-pandas.rst:5 +msgid "Quickstart Pandas" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:251 -msgid "" -"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " -"up the first set of trees. the returned Booster object and config are " -"stored in :code:`self.bst` and :code:`self.config`, respectively. From " -"the second round, we load the global model sent from server to " -":code:`self.bst`, and then update model weights on local training data " -"with function :code:`local_boost` as follows:" +#: ../../source/tutorial-quickstart-pandas.rst:10 +msgid "Let's build a federated analytics system using Pandas and Flower!" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:269 +#: ../../source/tutorial-quickstart-pandas.rst:12 msgid "" -"Given :code:`num_local_round`, we update trees by calling " -":code:`self.bst.update` method. After training, the last " -":code:`N=num_local_round` trees will be extracted to send to the server." +"Please refer to the `full code example " +"`_ " +"to learn more." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:291 +#: ../../source/tutorial-quickstart-pytorch.rst:-1 msgid "" -"In :code:`evaluate`, we call :code:`self.bst.eval_set` function to " -"conduct evaluation on valid set. The AUC value will be returned." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch to train a CNN model on MNIST." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:294 +#: ../../source/tutorial-quickstart-pytorch.rst:13 msgid "" -"Now, we can create an instance of our class :code:`XgbClient` and add one" -" line to actually run this client:" +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR10 using Flower and PyTorch." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:300 +#: ../../source/tutorial-quickstart-pytorch.rst:15 +#: ../../source/tutorial-quickstart-xgboost.rst:39 msgid "" -"That's it for the client. We only have to implement :code:`Client`and " -"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " -"tells the client which server to connect to. In our case we can run the " -"server and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." 
+"First of all, it is recommended to create a virtual environment and run " +"everything within a :doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:311 +#: ../../source/tutorial-quickstart-pytorch.rst:19 msgid "" -"These updates are then sent to the *server* which will aggregate them to " -"produce a better model. Finally, the *server* sends this improved version" -" of the model back to each *client* to finish a complete FL round." +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:314 +#: ../../source/tutorial-quickstart-pytorch.rst:23 msgid "" -"In a file named :code:`server.py`, import Flower and FedXgbBagging from " -":code:`flwr.server.strategy`." +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. You can do this by running :" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:316 -msgid "We first define a strategy for XGBoost bagging aggregation." +#: ../../source/tutorial-quickstart-pytorch.rst:29 +msgid "" +"Since we want to use PyTorch to solve a computer vision task, let's go " +"ahead and install PyTorch and the **torchvision** library:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:339 +#: ../../source/tutorial-quickstart-pytorch.rst:39 msgid "" -"We use two clients for this example. An " -":code:`evaluate_metrics_aggregation` function is defined to collect and " -"wighted average the AUC values from clients." +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. Our training " +"procedure and network architecture are based on PyTorch's `Deep Learning " +"with PyTorch " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:342 -msgid "Then, we start the server:" +#: ../../source/tutorial-quickstart-pytorch.rst:41 +msgid "" +"In a file called :code:`client.py`, import Flower and PyTorch related " +"packages:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:354 -msgid "Tree-based bagging aggregation" +#: ../../source/tutorial-quickstart-pytorch.rst:56 +msgid "In addition, we define the device allocation in PyTorch with:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:356 +#: ../../source/tutorial-quickstart-pytorch.rst:62 msgid "" -"You must be curious about how bagging aggregation works. Let's look into " -"the details." +"We use PyTorch to load CIFAR10, a popular colored image classification " +"dataset for machine learning. The PyTorch :code:`DataLoader()` downloads " +"the training and test data that are then normalized." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:358 +#: ../../source/tutorial-quickstart-pytorch.rst:78 msgid "" -"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " -":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." -" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " -"and :code:`evaluate` methods as follows:" +"Define the loss and optimizer with PyTorch. The training of the dataset " +"is done by looping over the dataset, measure the corresponding loss and " +"optimize it." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:454 +#: ../../source/tutorial-quickstart-pytorch.rst:94 msgid "" -"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " -"trees by calling :code:`aggregate()` function:" +"Define then the validation of the machine learning network. We loop over" +" the test set and measure the loss and accuracy of the test set." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:513 +#: ../../source/tutorial-quickstart-pytorch.rst:113 msgid "" -"In this function, we first fetch the number of trees and the number of " -"parallel trees for the current and previous model by calling " -":code:`_get_tree_nums`. Then, the fetched information will be aggregated." -" After that, the trees (containing model weights) are aggregated to " -"generate a new tree model." +"After defining the training and testing of a PyTorch machine learning " +"model, we use the functions for the Flower clients." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:518 +#: ../../source/tutorial-quickstart-pytorch.rst:115 msgid "" -"After traversal of all clients' models, a new global model is generated, " -"followed by the serialisation, and sending back to each client." -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:523 -msgid "Launch Federated XGBoost!" +"The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 " +"Minute Blitz':" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:585 +#: ../../source/tutorial-quickstart-pytorch.rst:142 msgid "" -"Congratulations! You've successfully built and run your first federated " -"XGBoost system. The AUC values can be checked in " -":code:`metrics_distributed`. One can see that the average AUC increases " -"over FL rounds." +"After loading the data set with :code:`load_data()` we define the Flower " +"interface." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:590 +#: ../../source/tutorial-quickstart-pytorch.rst:150 msgid "" -"The full `source code `_ for this example can be found in :code:`examples" -"/xgboost-quickstart`." +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses PyTorch. Implementing :code:`NumPyClient` usually means " +"defining the following methods (:code:`set_parameters` is optional " +"though):" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:594 -msgid "Comprehensive Federated XGBoost" +#: ../../source/tutorial-quickstart-pytorch.rst:166 +msgid "which can be implemented in the following way:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:596 +#: ../../source/tutorial-quickstart-pytorch.rst:189 +#: ../../source/tutorial-quickstart-tensorflow.rst:82 msgid "" -"Now that you have known how federated XGBoost work with Flower, it's time" -" to run some more comprehensive experiments by customising the " -"experimental settings. In the xgboost-comprehensive example (`full code " -"`_), we provide more options to define various experimental" -" setups, including aggregation strategies, data partitioning and " -"centralised/distributed evaluation. We also support `Flower simulation " -"`_ making " -"it easy to simulate large client cohorts in a resource-aware manner. " -"Let's take a look!" 
+"We can now create an instance of our class :code:`CifarClient` and add " +"one line to actually run this client:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:603 -msgid "Cyclic training" +#: ../../source/tutorial-quickstart-pytorch.rst:196 +#: ../../source/tutorial-quickstart-tensorflow.rst:90 +msgid "" +"That's it for the client. We only have to implement :code:`Client` or " +":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " +"implement a client of type :code:`NumPyClient` you'll need to first call " +"its :code:`to_client()` method. The string :code:`\"[::]:8080\"` tells " +"the client which server to connect to. In our case we can run the server " +"and the client on the same machine, therefore we use " +":code:`\"[::]:8080\"`. If we run a truly federated workload with the " +"server and clients running on different machines, all that needs to " +"change is the :code:`server_address` we point the client at." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:605 +#: ../../source/tutorial-quickstart-pytorch.rst:271 msgid "" -"In addition to bagging aggregation, we offer a cyclic training scheme, " -"which performs FL in a client-by-client fashion. Instead of aggregating " -"multiple clients, there is only one single client participating in the " -"training per round in the cyclic training scenario. The trained local " -"XGBoost trees will be passed to the next client as an initialised model " -"for next round's boosting." +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in :code:`examples" +"/quickstart-pytorch`." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:609 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 msgid "" -"To do this, we first customise a :code:`ClientManager` in " -":code:`server_utils.py`:" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch Lightning to train an Auto Encoder model on MNIST." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:649 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 +msgid "Quickstart PyTorch Lightning" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 msgid "" -"The customised :code:`ClientManager` samples all available clients in " -"each FL round based on the order of connection to the server. Then, we " -"define a new strategy :code:`FedXgbCyclic` in " -":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " -"select only one client in given round and pass the received model to next" -" client." +"Let's build a horizontal federated learning system using PyTorch " +"Lightning and Flower!" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:690 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 msgid "" -"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " -"Instead, we just make a copy of the received client model as global model" -" by overriding :code:`aggregate_fit`." +"Please refer to the `full code example " +"`_ to learn more." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:693 +#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 msgid "" -"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" -" methods ensure the clients to be sequentially selected given FL round:" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with scikit-learn to train a linear regression model." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:757 -msgid "Customised data partitioning" +#: ../../source/tutorial-quickstart-scikitlearn.rst:5 +msgid "Quickstart scikit-learn" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:759 +#: ../../source/tutorial-quickstart-scikitlearn.rst:10 msgid "" -"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" -" to instantiate the data partitioner based on the given " -":code:`num_partitions` and :code:`partitioner_type`. Currently, we " -"provide four supported partitioner type to simulate the uniformity/non-" -"uniformity in data quantity (uniform, linear, square, exponential)." +"In this tutorial, we will learn how to train a :code:`Logistic " +"Regression` model on MNIST using Flower and scikit-learn." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:790 -msgid "Customised centralised/distributed evaluation" +#: ../../source/tutorial-quickstart-scikitlearn.rst:26 +msgid "Since we want to use scikit-learn, let's go ahead and install it:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:792 -msgid "" -"To facilitate centralised evaluation, we define a function in " -":code:`server_utils.py`:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:32 +msgid "Or simply install all dependencies using Poetry:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:824 +#: ../../source/tutorial-quickstart-scikitlearn.rst:42 msgid "" -"This function returns a evaluation function which instantiates a " -":code:`Booster` object and loads the global model weights to it. The " -"evaluation is conducted by calling :code:`eval_set()` method, and the " -"tested AUC value is reported." +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. However, before " +"setting up the client and server, we will define all functionalities that" +" we need for our federated learning setup within :code:`utils.py`. The " +":code:`utils.py` contains different functions defining all the machine " +"learning basics:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:827 -msgid "" -"As for distributed evaluation on the clients, it's same as the quick-" -"start example by overriding the :code:`evaluate()` method insides the " -":code:`XgbClient` class in :code:`client_utils.py`." +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 +msgid ":code:`get_model_parameters()`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:831 -msgid "Flower simulation" +#: ../../source/tutorial-quickstart-scikitlearn.rst:46 +msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:832 -msgid "" -"We also provide an example code (:code:`sim.py`) to use the simulation " -"capabilities of Flower to simulate federated XGBoost training on either a" -" single machine or a cluster of machines." 
+#: ../../source/tutorial-quickstart-scikitlearn.rst:47 +msgid ":code:`set_model_params()`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:866 -msgid "" -"After importing all required packages, we define a :code:`main()` " -"function to perform the simulation process:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:48 +msgid "Sets the parameters of a :code:`sklean` LogisticRegression model" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:921 -msgid "" -"We first load the dataset and perform data partitioning, and the pre-" -"processed data is stored in a :code:`list`. After the simulation begins, " -"the clients won't need to pre-process their partitions again." +#: ../../source/tutorial-quickstart-scikitlearn.rst:49 +msgid ":code:`set_initial_params()`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:924 -msgid "Then, we define the strategies and other hyper-parameters:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:50 +msgid "Initializes the model parameters that the Flower server will ask for" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:975 -msgid "" -"After that, we start the simulation by calling " -":code:`fl.simulation.start_simulation`:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:51 +msgid ":code:`load_mnist()`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:995 -msgid "" -"One of key parameters for :code:`start_simulation` is :code:`client_fn` " -"which returns a function to construct a client. We define it as follows:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +msgid "Loads the MNIST dataset using OpenML" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1038 -msgid "Arguments parser" +#: ../../source/tutorial-quickstart-scikitlearn.rst:53 +msgid ":code:`shuffle()`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1040 -msgid "" -"In :code:`utils.py`, we define the arguments parsers for clients, server " -"and simulation, allowing users to specify different experimental " -"settings. Let's first see the sever side:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:54 +msgid "Shuffles data and its label" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1086 -msgid "" -"This allows user to specify training strategies / the number of total " -"clients / FL rounds / participating clients / clients for evaluation, and" -" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" -" will do centralised evaluation and all functionalities for client " -"evaluation will be disabled." +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +msgid ":code:`partition()`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1090 -msgid "Then, the argument parser on client side:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +msgid "Splits datasets into a number of partitions" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1144 +#: ../../source/tutorial-quickstart-scikitlearn.rst:58 msgid "" -"This defines various options for client data partitioning. Besides, " -"clients also have an option to conduct evaluation on centralised test set" -" by setting :code:`--centralised-eval`, as well as an option to perform " -"scaled learning rate based on the number of clients by setting :code" -":`--scaled-lr`." +"Please check out :code:`utils.py` `here " +"`_ for more details. The pre-defined functions are used in" +" the :code:`client.py` and imported. 
The :code:`client.py` also requires " +"to import several packages such as Flower and scikit-learn:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1148 -msgid "We also have an argument parser for simulation:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:73 +msgid "" +"We load the MNIST dataset from `OpenML " +"`_, a popular " +"image classification dataset of handwritten digits for machine learning. " +"The utility :code:`utils.load_mnist()` downloads the training and test " +"data. The training set is split afterwards into 10 partitions with " +":code:`utils.partition()`." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1226 -msgid "This integrates all arguments for both client and server sides." +#: ../../source/tutorial-quickstart-scikitlearn.rst:85 +msgid "" +"Next, the logistic regression model is defined and initialized with " +":code:`utils.set_initial_params()`." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1229 -msgid "Example commands" +#: ../../source/tutorial-quickstart-scikitlearn.rst:97 +msgid "" +"The Flower server interacts with clients through an interface called " +":code:`Client`. When the server selects a particular client for training," +" it sends training instructions over the network. The client receives " +"those instructions and calls one of the :code:`Client` methods to run " +"your code (i.e., to fit the logistic regression we defined earlier)." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1231 +#: ../../source/tutorial-quickstart-scikitlearn.rst:103 msgid "" -"To run a centralised evaluated experiment with bagging strategy on 5 " -"clients with exponential distribution for 50 rounds, we first start the " -"server as below:" +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses scikit-learn. Implementing :code:`NumPyClient` usually " +"means defining the following methods (:code:`set_parameters` is optional " +"though):" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1238 -msgid "Then, on each client terminal, we start the clients:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:112 +msgid "is directly imported with :code:`utils.set_model_params()`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1244 -msgid "To run the same experiment with Flower simulation:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:120 +msgid "The methods can be implemented in the following way:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1250 +#: ../../source/tutorial-quickstart-scikitlearn.rst:143 msgid "" -"The full `code `_ for this comprehensive example can be found in" -" :code:`examples/xgboost-comprehensive`." +"We can now create an instance of our class :code:`MnistClient` and add " +"one line to actually run this client:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 -msgid "Build a strategy from scratch" +#: ../../source/tutorial-quickstart-scikitlearn.rst:150 +msgid "" +"That's it for the client. We only have to implement :code:`Client` or " +":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " +"implement a client of type :code:`NumPyClient` you'll need to first call " +"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" +" the client which server to connect to. 
In our case we can run the server" +" and the client on the same machine, therefore we use " +":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " +"server and clients running on different machines, all that needs to " +"change is the :code:`server_address` we pass to the client." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +#: ../../source/tutorial-quickstart-scikitlearn.rst:159 msgid "" -"Welcome to the third part of the Flower federated learning tutorial. In " -"previous parts of this tutorial, we introduced federated learning with " -"PyTorch and Flower (`part 1 `__) and we learned how strategies " -"can be used to customize the execution on both the server and the clients" -" (`part 2 `__)." +"The following Flower server is a little bit more advanced and returns an " +"evaluation function for the server-side evaluation. First, we import " +"again all required libraries such as Flower and scikit-learn." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +#: ../../source/tutorial-quickstart-scikitlearn.rst:162 +msgid ":code:`server.py`, import Flower and start the server:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:173 msgid "" -"In this notebook, we'll continue to customize the federated learning " -"system we built previously by creating a custom version of FedAvg (again," -" using `Flower `__ and `PyTorch " -"`__)." +"The number of federated learning rounds is set in :code:`fit_round()` and" +" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " +"function is called after each federated learning round and gives you " +"information about loss and accuracy." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +#: ../../source/tutorial-quickstart-scikitlearn.rst:198 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the Flower community on Slack to connect, ask questions, and get help: " -"`Join Slack `__ 🌼 We'd love to hear from " -"you in the ``#introductions`` channel! And if anything is unclear, head " -"over to the ``#questions`` channel." +"The :code:`main` contains the server-side parameter initialization " +":code:`utils.set_initial_params()` as well as the aggregation strategy " +":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " +"federated averaging (or FedAvg), with two clients and evaluation after " +"each federated learning round. The server can be started with the command" +" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:17 -msgid "Let's build a new ``Strategy`` from scratch!" +#: ../../source/tutorial-quickstart-scikitlearn.rst:217 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. Federated learning systems usually have a " +"server and multiple clients. 
We, therefore, have to start the server " +"first:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:29 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:29 -msgid "Preparation" +#: ../../source/tutorial-quickstart-scikitlearn.rst:271 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in :code:`examples/sklearn-logreg-" +"mnist`." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:31 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:32 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:31 +#: ../../source/tutorial-quickstart-tensorflow.rst:-1 msgid "" -"Before we begin with the actual code, let's make sure that we have " -"everything we need." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with TensorFlow to train a MobilNetV2 model on CIFAR-10." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:43 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:44 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:43 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:43 -msgid "Installing dependencies" +#: ../../source/tutorial-quickstart-tensorflow.rst:5 +msgid "Quickstart TensorFlow" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:45 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:46 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:45 -msgid "First, we install the necessary packages:" +#: ../../source/tutorial-quickstart-tensorflow.rst:13 +msgid "Let's build a federated learning system in less than 20 lines of code!" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:65 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:66 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:65 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:65 -msgid "" -"Now that we have all dependencies installed, we can import everything we " -"need for this tutorial:" +#: ../../source/tutorial-quickstart-tensorflow.rst:15 +msgid "Before Flower can be imported we have to install it:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:101 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:102 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:101 +#: ../../source/tutorial-quickstart-tensorflow.rst:21 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " -"GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." 
+"Since we want to use the Keras API of TensorFlow (TF), we have to install" +" TF as well:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:114 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:115 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:114 -msgid "Data loading" +#: ../../source/tutorial-quickstart-tensorflow.rst:31 +msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:116 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:116 +#: ../../source/tutorial-quickstart-tensorflow.rst:38 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``. We introduce a new parameter" -" ``num_clients`` which allows us to call ``load_datasets`` with different" -" numbers of clients." +"We use the Keras utilities of TF to load CIFAR10, a popular colored image" +" classification dataset for machine learning. The call to " +":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches " +"it locally, and then returns the entire training and test set as NumPy " +"ndarrays." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:167 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:168 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:167 -msgid "Model training/evaluation" +#: ../../source/tutorial-quickstart-tensorflow.rst:47 +msgid "" +"Next, we need a model. For the purpose of this tutorial, we use " +"MobilNetV2 with 10 output classes:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:169 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:170 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +#: ../../source/tutorial-quickstart-tensorflow.rst:60 msgid "" -"Let's continue with the usual model definition (including " -"``set_parameters`` and ``get_parameters``), training and test functions:" +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses Keras. The :code:`NumPyClient` interface defines three " +"methods which can be implemented in the following way:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:258 -msgid "Flower client" +#: ../../source/tutorial-quickstart-tensorflow.rst:135 +msgid "Each client will have its own dataset." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:260 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:260 +#: ../../source/tutorial-quickstart-tensorflow.rst:137 msgid "" -"To implement the Flower client, we (again) create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``. 
Here, we also pass the " -"``cid`` to the client and use it log additional details:" +"You should now see how the training does in the very first terminal (the " +"one that started the server):" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:308 -msgid "Let's test what we have so far before we continue:" +#: ../../source/tutorial-quickstart-tensorflow.rst:169 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this can be found in :code:`examples" +"/quickstart-tensorflow/client.py`." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:339 -msgid "Build a Strategy from scratch" +#: ../../source/tutorial-quickstart-xgboost.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with XGBoost to train classification models on trees." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:341 -msgid "" -"Let’s overwrite the ``configure_fit`` method such that it passes a higher" -" learning rate (potentially also other hyperparameters) to the optimizer " -"of a fraction of the clients. We will keep the sampling of the clients as" -" it is in ``FedAvg`` and then change the configuration dictionary (one of" -" the ``FitIns`` attributes)." +#: ../../source/tutorial-quickstart-xgboost.rst:5 +msgid "Quickstart XGBoost" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:507 -msgid "" -"The only thing left is to use the newly created custom Strategy " -"``FedCustom`` when starting the experiment:" +#: ../../source/tutorial-quickstart-xgboost.rst:14 +msgid "Federated XGBoost" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:534 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:932 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:697 -msgid "Recap" +#: ../../source/tutorial-quickstart-xgboost.rst:16 +msgid "" +"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " +"implementation of gradient-boosted decision tree (**GBDT**), that " +"maximises the computational boundaries for boosted tree methods. It's " +"primarily designed to enhance both the performance and computational " +"speed of machine learning models. In XGBoost, trees are constructed " +"concurrently, unlike the sequential approach taken by GBDT." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:536 +#: ../../source/tutorial-quickstart-xgboost.rst:20 msgid "" -"In this notebook, we’ve seen how to implement a custom strategy. A custom" -" strategy enables granular control over client node configuration, result" -" aggregation, and more. To define a custom strategy, you only have to " -"overwrite the abstract methods of the (abstract) base class ``Strategy``." -" To make custom strategies even more powerful, you can pass custom " -"functions to the constructor of your new class (``__init__``) and then " -"call these functions whenever needed." +"Often, for tabular data on medium-sized datasets with fewer than 10k " +"training examples, XGBoost surpasses the results of deep learning " +"techniques." 
msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:550 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:948 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:729 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:715 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 -msgid "" -"Before you continue, make sure to join the Flower community on Slack: " -"`Join Slack `__" +#: ../../source/tutorial-quickstart-xgboost.rst:23 +msgid "Why federated XGBoost?" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:552 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:950 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:717 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 +#: ../../source/tutorial-quickstart-xgboost.rst:25 msgid "" -"There's a dedicated ``#questions`` channel if you need help, but we'd " -"also love to hear who you are in ``#introductions``!" +"Indeed, as the demand for data privacy and decentralized learning grows, " +"there's an increasing requirement to implement federated XGBoost systems " +"for specialised applications, like survival analysis and financial fraud " +"detection." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:554 +#: ../../source/tutorial-quickstart-xgboost.rst:27 msgid "" -"The `Flower Federated Learning Tutorial - Part 4 " -"`__ introduces ``Client``, the flexible API underlying " -"``NumPyClient``." +"Federated learning ensures that raw data remains on the local device, " +"making it an attractive approach for sensitive domains where data " +"security and privacy are paramount. Given the robustness and efficiency " +"of XGBoost, combining it with federated learning offers a promising " +"solution for these specific challenges." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 -msgid "Customize the client" +#: ../../source/tutorial-quickstart-xgboost.rst:30 +msgid "" +"In this tutorial we will learn how to train a federated XGBoost model on " +"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " +"example (`full code xgboost-quickstart " +"`_)" +" with two *clients* and one *server* to demonstrate how federated XGBoost" +" works, and then we dive into a more complex example (`full code xgboost-" +"comprehensive `_) to run various experiments." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 -msgid "" -"Welcome to the fourth part of the Flower federated learning tutorial. In " -"the previous parts of this tutorial, we introduced federated learning " -"with PyTorch and Flower (`part 1 `__), we learned how " -"strategies can be used to customize the execution on both the server and " -"the clients (`part 2 `__), and we built our own " -"custom strategy from scratch (`part 3 `__)." +#: ../../source/tutorial-quickstart-xgboost.rst:37 +msgid "Environment Setup" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 +#: ../../source/tutorial-quickstart-xgboost.rst:41 msgid "" -"In this notebook, we revisit ``NumPyClient`` and introduce a new " -"baseclass for building clients, simply named ``Client``. 
In previous " -"parts of this tutorial, we've based our client on ``NumPyClient``, a " -"convenience class which makes it easy to work with machine learning " -"libraries that have good NumPy interoperability. With ``Client``, we gain" -" a lot of flexibility that we didn't have before, but we'll also have to " -"do a few things the we didn't have to do before." +"We first need to install Flower and Flower Datasets. You can do this by " +"running :" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:18 +#: ../../source/tutorial-quickstart-xgboost.rst:47 msgid "" -"Let's go deeper and see what it takes to move from ``NumPyClient`` to " -"``Client``!" +"Since we want to use :code:`xgboost` package to build up XGBoost trees, " +"let's go ahead and install :code:`xgboost`:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:30 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:29 -msgid "Step 0: Preparation" +#: ../../source/tutorial-quickstart-xgboost.rst:57 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. Now that we have all our " +"dependencies installed, let's run a simple distributed training with two " +"clients and one server." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:117 +#: ../../source/tutorial-quickstart-xgboost.rst:60 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``." +"In a file called :code:`client.py`, import xgboost, Flower, Flower " +"Datasets and other related functions:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:259 -msgid "Step 1: Revisiting NumPyClient" +#: ../../source/tutorial-quickstart-xgboost.rst:87 +msgid "Dataset partition and hyper-parameter selection" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:261 +#: ../../source/tutorial-quickstart-xgboost.rst:89 msgid "" -"So far, we've implemented our client by subclassing " -"``flwr.client.NumPyClient``. The three methods we implemented are " -"``get_parameters``, ``fit``, and ``evaluate``. Finally, we wrap the " -"creation of instances of this class in a function called ``client_fn``:" +"Prior to local training, we require loading the HIGGS dataset from Flower" +" Datasets and conduct data partitioning for FL:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:309 +#: ../../source/tutorial-quickstart-xgboost.rst:102 msgid "" -"We've seen this before, there's nothing new so far. The only *tiny* " -"difference compared to the previous notebook is naming, we've changed " -"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " -"``numpyclient_fn``. Let's run it to see the output we get:" +"In this example, we split the dataset into two partitions with uniform " +"distribution (:code:`IidPartitioner(num_partitions=2)`). Then, we load " +"the partition for the given client based on :code:`node_id`:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:339 +#: ../../source/tutorial-quickstart-xgboost.rst:121 msgid "" -"This works as expected, two clients are training for three rounds of " -"federated learning." +"After that, we do train/test splitting on the given partition (client's " +"local data), and transform data format for :code:`xgboost` package." 
msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:341 +#: ../../source/tutorial-quickstart-xgboost.rst:134 msgid "" -"Let's dive a little bit deeper and discuss how Flower executes this " -"simulation. Whenever a client is selected to do some work, " -"``start_simulation`` calls the function ``numpyclient_fn`` to create an " -"instance of our ``FlowerNumPyClient`` (along with loading the model and " -"the data)." +"The functions of :code:`train_test_split` and " +":code:`transform_dataset_to_dmatrix` are defined as below:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:343 -msgid "" -"But here's the perhaps surprising part: Flower doesn't actually use the " -"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " -"makes it look like a subclass of ``flwr.client.Client``, not " -"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " -"know how to handle ``NumPyClient``'s, it only knows how to handle " -"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " -"top of ``Client``." +#: ../../source/tutorial-quickstart-xgboost.rst:158 +msgid "Finally, we define the hyper-parameters used for XGBoost training." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:345 +#: ../../source/tutorial-quickstart-xgboost.rst:174 msgid "" -"Instead of building on top of ``NumPyClient``, we can directly build on " -"top of ``Client``." +"The :code:`num_local_round` represents the number of iterations for local" +" tree boost. We use CPU for the training in default. One can shift it to " +"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " +"evaluation metric." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:357 -msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" +#: ../../source/tutorial-quickstart-xgboost.rst:181 +msgid "Flower client definition for XGBoost" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:359 +#: ../../source/tutorial-quickstart-xgboost.rst:183 msgid "" -"Let's try to do the same thing using ``Client`` instead of " -"``NumPyClient``." +"After loading the dataset we define the Flower client. We follow the " +"general rule to define :code:`XgbClient` class inherited from " +":code:`fl.client.Client`." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:465 +#: ../../source/tutorial-quickstart-xgboost.rst:193 msgid "" -"Before we discuss the code in more detail, let's try to run it! Gotta " -"make sure our new ``Client``-based client works, right?" +"The :code:`self.bst` is used to keep the Booster objects that remain " +"consistent across rounds, allowing them to store predictions from trees " +"integrated in earlier rounds and maintain other essential data structures" +" for training." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:490 +#: ../../source/tutorial-quickstart-xgboost.rst:196 msgid "" -"That's it, we're now using ``Client``. It probably looks similar to what " -"we've done with ``NumPyClient``. So what's the difference?" +"Then, we override :code:`get_parameters`, :code:`fit` and " +":code:`evaluate` methods insides :code:`XgbClient` class as follows." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:492 +#: ../../source/tutorial-quickstart-xgboost.rst:210 msgid "" -"First of all, it's more code. But why? 
The difference comes from the fact" -" that ``Client`` expects us to take care of parameter serialization and " -"deserialization. For Flower to be able to send parameters over the " -"network, it eventually needs to turn these parameters into ``bytes``. " -"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " -"serialization. Turning raw bytes into something more useful (like NumPy " -"``ndarray``'s) is called deserialization. Flower needs to do both: it " -"needs to serialize parameters on the server-side and send them to the " -"client, the client needs to deserialize them to use them for local " -"training, and then serialize the updated parameters again to send them " -"back to the server, which (finally!) deserializes them again in order to " -"aggregate them with the updates received from other clients." +"Unlike neural network training, XGBoost trees are not started from a " +"specified random weights. In this case, we do not use " +":code:`get_parameters` and :code:`set_parameters` to initialise model " +"parameters for XGBoost. As a result, let's return an empty tensor in " +":code:`get_parameters` when it is called by the server at the first " +"round." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:495 +#: ../../source/tutorial-quickstart-xgboost.rst:251 msgid "" -"The only *real* difference between Client and NumPyClient is that " -"NumPyClient takes care of serialization and deserialization for you. It " -"can do so because it expects you to return parameters as NumPy ndarray's," -" and it knows how to handle these. This makes working with machine " -"learning libraries that have good NumPy support (most of them) a breeze." +"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " +"up the first set of trees. the returned Booster object and config are " +"stored in :code:`self.bst` and :code:`self.config`, respectively. From " +"the second round, we load the global model sent from server to " +":code:`self.bst`, and then update model weights on local training data " +"with function :code:`local_boost` as follows:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:497 +#: ../../source/tutorial-quickstart-xgboost.rst:269 msgid "" -"In terms of API, there's one major difference: all methods in Client take" -" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " -"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " -"``NumPyClient`` on the other hand have multiple arguments (e.g., " -"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" -" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " -"``NumPyClient.fit``) if there are multiple things to handle. These " -"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " -"values you're used to from ``NumPyClient``." +"Given :code:`num_local_round`, we update trees by calling " +":code:`self.bst.update` method. After training, the last " +":code:`N=num_local_round` trees will be extracted to send to the server." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:510 -msgid "Step 3: Custom serialization" +#: ../../source/tutorial-quickstart-xgboost.rst:291 +msgid "" +"In :code:`evaluate`, we call :code:`self.bst.eval_set` function to " +"conduct evaluation on valid set. The AUC value will be returned." 
msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:512 +#: ../../source/tutorial-quickstart-xgboost.rst:294 msgid "" -"Here we will explore how to implement custom serialization with a simple " -"example." +"Now, we can create an instance of our class :code:`XgbClient` and add one" +" line to actually run this client:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:514 +#: ../../source/tutorial-quickstart-xgboost.rst:300 msgid "" -"But first what is serialization? Serialization is just the process of " -"converting an object into raw bytes, and equally as important, " -"deserialization is the process of converting raw bytes back into an " -"object. This is very useful for network communication. Indeed, without " -"serialization, you could not just a Python object through the internet." +"That's it for the client. We only have to implement :code:`Client`and " +"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " +"tells the client which server to connect to. In our case we can run the " +"server and the client on the same machine, therefore we use " +":code:`\"[::]:8080\"`. If we run a truly federated workload with the " +"server and clients running on different machines, all that needs to " +"change is the :code:`server_address` we point the client at." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:516 +#: ../../source/tutorial-quickstart-xgboost.rst:311 msgid "" -"Federated Learning relies heavily on internet communication for training " -"by sending Python objects back and forth between the clients and the " -"server. This means that serialization is an essential part of Federated " -"Learning." +"These updates are then sent to the *server* which will aggregate them to " +"produce a better model. Finally, the *server* sends this improved version" +" of the model back to each *client* to finish a complete FL round." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:518 +#: ../../source/tutorial-quickstart-xgboost.rst:314 msgid "" -"In the following section, we will write a basic example where instead of " -"sending a serialized version of our ``ndarray``\\ s containing our " -"parameters, we will first convert the ``ndarray`` into sparse matrices, " -"before sending them. This technique can be used to save bandwidth, as in " -"certain cases where the weights of a model are sparse (containing many 0 " -"entries), converting them to a sparse matrix can greatly improve their " -"bytesize." +"In a file named :code:`server.py`, import Flower and FedXgbBagging from " +":code:`flwr.server.strategy`." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:521 -msgid "Our custom serialization/deserialization functions" +#: ../../source/tutorial-quickstart-xgboost.rst:316 +msgid "We first define a strategy for XGBoost bagging aggregation." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:523 +#: ../../source/tutorial-quickstart-xgboost.rst:339 msgid "" -"This is where the real serialization/deserialization will happen, " -"especially in ``ndarray_to_sparse_bytes`` for serialization and " -"``sparse_bytes_to_ndarray`` for deserialization." +"We use two clients for this example. An " +":code:`evaluate_metrics_aggregation` function is defined to collect and " +"wighted average the AUC values from clients." 
msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:525 -msgid "" -"Note that we imported the ``scipy.sparse`` library in order to convert " -"our arrays." +#: ../../source/tutorial-quickstart-xgboost.rst:342 +msgid "Then, we start the server:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:613 -msgid "Client-side" +#: ../../source/tutorial-quickstart-xgboost.rst:354 +msgid "Tree-based bagging aggregation" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 +#: ../../source/tutorial-quickstart-xgboost.rst:356 msgid "" -"To be able to serialize our ``ndarray``\\ s into sparse " -"parameters, we will just have to call our custom functions in our " -"``flwr.client.Client``." +"You must be curious about how bagging aggregation works. Let's look into " +"the details." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:617 +#: ../../source/tutorial-quickstart-xgboost.rst:358 msgid "" -"Indeed, in ``get_parameters`` we need to serialize the parameters we got " -"from our network using our custom ``ndarrays_to_sparse_parameters`` " -"defined above." +"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " +":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." +" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " +"and :code:`evaluate` methods as follows:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:619 +#: ../../source/tutorial-quickstart-xgboost.rst:454 msgid "" -"In ``fit``, we first need to deserialize the parameters coming from the " -"server using our custom ``sparse_parameters_to_ndarrays`` and then we " -"need to serialize our local results with " -"``ndarrays_to_sparse_parameters``." +"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " +"trees by calling :code:`aggregate()` function:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:621 +#: ../../source/tutorial-quickstart-xgboost.rst:513 msgid "" -"In ``evaluate``, we will only need to deserialize the global parameters " -"with our custom function." -msgstr "" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:725 -msgid "Server-side" +"In this function, we first fetch the number of trees and the number of " +"parallel trees for the current and previous model by calling " +":code:`_get_tree_nums`. Then, the fetched information will be aggregated." +" After that, the trees (containing model weights) are aggregated to " +"generate a new tree model." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:727 +#: ../../source/tutorial-quickstart-xgboost.rst:518 msgid "" -"For this example, we will just use ``FedAvg`` as a strategy. To change " -"the serialization and deserialization here, we only need to reimplement " -"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" -" functions of the strategy will be inherited from the super class " -"``FedAvg``." +"After traversal of all clients' models, a new global model is generated, " +"followed by the serialisation, and sending back to each client." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:729 -msgid "As you can see only one line as change in ``evaluate``:" +#: ../../source/tutorial-quickstart-xgboost.rst:523 +msgid "Launch Federated XGBoost!" 
msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:735 +#: ../../source/tutorial-quickstart-xgboost.rst:585 msgid "" -"And for ``aggregate_fit``, we will first deserialize every result we " -"received:" +"Congratulations! You've successfully built and run your first federated " +"XGBoost system. The AUC values can be checked in " +":code:`metrics_distributed`. One can see that the average AUC increases " +"over FL rounds." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:744 -msgid "And then serialize the aggregated result:" +#: ../../source/tutorial-quickstart-xgboost.rst:590 +msgid "" +"The full `source code `_ for this example can be found in :code:`examples" +"/xgboost-quickstart`." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:903 -msgid "We can now run our custom serialization example!" +#: ../../source/tutorial-quickstart-xgboost.rst:594 +msgid "Comprehensive Federated XGBoost" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:934 +#: ../../source/tutorial-quickstart-xgboost.rst:596 msgid "" -"In this part of the tutorial, we've seen how we can build clients by " -"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " -"convenience abstraction that makes it easier to work with machine " -"learning libraries that have good NumPy interoperability. ``Client`` is a" -" more flexible abstraction that allows us to do things that are not " -"possible in ``NumPyClient``. In order to do so, it requires us to handle " -"parameter serialization and deserialization ourselves." +"Now that you have known how federated XGBoost work with Flower, it's time" +" to run some more comprehensive experiments by customising the " +"experimental settings. In the xgboost-comprehensive example (`full code " +"`_), we provide more options to define various experimental" +" setups, including aggregation strategies, data partitioning and " +"centralised/distributed evaluation. We also support :doc:`Flower " +"simulation ` making it easy to simulate large " +"client cohorts in a resource-aware manner. Let's take a look!" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:952 -msgid "" -"This is the final part of the Flower tutorial (for now!), " -"congratulations! You're now well equipped to understand the rest of the " -"documentation. There are many topics we didn't cover in the tutorial, we " -"recommend the following resources:" +#: ../../source/tutorial-quickstart-xgboost.rst:603 +msgid "Cyclic training" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:954 -msgid "`Read Flower Docs `__" +#: ../../source/tutorial-quickstart-xgboost.rst:605 +msgid "" +"In addition to bagging aggregation, we offer a cyclic training scheme, " +"which performs FL in a client-by-client fashion. Instead of aggregating " +"multiple clients, there is only one single client participating in the " +"training per round in the cyclic training scenario. The trained local " +"XGBoost trees will be passed to the next client as an initialised model " +"for next round's boosting." 
msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:955 +#: ../../source/tutorial-quickstart-xgboost.rst:609 msgid "" -"`Check out Flower Code Examples " -"`__" +"To do this, we first customise a :code:`ClientManager` in " +":code:`server_utils.py`:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:956 +#: ../../source/tutorial-quickstart-xgboost.rst:649 msgid "" -"`Use Flower Baselines for your research " -"`__" +"The customised :code:`ClientManager` samples all available clients in " +"each FL round based on the order of connection to the server. Then, we " +"define a new strategy :code:`FedXgbCyclic` in " +":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " +"select only one client in given round and pass the received model to next" +" client." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:957 +#: ../../source/tutorial-quickstart-xgboost.rst:690 msgid "" -"`Watch Flower Summit 2023 videos `__" +"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " +"Instead, we just make a copy of the received client model as global model" +" by overriding :code:`aggregate_fit`." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 -msgid "Get started with Flower" +#: ../../source/tutorial-quickstart-xgboost.rst:693 +msgid "" +"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" +" methods ensure the clients to be sequentially selected given FL round:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 -msgid "Welcome to the Flower federated learning tutorial!" +#: ../../source/tutorial-quickstart-xgboost.rst:757 +msgid "Customised data partitioning" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 +#: ../../source/tutorial-quickstart-xgboost.rst:759 msgid "" -"In this notebook, we'll build a federated learning system using Flower, " -"`Flower Datasets `__ and PyTorch. In " -"part 1, we use PyTorch for the model training pipeline and data loading. " -"In part 2, we continue to federate the PyTorch-based pipeline using " -"Flower." +"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" +" to instantiate the data partitioner based on the given " +":code:`num_partitions` and :code:`partitioner_type`. Currently, we " +"provide four supported partitioner type to simulate the uniformity/non-" +"uniformity in data quantity (uniform, linear, square, exponential)." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:17 -msgid "Let's get stated!" +#: ../../source/tutorial-quickstart-xgboost.rst:790 +msgid "Customised centralised/distributed evaluation" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:31 +#: ../../source/tutorial-quickstart-xgboost.rst:792 msgid "" -"Before we begin with any actual code, let's make sure that we have " -"everything we need." 
+"To facilitate centralised evaluation, we define a function in " +":code:`server_utils.py`:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:45 +#: ../../source/tutorial-quickstart-xgboost.rst:824 msgid "" -"Next, we install the necessary packages for PyTorch (``torch`` and " -"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " -"(``flwr``):" +"This function returns a evaluation function which instantiates a " +":code:`Booster` object and loads the global model weights to it. The " +"evaluation is conducted by calling :code:`eval_set()` method, and the " +"tested AUC value is reported." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:105 +#: ../../source/tutorial-quickstart-xgboost.rst:827 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" -" GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." +"As for distributed evaluation on the clients, it's same as the quick-" +"start example by overriding the :code:`evaluate()` method insides the " +":code:`XgbClient` class in :code:`client_utils.py`." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:118 -msgid "Loading the data" +#: ../../source/tutorial-quickstart-xgboost.rst:831 +msgid "Flower simulation" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:120 +#: ../../source/tutorial-quickstart-xgboost.rst:832 msgid "" -"Federated learning can be applied to many different types of tasks across" -" different domains. In this tutorial, we introduce federated learning by " -"training a simple convolutional neural network (CNN) on the popular " -"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " -"distinguish between images from ten different classes: 'airplane', " -"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " -"'truck'." +"We also provide an example code (:code:`sim.py`) to use the simulation " +"capabilities of Flower to simulate federated XGBoost training on either a" +" single machine or a cluster of machines." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:131 +#: ../../source/tutorial-quickstart-xgboost.rst:866 msgid "" -"We simulate having multiple datasets from multiple organizations (also " -"called the \"cross-silo\" setting in federated learning) by splitting the" -" original CIFAR-10 dataset into multiple partitions. Each partition will " -"represent the data from a single organization. We're doing this purely " -"for experimentation purposes, in the real world there's no need for data " -"splitting because each organization already has their own data (so the " -"data is naturally partitioned)." +"After importing all required packages, we define a :code:`main()` " +"function to perform the simulation process:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:133 +#: ../../source/tutorial-quickstart-xgboost.rst:921 msgid "" -"Each organization will act as a client in the federated learning system. 
" -"So having ten organizations participate in a federation means having ten " -"clients connected to the federated learning server." +"We first load the dataset and perform data partitioning, and the pre-" +"processed data is stored in a :code:`list`. After the simulation begins, " +"the clients won't need to pre-process their partitions again." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:144 -msgid "" -"Let's now create the Federated Dataset abstraction that from ``flwr-" -"datasets`` that partitions the CIFAR-10. We will create small training " -"and test set for each edge device and wrap each of them into a PyTorch " -"``DataLoader``:" +#: ../../source/tutorial-quickstart-xgboost.rst:924 +msgid "Then, we define the strategies and other hyper-parameters:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:198 +#: ../../source/tutorial-quickstart-xgboost.rst:975 msgid "" -"We now have a list of ten training sets and ten validation sets " -"(``trainloaders`` and ``valloaders``) representing the data of ten " -"different organizations. Each ``trainloader``/``valloader`` pair contains" -" 4500 training examples and 500 validation examples. There's also a " -"single ``testloader`` (we did not split the test set). Again, this is " -"only necessary for building research or educational systems, actual " -"federated learning systems have their data naturally distributed across " -"multiple partitions." +"After that, we start the simulation by calling " +":code:`fl.simulation.start_simulation`:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:201 +#: ../../source/tutorial-quickstart-xgboost.rst:995 msgid "" -"Let's take a look at the first batch of images and labels in the first " -"training set (i.e., ``trainloaders[0]``) before we move on:" +"One of key parameters for :code:`start_simulation` is :code:`client_fn` " +"which returns a function to construct a client. We define it as follows:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:240 -msgid "" -"The output above shows a random batch of images from the first " -"``trainloader`` in our list of ten ``trainloaders``. It also prints the " -"labels associated with each image (i.e., one of the ten possible labels " -"we've seen above). If you run the cell again, you should see another " -"batch of images." +#: ../../source/tutorial-quickstart-xgboost.rst:1038 +msgid "Arguments parser" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:252 -msgid "Step 1: Centralized Training with PyTorch" +#: ../../source/tutorial-quickstart-xgboost.rst:1040 +msgid "" +"In :code:`utils.py`, we define the arguments parsers for clients, server " +"and simulation, allowing users to specify different experimental " +"settings. Let's first see the sever side:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:263 +#: ../../source/tutorial-quickstart-xgboost.rst:1086 msgid "" -"Next, we're going to use PyTorch to define a simple convolutional neural " -"network. This introduction assumes basic familiarity with PyTorch, so it " -"doesn't cover the PyTorch-related aspects in full detail. If you want to " -"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " -"MINUTE BLITZ " -"`__." +"This allows user to specify training strategies / the number of total " +"clients / FL rounds / participating clients / clients for evaluation, and" +" evaluation fashion. 
Note that with :code:`--centralised-eval`, the sever" +" will do centralised evaluation and all functionalities for client " +"evaluation will be disabled." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:275 -msgid "Defining the model" +#: ../../source/tutorial-quickstart-xgboost.rst:1090 +msgid "Then, the argument parser on client side:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:277 +#: ../../source/tutorial-quickstart-xgboost.rst:1144 msgid "" -"We use the simple CNN described in the `PyTorch tutorial " -"`__:" +"This defines various options for client data partitioning. Besides, " +"clients also have an option to conduct evaluation on centralised test set" +" by setting :code:`--centralised-eval`, as well as an option to perform " +"scaled learning rate based on the number of clients by setting :code" +":`--scaled-lr`." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:314 -msgid "Let's continue with the usual training and test functions:" +#: ../../source/tutorial-quickstart-xgboost.rst:1148 +msgid "We also have an argument parser for simulation:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:374 -msgid "Training the model" +#: ../../source/tutorial-quickstart-xgboost.rst:1226 +msgid "This integrates all arguments for both client and server sides." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:376 -msgid "" -"We now have all the basic building blocks we need: a dataset, a model, a " -"training function, and a test function. Let's put them together to train " -"the model on the dataset of one of our organizations " -"(``trainloaders[0]``). This simulates the reality of most machine " -"learning projects today: each organization has their own data and trains " -"models only on this internal data:" +#: ../../source/tutorial-quickstart-xgboost.rst:1229 +msgid "Example commands" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +#: ../../source/tutorial-quickstart-xgboost.rst:1231 msgid "" -"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " -"in a test set accuracy of about 41%, which is not good, but at the same " -"time, it doesn't really matter for the purposes of this tutorial. The " -"intent was just to show a simplistic centralized training pipeline that " -"sets the stage for what comes next - federated learning!" +"To run a centralised evaluated experiment with bagging strategy on 5 " +"clients with exponential distribution for 50 rounds, we first start the " +"server as below:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 -msgid "Step 2: Federated Learning with Flower" +#: ../../source/tutorial-quickstart-xgboost.rst:1238 +msgid "Then, on each client terminal, we start the clients:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 +#: ../../source/tutorial-quickstart-xgboost.rst:1244 +msgid "To run the same experiment with Flower simulation:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1250 msgid "" -"Step 1 demonstrated a simple centralized training pipeline. All data was " -"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." -" Next, we'll simulate a situation where we have multiple datasets in " -"multiple organizations and where we train a model over these " -"organizations using federated learning." 
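As a rough sketch of what such a server-side parser can look like (apart from :code:`--centralised-eval`, which is named above, the flag names below are illustrative rather than the exact ones in :code:`utils.py`)::

    import argparse

    def server_args_parser():
        parser = argparse.ArgumentParser(description="Flower federated XGBoost server")
        parser.add_argument("--train-method", default="bagging", choices=["bagging", "cyclic"])
        parser.add_argument("--pool-size", type=int, default=2, help="Total number of clients")
        parser.add_argument("--num-rounds", type=int, default=5, help="Number of FL rounds")
        parser.add_argument("--num-clients-per-round", type=int, default=2)
        parser.add_argument("--num-evaluate-clients", type=int, default=2)
        parser.add_argument("--centralised-eval", action="store_true",
                            help="Evaluate the global model on the server only")
        return parser.parse_args()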
+"The full `code `_ for this comprehensive example can be found in" +" :code:`examples/xgboost-comprehensive`." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 -msgid "Updating model parameters" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 +msgid "Build a strategy from scratch" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 msgid "" -"In federated learning, the server sends the global model parameters to " -"the client, and the client updates the local model with the parameters " -"received from the server. It then trains the model on the local data " -"(which changes the model parameters locally) and sends the " -"updated/changed model parameters back to the server (or, alternatively, " -"it sends just the gradients back to the server, not the full model " -"parameters)." +"Welcome to the third part of the Flower federated learning tutorial. In " +"previous parts of this tutorial, we introduced federated learning with " +"PyTorch and Flower (`part 1 `__) and we learned how strategies " +"can be used to customize the execution on both the server and the clients" +" (`part 2 `__)." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 msgid "" -"We need two helper functions to update the local model with parameters " -"received from the server and to get the updated model parameters from the" -" local model: ``set_parameters`` and ``get_parameters``. The following " -"two functions do just that for the PyTorch model above." +"In this notebook, we'll continue to customize the federated learning " +"system we built previously by creating a custom version of FedAvg (again," +" using `Flower `__ and `PyTorch " +"`__)." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 msgid "" -"The details of how this works are not really important here (feel free to" -" consult the PyTorch documentation if you want to learn more). In " -"essence, we use ``state_dict`` to access PyTorch model parameter tensors." -" The parameter tensors are then converted to/from a list of NumPy " -"ndarray's (which Flower knows how to serialize/deserialize):" +"`Star Flower on GitHub `__ ⭐️ and join " +"the Flower community on Slack to connect, ask questions, and get help: " +"`Join Slack `__ 🌼 We'd love to hear from " +"you in the ``#introductions`` channel! And if anything is unclear, head " +"over to the ``#questions`` channel." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 -msgid "Implementing a Flower client" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:17 +msgid "Let's build a new ``Strategy`` from scratch!" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 -msgid "" -"With that out of the way, let's move on to the interesting part. " -"Federated learning systems consist of a server and multiple clients. 
In " -"Flower, we create clients by implementing subclasses of " -"``flwr.client.Client`` or ``flwr.client.NumPyClient``. We use " -"``NumPyClient`` in this tutorial because it is easier to implement and " -"requires us to write less boilerplate." +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:29 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:29 +msgid "Preparation" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:31 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:32 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:31 msgid "" -"To implement the Flower client, we create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``:" +"Before we begin with the actual code, let's make sure that we have " +"everything we need." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 -msgid "``get_parameters``: Return the current local model parameters" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:43 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:44 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:43 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:43 +msgid "Installing dependencies" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 -msgid "" -"``fit``: Receive model parameters from the server, train the model " -"parameters on the local data, and return the (updated) model parameters " -"to the server" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:45 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:46 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:45 +msgid "First, we install the necessary packages:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:65 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:66 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:65 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:65 msgid "" -"``evaluate``: Receive model parameters from the server, evaluate the " -"model parameters on the local data, and return the evaluation result to " -"the server" +"Now that we have all dependencies installed, we can import everything we " +"need for this tutorial:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:101 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:102 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:101 msgid "" -"We mentioned that our clients will use the previously defined PyTorch " -"components for model training and evaluation. Let's see a simple Flower " -"client implementation that brings everything together:" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " +"GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. 
If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 -msgid "" -"Our class ``FlowerClient`` defines how local training/evaluation will be " -"performed and allows Flower to call the local training/evaluation through" -" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" -" *single client* in our federated learning system. Federated learning " -"systems have multiple clients (otherwise, there's not much to federate), " -"so each client will be represented by its own instance of " -"``FlowerClient``. If we have, for example, three clients in our workload," -" then we'd have three instances of ``FlowerClient``. Flower calls " -"``FlowerClient.fit`` on the respective instance when the server selects a" -" particular client for training (and ``FlowerClient.evaluate`` for " -"evaluation)." -msgstr "" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:517 -msgid "Using the Virtual Client Engine" -msgstr "" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:519 -msgid "" -"In this notebook, we want to simulate a federated learning system with 10" -" clients on a single machine. This means that the server and all 10 " -"clients will live on a single machine and share resources such as CPU, " -"GPU, and memory. Having 10 clients would mean having 10 instances of " -"``FlowerClient`` in memory. Doing this on a single machine can quickly " -"exhaust the available memory resources, even if only a subset of these " -"clients participates in a single round of federated learning." +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:114 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:115 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:114 +msgid "Data loading" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:521 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:116 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:116 msgid "" -"In addition to the regular capabilities where server and clients run on " -"multiple machines, Flower, therefore, provides special simulation " -"capabilities that create ``FlowerClient`` instances only when they are " -"actually necessary for training or evaluation. To enable the Flower " -"framework to create clients when necessary, we need to implement a " -"function called ``client_fn`` that creates a ``FlowerClient`` instance on" -" demand. Flower calls ``client_fn`` whenever it needs an instance of one " -"particular client to call ``fit`` or ``evaluate`` (those instances are " -"usually discarded after use, so they should not keep any local state). " -"Clients are identified by a client ID, or short ``cid``. The ``cid`` can " -"be used, for example, to load different local data partitions for " -"different clients, as can be seen below:" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``. 
We introduce a new parameter" +" ``num_clients`` which allows us to call ``load_datasets`` with different" +" numbers of clients." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:556 -msgid "Starting the training" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:167 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:168 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:167 +msgid "Model training/evaluation" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:558 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:169 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:170 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 msgid "" -"We now have the class ``FlowerClient`` which defines client-side " -"training/evaluation and ``client_fn`` which allows Flower to create " -"``FlowerClient`` instances whenever it needs to call ``fit`` or " -"``evaluate`` on one particular client. The last step is to start the " -"actual simulation using ``flwr.simulation.start_simulation``." +"Let's continue with the usual model definition (including " +"``set_parameters`` and ``get_parameters``), training and test functions:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:560 -msgid "" -"The function ``start_simulation`` accepts a number of arguments, amongst " -"them the ``client_fn`` used to create ``FlowerClient`` instances, the " -"number of clients to simulate (``num_clients``), the number of federated " -"learning rounds (``num_rounds``), and the strategy. The strategy " -"encapsulates the federated learning approach/algorithm, for example, " -"*Federated Averaging* (FedAvg)." +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:258 +msgid "Flower client" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:562 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:260 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:260 msgid "" -"Flower has a number of built-in strategies, but we can also use our own " -"strategy implementations to customize nearly all aspects of the federated" -" learning approach. For this example, we use the built-in ``FedAvg`` " -"implementation and customize it using a few basic parameters. The last " -"step is the actual call to ``start_simulation`` which - you guessed it - " -"starts the simulation:" +"To implement the Flower client, we (again) create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " +"``cid`` to the client and use it log additional details:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:608 -msgid "Behind the scenes" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:308 +msgid "Let's test what we have so far before we continue:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:610 -msgid "So how does this work? How does Flower execute this simulation?" 
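A condensed sketch of such a client, in the spirit of the notebook, is shown below. It assumes the ``Net`` model and the ``get_parameters``, ``set_parameters``, ``train`` and ``test`` helpers defined earlier in the notebook::

    import flwr as fl

    class FlowerClient(fl.client.NumPyClient):
        def __init__(self, cid, net, trainloader, valloader):
            self.cid = cid
            self.net = net
            self.trainloader = trainloader
            self.valloader = valloader

        def get_parameters(self, config):
            print(f"[Client {self.cid}] get_parameters")
            return get_parameters(self.net)

        def fit(self, parameters, config):
            print(f"[Client {self.cid}] fit, config: {config}")
            set_parameters(self.net, parameters)
            train(self.net, self.trainloader, epochs=1)
            return get_parameters(self.net), len(self.trainloader), {}

        def evaluate(self, parameters, config):
            print(f"[Client {self.cid}] evaluate, config: {config}")
            set_parameters(self.net, parameters)
            loss, accuracy = test(self.net, self.valloader)
            return float(loss), len(self.valloader), {"accuracy": float(accuracy)}

The ``cid`` only shows up in the log lines here; everything else is the same ``get_parameters``/``fit``/``evaluate`` trio as before.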
+#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:339 +msgid "Build a Strategy from scratch" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:612 -#, python-format +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:341 msgid "" -"When we call ``start_simulation``, we tell Flower that there are 10 " -"clients (``num_clients=10``). Flower then goes ahead an asks the " -"``FedAvg`` strategy to select clients. ``FedAvg`` knows that it should " -"select 100% of the available clients (``fraction_fit=1.0``), so it goes " -"ahead and selects 10 random clients (i.e., 100% of 10)." +"Let’s overwrite the ``configure_fit`` method such that it passes a higher" +" learning rate (potentially also other hyperparameters) to the optimizer " +"of a fraction of the clients. We will keep the sampling of the clients as" +" it is in ``FedAvg`` and then change the configuration dictionary (one of" +" the ``FitIns`` attributes)." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:614 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:507 msgid "" -"Flower then asks the selected 10 clients to train the model. When the " -"server receives the model parameter updates from the clients, it hands " -"those updates over to the strategy (*FedAvg*) for aggregation. The " -"strategy aggregates those updates and returns the new global model, which" -" then gets used in the next round of federated learning." +"The only thing left is to use the newly created custom Strategy " +"``FedCustom`` when starting the experiment:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:626 -msgid "Where's the accuracy?" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:534 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:932 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:697 +msgid "Recap" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:536 msgid "" -"You may have noticed that all metrics except for ``losses_distributed`` " -"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" +"In this notebook, we’ve seen how to implement a custom strategy. A custom" +" strategy enables granular control over client node configuration, result" +" aggregation, and more. To define a custom strategy, you only have to " +"overwrite the abstract methods of the (abstract) base class ``Strategy``." +" To make custom strategies even more powerful, you can pass custom " +"functions to the constructor of your new class (``__init__``) and then " +"call these functions whenever needed." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:550 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:948 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:729 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:715 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 msgid "" -"Flower can automatically aggregate losses returned by individual clients," -" but it cannot do the same for metrics in the generic metrics dictionary " -"(the one with the ``accuracy`` key). 
Metrics dictionaries can contain " -"very different kinds of metrics and even key/value pairs that are not " -"metrics at all, so the framework does not (and can not) know how to " -"handle these automatically." +"Before you continue, make sure to join the Flower community on Slack: " +"`Join Slack `__" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:552 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:950 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:717 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 msgid "" -"As users, we need to tell the framework how to handle/aggregate these " -"custom metrics, and we do so by passing metric aggregation functions to " -"the strategy. The strategy will then call these functions whenever it " -"receives fit or evaluate metrics from clients. The two possible functions" -" are ``fit_metrics_aggregation_fn`` and " -"``evaluate_metrics_aggregation_fn``." +"There's a dedicated ``#questions`` channel if you need help, but we'd " +"also love to hear who you are in ``#introductions``!" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:554 msgid "" -"Let's create a simple weighted averaging function to aggregate the " -"``accuracy`` metric we return from ``evaluate``:" +"The `Flower Federated Learning Tutorial - Part 4 " +"`__ introduces ``Client``, the flexible API underlying " +"``NumPyClient``." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:660 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 +msgid "Customize the client" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 msgid "" -"The only thing left to do is to tell the strategy to call this function " -"whenever it receives evaluation metric dictionaries from the clients:" +"Welcome to the fourth part of the Flower federated learning tutorial. In " +"the previous parts of this tutorial, we introduced federated learning " +"with PyTorch and Flower (`part 1 `__), we learned how " +"strategies can be used to customize the execution on both the server and " +"the clients (`part 2 `__), and we built our own " +"custom strategy from scratch (`part 3 `__)." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:697 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 msgid "" -"We now have a full system that performs federated training and federated " -"evaluation. It uses the ``weighted_average`` function to aggregate custom" -" evaluation metrics and calculates a single ``accuracy`` metric across " -"all clients on the server side." +"In this notebook, we revisit ``NumPyClient`` and introduce a new " +"baseclass for building clients, simply named ``Client``. In previous " +"parts of this tutorial, we've based our client on ``NumPyClient``, a " +"convenience class which makes it easy to work with machine learning " +"libraries that have good NumPy interoperability. With ``Client``, we gain" +" a lot of flexibility that we didn't have before, but we'll also have to " +"do a few things the we didn't have to do before." 
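Going back to the higher-learning-rate ``configure_fit`` idea a few entries above: one way to sketch it, assuming we start from ``FedAvg`` rather than the bare ``Strategy`` base class used in the notebook, is to keep the default sampling and only rewrite each ``FitIns`` config::

    import flwr as fl

    class FedCustomSketch(fl.server.strategy.FedAvg):
        def configure_fit(self, server_round, parameters, client_manager):
            # Keep FedAvg's client sampling, then adjust the per-client config.
            client_instructions = super().configure_fit(server_round, parameters, client_manager)
            half = len(client_instructions) // 2
            for idx, (_, fit_ins) in enumerate(client_instructions):
                # Send a higher learning rate to half of the selected clients.
                fit_ins.config["lr"] = 0.003 if idx < half else 0.001
            return client_instructions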
msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:699 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:18 msgid "" -"The other two categories of metrics (``losses_centralized`` and " -"``metrics_centralized``) are still empty because they only apply when " -"centralized evaluation is being used. Part two of the Flower tutorial " -"will cover centralized evaluation." +"Let's go deeper and see what it takes to move from ``NumPyClient`` to " +"``Client``!" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 -msgid "Final remarks" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:30 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:29 +msgid "Step 0: Preparation" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:117 msgid "" -"Congratulations, you just trained a convolutional neural network, " -"federated over 10 clients! With that, you understand the basics of " -"federated learning with Flower. The same approach you've seen can be used" -" with other machine learning frameworks (not just PyTorch) and tasks (not" -" just CIFAR-10 images classification), for example NLP with Hugging Face " -"Transformers or speech with SpeechBrain." +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:715 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:259 +msgid "Step 1: Revisiting NumPyClient" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:261 msgid "" -"In the next notebook, we're going to cover some more advanced concepts. " -"Want to customize your strategy? Initialize parameters on the server " -"side? Or evaluate the aggregated model on the server side? We'll cover " -"all this and more in the next tutorial." +"So far, we've implemented our client by subclassing " +"``flwr.client.NumPyClient``. The three methods we implemented are " +"``get_parameters``, ``fit``, and ``evaluate``. Finally, we wrap the " +"creation of instances of this class in a function called ``client_fn``:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:309 msgid "" -"The `Flower Federated Learning Tutorial - Part 2 " -"`__ goes into more depth about strategies and all " -"the advanced things you can build with them." +"We've seen this before, there's nothing new so far. The only *tiny* " +"difference compared to the previous notebook is naming, we've changed " +"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " +"``numpyclient_fn``. Let's run it to see the output we get:" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 -msgid "Use a federated learning strategy" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:339 +msgid "" +"This works as expected, two clients are training for three rounds of " +"federated learning." 
msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:341 msgid "" -"Welcome to the next part of the federated learning tutorial. In previous " -"parts of this tutorial, we introduced federated learning with PyTorch and" -" Flower (`part 1 `__)." +"Let's dive a little bit deeper and discuss how Flower executes this " +"simulation. Whenever a client is selected to do some work, " +"``start_simulation`` calls the function ``numpyclient_fn`` to create an " +"instance of our ``FlowerNumPyClient`` (along with loading the model and " +"the data)." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:343 msgid "" -"In this notebook, we'll begin to customize the federated learning system " -"we built in the introductory notebook (again, using `Flower " -"`__ and `PyTorch `__)." +"But here's the perhaps surprising part: Flower doesn't actually use the " +"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " +"makes it look like a subclass of ``flwr.client.Client``, not " +"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " +"know how to handle ``NumPyClient``'s, it only knows how to handle " +"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " +"top of ``Client``." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 -msgid "Let's move beyond FedAvg with Flower strategies!" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:345 +msgid "" +"Instead of building on top of ``NumPyClient``, we can directly build on " +"top of ``Client``." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:309 -msgid "Strategy customization" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:357 +msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:311 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:359 msgid "" -"So far, everything should look familiar if you've worked through the " -"introductory notebook. With that, we're ready to introduce a number of " -"new features." -msgstr "" - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 -msgid "Server-side parameter **initialization**" +"Let's try to do the same thing using ``Client`` instead of " +"``NumPyClient``." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:325 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:465 msgid "" -"Flower, by default, initializes the global model by asking one random " -"client for the initial parameters. In many cases, we want more control " -"over parameter initialization though. Flower therefore allows you to " -"directly pass the initial parameters to the Strategy:" +"Before we discuss the code in more detail, let's try to run it! Gotta " +"make sure our new ``Client``-based client works, right?" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:370 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:490 msgid "" -"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" -" from asking one of the clients for the initial parameters. 
If we look " -"closely, we can see that the logs do not show any calls to the " -"``FlowerClient.get_parameters`` method." +"That's it, we're now using ``Client``. It probably looks similar to what " +"we've done with ``NumPyClient``. So what's the difference?" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:382 -msgid "Starting with a customized strategy" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:492 +msgid "" +"First of all, it's more code. But why? The difference comes from the fact" +" that ``Client`` expects us to take care of parameter serialization and " +"deserialization. For Flower to be able to send parameters over the " +"network, it eventually needs to turn these parameters into ``bytes``. " +"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " +"serialization. Turning raw bytes into something more useful (like NumPy " +"``ndarray``'s) is called deserialization. Flower needs to do both: it " +"needs to serialize parameters on the server-side and send them to the " +"client, the client needs to deserialize them to use them for local " +"training, and then serialize the updated parameters again to send them " +"back to the server, which (finally!) deserializes them again in order to " +"aggregate them with the updates received from other clients." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:384 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:495 msgid "" -"We've seen the function ``start_simulation`` before. It accepts a number " -"of arguments, amongst them the ``client_fn`` used to create " -"``FlowerClient`` instances, the number of clients to simulate " -"``num_clients``, the number of rounds ``num_rounds``, and the strategy." +"The only *real* difference between Client and NumPyClient is that " +"NumPyClient takes care of serialization and deserialization for you. It " +"can do so because it expects you to return parameters as NumPy ndarray's," +" and it knows how to handle these. This makes working with machine " +"learning libraries that have good NumPy support (most of them) a breeze." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:386 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:497 msgid "" -"The strategy encapsulates the federated learning approach/algorithm, for " -"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " -"strategy this time:" +"In terms of API, there's one major difference: all methods in Client take" +" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " +"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " +"``NumPyClient`` on the other hand have multiple arguments (e.g., " +"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" +" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " +"``NumPyClient.fit``) if there are multiple things to handle. These " +"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " +"values you're used to from ``NumPyClient``." 
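To make the ``*Ins``/``*Res`` wrapping tangible, here is a small standalone sketch showing how the values a ``NumPyClient`` passes around are packed into ``FitIns`` and ``FitRes`` (types imported from ``flwr.common``)::

    import numpy as np
    from flwr.common import (
        Code, FitIns, FitRes, Status,
        ndarrays_to_parameters, parameters_to_ndarrays,
    )

    # What the server sends: parameters plus a config dict, wrapped in FitIns.
    fit_ins = FitIns(
        parameters=ndarrays_to_parameters([np.zeros((3, 3))]),
        config={"server_round": 1},
    )

    # What Client.fit unpacks and then returns, wrapped in a single FitRes.
    ndarrays = parameters_to_ndarrays(fit_ins.parameters)
    fit_res = FitRes(
        status=Status(code=Code.OK, message="Success"),
        parameters=ndarrays_to_parameters(ndarrays),
        num_examples=1,
        metrics={},
    )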
msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:424 -msgid "Server-side parameter **evaluation**" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:510 +msgid "Step 3: Custom serialization" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:426 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:512 msgid "" -"Flower can evaluate the aggregated model on the server-side or on the " -"client-side. Client-side and server-side evaluation are similar in some " -"ways, but different in others." +"Here we will explore how to implement custom serialization with a simple " +"example." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:428 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:514 msgid "" -"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " -"simple: it works the same way that evaluation in centralized machine " -"learning does. If there is a server-side dataset that can be used for " -"evaluation purposes, then that's great. We can evaluate the newly " -"aggregated model after each round of training without having to send the " -"model to clients. We're also fortunate in the sense that our entire " -"evaluation dataset is available at all times." +"But first what is serialization? Serialization is just the process of " +"converting an object into raw bytes, and equally as important, " +"deserialization is the process of converting raw bytes back into an " +"object. This is very useful for network communication. Indeed, without " +"serialization, you could not just a Python object through the internet." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:430 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:516 msgid "" -"**Federated Evaluation** (or *client-side evaluation*) is more complex, " -"but also more powerful: it doesn't require a centralized dataset and " -"allows us to evaluate models over a larger set of data, which often " -"yields more realistic evaluation results. In fact, many scenarios require" -" us to use **Federated Evaluation** if we want to get representative " -"evaluation results at all. But this power comes at a cost: once we start " -"to evaluate on the client side, we should be aware that our evaluation " -"dataset can change over consecutive rounds of learning if those clients " -"are not always available. Moreover, the dataset held by each client can " -"also change over consecutive rounds. This can lead to evaluation results " -"that are not stable, so even if we would not change the model, we'd see " -"our evaluation results fluctuate over consecutive rounds." +"Federated Learning relies heavily on internet communication for training " +"by sending Python objects back and forth between the clients and the " +"server. This means that serialization is an essential part of Federated " +"Learning." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:433 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:518 msgid "" -"We've seen how federated evaluation works on the client side (i.e., by " -"implementing the ``evaluate`` method in ``FlowerClient``). 
Now let's see " -"how we can evaluate aggregated model parameters on the server-side:" +"In the following section, we will write a basic example where instead of " +"sending a serialized version of our ``ndarray``\\ s containing our " +"parameters, we will first convert the ``ndarray`` into sparse matrices, " +"before sending them. This technique can be used to save bandwidth, as in " +"certain cases where the weights of a model are sparse (containing many 0 " +"entries), converting them to a sparse matrix can greatly improve their " +"bytesize." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:490 -msgid "Sending/receiving arbitrary values to/from clients" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:521 +msgid "Our custom serialization/deserialization functions" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:492 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:523 msgid "" -"In some situations, we want to configure client-side execution (training," -" evaluation) from the server-side. One example for that is the server " -"asking the clients to train for a certain number of local epochs. Flower " -"provides a way to send configuration values from the server to the " -"clients using a dictionary. Let's look at an example where the clients " -"receive values from the server through the ``config`` parameter in " -"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " -"method receives the configuration dictionary through the ``config`` " -"parameter and can then read values from this dictionary. In this example," -" it reads ``server_round`` and ``local_epochs`` and uses those values to " -"improve the logging and configure the number of local training epochs:" +"This is where the real serialization/deserialization will happen, " +"especially in ``ndarray_to_sparse_bytes`` for serialization and " +"``sparse_bytes_to_ndarray`` for deserialization." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:546 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:525 msgid "" -"So how can we send this config dictionary from server to clients? The " -"built-in Flower Strategies provide way to do this, and it works similarly" -" to the way server-side evaluation works. We provide a function to the " -"strategy, and the strategy calls this function for every round of " -"federated learning:" +"Note that we imported the ``scipy.sparse`` library in order to convert " +"our arrays." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:576 -msgid "" -"Next, we'll just pass this function to the FedAvg strategy before " -"starting the simulation:" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:613 +msgid "Client-side" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 msgid "" -"As we can see, the client logs now include the current round of federated" -" learning (which they read from the ``config`` dictionary). We can also " -"configure local training to run for one epoch during the first and second" -" round of federated learning, and then for two epochs during the third " -"round." +"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " +"will just have to call our custom functions in our " +"``flwr.client.Client``." 
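A simplified, self-contained sketch of this kind of sparse serialization is shown below. It uses ``scipy.sparse`` CSR matrices; the function names match the ones discussed above, but the bodies are illustrative rather than the exact notebook code::

    from io import BytesIO

    import numpy as np
    from scipy import sparse

    def ndarray_to_sparse_bytes(ndarray: np.ndarray) -> bytes:
        # Store 2-D arrays in CSR form; keep everything else dense.
        buffer = BytesIO()
        if ndarray.ndim == 2:
            csr = sparse.csr_matrix(ndarray)
            np.savez(buffer, data=csr.data, indices=csr.indices,
                     indptr=csr.indptr, shape=csr.shape)
        else:
            np.savez(buffer, dense=ndarray)
        return buffer.getvalue()

    def sparse_bytes_to_ndarray(blob: bytes) -> np.ndarray:
        loaded = np.load(BytesIO(blob), allow_pickle=False)
        if "dense" in loaded.files:
            return loaded["dense"]
        csr = sparse.csr_matrix(
            (loaded["data"], loaded["indices"], loaded["indptr"]),
            shape=tuple(loaded["shape"]),
        )
        return csr.toarray()

Whether this actually saves bandwidth depends on how sparse the layer really is; for dense weights the CSR form can be larger than the raw array.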
msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:617 msgid "" -"Clients can also return arbitrary values to the server. To do so, they " -"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " -"used this concept throughout this notebook without mentioning it " -"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" -" key/value pair as the third return value in ``evaluate``." +"Indeed, in ``get_parameters`` we need to serialize the parameters we got " +"from our network using our custom ``ndarrays_to_sparse_parameters`` " +"defined above." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:627 -msgid "Scaling federated learning" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:619 +msgid "" +"In ``fit``, we first need to deserialize the parameters coming from the " +"server using our custom ``sparse_parameters_to_ndarrays`` and then we " +"need to serialize our local results with " +"``ndarrays_to_sparse_parameters``." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:629 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:621 msgid "" -"As a last step in this notebook, let's see how we can use Flower to " -"experiment with a large number of clients." +"In ``evaluate``, we will only need to deserialize the global parameters " +"with our custom function." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:651 -#, python-format -msgid "" -"We now have 1000 partitions, each holding 45 training and 5 validation " -"examples. Given that the number of training examples on each client is " -"quite small, we should probably train the model a bit longer, so we " -"configure the clients to perform 3 local training epochs. We should also " -"adjust the fraction of clients selected for training during each round " -"(we don't want all 1000 clients participating in every round), so we " -"adjust ``fraction_fit`` to ``0.05``, which means that only 5% of " -"available clients (so 50 clients) will be selected for training each " -"round:" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:725 +msgid "Server-side" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:699 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:727 msgid "" -"In this notebook, we've seen how we can gradually enhance our system by " -"customizing the strategy, initializing parameters on the server side, " -"choosing a different strategy, and evaluating models on the server-side. " -"That's quite a bit of flexibility with so little code, right?" +"For this example, we will just use ``FedAvg`` as a strategy. To change " +"the serialization and deserialization here, we only need to reimplement " +"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" +" functions of the strategy will be inherited from the super class " +"``FedAvg``." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:701 -msgid "" -"In the later sections, we've seen how we can communicate arbitrary values" -" between server and clients to fully customize client-side execution. 
" -"With that capability, we built a large-scale Federated Learning " -"simulation using the Flower Virtual Client Engine and ran an experiment " -"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:729 +msgid "As you can see only one line as change in ``evaluate``:" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:735 msgid "" -"The `Flower Federated Learning Tutorial - Part 3 " -"`__ shows how to build a fully custom ``Strategy`` " -"from scratch." +"And for ``aggregate_fit``, we will first deserialize every result we " +"received:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 -msgid "What is Federated Learning?" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:744 +msgid "And then serialize the aggregated result:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 -msgid "" -"In this tutorial, you will learn what federated learning is, build your " -"first system in Flower, and gradually extend it. If you work through all " -"parts of the tutorial, you will be able to build advanced federated " -"learning systems that approach the current state of the art in the field." +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:903 +msgid "We can now run our custom serialization example!" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:934 msgid "" -"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " -"federated learning. Only a basic understanding of data science and Python" -" programming is assumed." +"In this part of the tutorial, we've seen how we can build clients by " +"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " +"convenience abstraction that makes it easier to work with machine " +"learning libraries that have good NumPy interoperability. ``Client`` is a" +" more flexible abstraction that allows us to do things that are not " +"possible in ``NumPyClient``. In order to do so, it requires us to handle " +"parameter serialization and deserialization ourselves." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:952 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the open-source Flower community on Slack to connect, ask questions, and " -"get help: `Join Slack `__ 🌼 We'd love to " -"hear from you in the ``#introductions`` channel! And if anything is " -"unclear, head over to the ``#questions`` channel." +"This is the final part of the Flower tutorial (for now!), " +"congratulations! You're now well equipped to understand the rest of the " +"documentation. There are many topics we didn't cover in the tutorial, we " +"recommend the following resources:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 -msgid "Let's get started!" 
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:954 +msgid "`Read Flower Docs `__" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 -msgid "Classic machine learning" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:955 +msgid "" +"`Check out Flower Code Examples " +"`__" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:956 msgid "" -"Before we begin to discuss federated learning, let us quickly recap how " -"most machine learning works today." +"`Use Flower Baselines for your research " +"`__" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:957 msgid "" -"In machine learning, we have a model, and we have data. The model could " -"be a neural network (as depicted here), or something else, like classical" -" linear regression." +"`Watch Flower Summit 2023 videos `__" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|31e4b1afa87c4b968327bbeafbf184d4|" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 +msgid "Get started with Flower" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 -msgid "Model and data" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 +msgid "Welcome to the Flower federated learning tutorial!" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 msgid "" -"We train the model using the data to perform a useful task. A task could " -"be to detect objects in images, transcribe an audio recording, or play a " -"game like Go." -msgstr "" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|c9d935b4284e4c389a33d86b33e07c0a|" +"In this notebook, we'll build a federated learning system using Flower, " +"`Flower Datasets `__ and PyTorch. In " +"part 1, we use PyTorch for the model training pipeline and data loading. " +"In part 2, we continue to federate the PyTorch-based pipeline using " +"Flower." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 -msgid "Train model using data" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:17 +msgid "Let's get stated!" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:31 msgid "" -"Now, in practice, the training data we work with doesn't originate on the" -" machine we train the model on. It gets created somewhere else." +"Before we begin with any actual code, let's make sure that we have " +"everything we need." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:45 msgid "" -"It originates on a smartphone by the user interacting with an app, a car " -"collecting sensor data, a laptop receiving input via the keyboard, or a " -"smart speaker listening to someone trying to sing a song." 
+"Next, we install the necessary packages for PyTorch (``torch`` and " +"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " +"(``flwr``):" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|00727b5faffb468f84dd1b03ded88638|" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:105 +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" +" GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 -msgid "Data on a phone" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:118 +msgid "Loading the data" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:120 msgid "" -"What's also important to mention, this \"somewhere else\" is usually not " -"just one place, it's many places. It could be several devices all running" -" the same app. But it could also be several organizations, all generating" -" data for the same task." +"Federated learning can be applied to many different types of tasks across" +" different domains. In this tutorial, we introduce federated learning by " +"training a simple convolutional neural network (CNN) on the popular " +"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " +"distinguish between images from ten different classes: 'airplane', " +"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " +"'truck'." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|daf0cf0ff4c24fd29439af78416cf47b|" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:131 +msgid "" +"We simulate having multiple datasets from multiple organizations (also " +"called the \"cross-silo\" setting in federated learning) by splitting the" +" original CIFAR-10 dataset into multiple partitions. Each partition will " +"represent the data from a single organization. We're doing this purely " +"for experimentation purposes, in the real world there's no need for data " +"splitting because each organization already has their own data (so the " +"data is naturally partitioned)." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 -msgid "Data is on many devices" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:133 +msgid "" +"Each organization will act as a client in the federated learning system. " +"So having ten organizations participate in a federation means having ten " +"clients connected to the federated learning server." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:144 msgid "" -"So to use machine learning, or any kind of data analysis, the approach " -"that has been used in the past was to collect all data on a central " -"server. This server can be somewhere in a data center, or somewhere in " -"the cloud." 
+"Let's now create the Federated Dataset abstraction that from ``flwr-" +"datasets`` that partitions the CIFAR-10. We will create small training " +"and test set for each edge device and wrap each of them into a PyTorch " +"``DataLoader``:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|9f093007080d471d94ca90d3e9fde9b6|" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:198 +msgid "" +"We now have a list of ten training sets and ten validation sets " +"(``trainloaders`` and ``valloaders``) representing the data of ten " +"different organizations. Each ``trainloader``/``valloader`` pair contains" +" 4500 training examples and 500 validation examples. There's also a " +"single ``testloader`` (we did not split the test set). Again, this is " +"only necessary for building research or educational systems, actual " +"federated learning systems have their data naturally distributed across " +"multiple partitions." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 -msgid "Central data collection" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:201 +msgid "" +"Let's take a look at the first batch of images and labels in the first " +"training set (i.e., ``trainloaders[0]``) before we move on:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:240 msgid "" -"Once all the data is collected in one place, we can finally use machine " -"learning algorithms to train our model on the data. This is the machine " -"learning approach that we've basically always relied on." +"The output above shows a random batch of images from the first " +"``trainloader`` in our list of ten ``trainloaders``. It also prints the " +"labels associated with each image (i.e., one of the ten possible labels " +"we've seen above). If you run the cell again, you should see another " +"batch of images." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|46a26e6150e0479fbd3dfd655f36eb13|" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:252 +msgid "Step 1: Centralized Training with PyTorch" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 -msgid "Central model training" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:263 +msgid "" +"Next, we're going to use PyTorch to define a simple convolutional neural " +"network. This introduction assumes basic familiarity with PyTorch, so it " +"doesn't cover the PyTorch-related aspects in full detail. If you want to " +"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " +"MINUTE BLITZ " +"`__." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 -msgid "Challenges of classical machine learning" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:275 +msgid "Defining the model" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:277 msgid "" -"The classic machine learning approach we've just seen can be used in some" -" cases. Great examples include categorizing holiday photos, or analyzing " -"web traffic. Cases, where all the data is naturally available on a " -"centralized server." 
+"We use the simple CNN described in the `PyTorch tutorial " +"`__:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|3daba297595c4c7fb845d90404a6179a|" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:314 +msgid "Let's continue with the usual training and test functions:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 -msgid "Centralized possible" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:374 +msgid "Training the model" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:376 msgid "" -"But the approach can not be used in many other cases. Cases, where the " -"data is not available on a centralized server, or cases where the data " -"available on one server is not enough to train a good model." +"We now have all the basic building blocks we need: a dataset, a model, a " +"training function, and a test function. Let's put them together to train " +"the model on the dataset of one of our organizations " +"(``trainloaders[0]``). This simulates the reality of most machine " +"learning projects today: each organization has their own data and trains " +"models only on this internal data:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|5769874fa9c4455b80b2efda850d39d7|" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +msgid "" +"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " +"in a test set accuracy of about 41%, which is not good, but at the same " +"time, it doesn't really matter for the purposes of this tutorial. The " +"intent was just to show a simplistic centralized training pipeline that " +"sets the stage for what comes next - federated learning!" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 -msgid "Centralized impossible" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 +msgid "Step 2: Federated Learning with Flower" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 msgid "" -"There are many reasons why the classic centralized machine learning " -"approach does not work for a large number of highly important real-world " -"use cases. Those reasons include:" +"Step 1 demonstrated a simple centralized training pipeline. All data was " +"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." +" Next, we'll simulate a situation where we have multiple datasets in " +"multiple organizations and where we train a model over these " +"organizations using federated learning." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 +msgid "Updating model parameters" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 msgid "" -"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " -"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " -"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " -"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " -"protect sensitive data from being moved. 
In fact, those regulations " -"sometimes even prevent single organizations from combining their own " -"users' data for artificial intelligence training because those users live" -" in different parts of the world, and their data is governed by different" -" data protection regulations." +"In federated learning, the server sends the global model parameters to " +"the client, and the client updates the local model with the parameters " +"received from the server. It then trains the model on the local data " +"(which changes the model parameters locally) and sends the " +"updated/changed model parameters back to the server (or, alternatively, " +"it sends just the gradients back to the server, not the full model " +"parameters)." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 msgid "" -"**User preference**: In addition to regulation, there are use cases where" -" users just expect that no data leaves their device, ever. If you type " -"your passwords and credit card info into the digital keyboard of your " -"phone, you don't expect those passwords to end up on the server of the " -"company that developed that keyboard, do you? In fact, that use case was " -"the reason federated learning was invented in the first place." +"We need two helper functions to update the local model with parameters " +"received from the server and to get the updated model parameters from the" +" local model: ``set_parameters`` and ``get_parameters``. The following " +"two functions do just that for the PyTorch model above." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 msgid "" -"**Data volume**: Some sensors, like cameras, produce such a high data " -"volume that it is neither feasible nor economic to collect all the data " -"(due to, for example, bandwidth or communication efficiency). Think about" -" a national rail service with hundreds of train stations across the " -"country. If each of these train stations is outfitted with a number of " -"security cameras, the volume of raw on-device data they produce requires " -"incredibly powerful and exceedingly expensive infrastructure to process " -"and store. And most of the data isn't even useful." +"The details of how this works are not really important here (feel free to" +" consult the PyTorch documentation if you want to learn more). In " +"essence, we use ``state_dict`` to access PyTorch model parameter tensors." +" The parameter tensors are then converted to/from a list of NumPy " +"ndarray's (which Flower knows how to serialize/deserialize):" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 -msgid "Examples where centralized machine learning does not work include:" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 +msgid "Implementing a Flower client" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 msgid "" -"Sensitive healthcare records from multiple hospitals to train cancer " -"detection models" +"With that out of the way, let's move on to the interesting part. " +"Federated learning systems consist of a server and multiple clients. In " +"Flower, we create clients by implementing subclasses of " +"``flwr.client.Client`` or ``flwr.client.NumPyClient``. 
We use " +"``NumPyClient`` in this tutorial because it is easier to implement and " +"requires us to write less boilerplate." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 msgid "" -"Financial information from different organizations to detect financial " -"fraud" +"To implement the Flower client, we create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 -msgid "Location data from your electric car to make better range prediction" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 +msgid "``get_parameters``: Return the current local model parameters" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 -msgid "End-to-end encrypted messages to train better auto-complete models" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +msgid "" +"``fit``: Receive model parameters from the server, train the model " +"parameters on the local data, and return the (updated) model parameters " +"to the server" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 msgid "" -"The popularity of privacy-enhancing systems like the `Brave " -"`__ browser or the `Signal `__ " -"messenger shows that users care about privacy. In fact, they choose the " -"privacy-enhancing version over other alternatives, if such an alternative " -"exists. But what can we do to apply machine learning and data science to " -"these cases to utilize private data? After all, these are all areas that " -"would benefit significantly from recent advances in AI." +"``evaluate``: Receive model parameters from the server, evaluate the " +"model parameters on the local data, and return the evaluation result to " +"the server" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 -msgid "Federated learning" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +msgid "" +"We mentioned that our clients will use the previously defined PyTorch " +"components for model training and evaluation. Let's see a simple Flower " +"client implementation that brings everything together:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 msgid "" -"Federated learning simply reverses this approach. It enables machine " -"learning on distributed data by moving the training to the data, instead " -"of moving the data to the training. Here's the single-sentence " -"explanation:" +"Our class ``FlowerClient`` defines how local training/evaluation will be " +"performed and allows Flower to call the local training/evaluation through" +" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" +" *single client* in our federated learning system. Federated learning " +"systems have multiple clients (otherwise, there's not much to federate), " +"so each client will be represented by its own instance of " +"``FlowerClient``. If we have, for example, three clients in our workload," +" then we'd have three instances of ``FlowerClient``. 
Flower calls " +"``FlowerClient.fit`` on the respective instance when the server selects a" +" particular client for training (and ``FlowerClient.evaluate`` for " +"evaluation)." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 -msgid "Central machine learning: move the data to the computation" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:517 +msgid "Using the Virtual Client Engine" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 -msgid "Federated (machine) learning: move the computation to the data" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:519 +msgid "" +"In this notebook, we want to simulate a federated learning system with 10" +" clients on a single machine. This means that the server and all 10 " +"clients will live on a single machine and share resources such as CPU, " +"GPU, and memory. Having 10 clients would mean having 10 instances of " +"``FlowerClient`` in memory. Doing this on a single machine can quickly " +"exhaust the available memory resources, even if only a subset of these " +"clients participates in a single round of federated learning." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:521 msgid "" -"By doing so, it enables us to use machine learning (and other data " -"science approaches) in areas where it wasn't possible before. We can now " -"train excellent medical AI models by enabling different hospitals to work" -" together. We can solve financial fraud by training AI models on the data" -" of different financial institutions. We can build novel privacy-" -"enhancing applications (such as secure messaging) that have better built-" -"in AI than their non-privacy-enhancing alternatives. And those are just a" -" few of the examples that come to mind. As we deploy federated learning, " -"we discover more and more areas that can suddenly be reinvented because " -"they now have access to vast amounts of previously inaccessible data." +"In addition to the regular capabilities where server and clients run on " +"multiple machines, Flower, therefore, provides special simulation " +"capabilities that create ``FlowerClient`` instances only when they are " +"actually necessary for training or evaluation. To enable the Flower " +"framework to create clients when necessary, we need to implement a " +"function called ``client_fn`` that creates a ``FlowerClient`` instance on" +" demand. Flower calls ``client_fn`` whenever it needs an instance of one " +"particular client to call ``fit`` or ``evaluate`` (those instances are " +"usually discarded after use, so they should not keep any local state). " +"Clients are identified by a client ID, or short ``cid``. The ``cid`` can " +"be used, for example, to load different local data partitions for " +"different clients, as can be seen below:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 -msgid "" -"So how does federated learning work, exactly? Let's start with an " -"intuitive explanation." 
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:556 +msgid "Starting the training" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 -msgid "Federated learning in five steps" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:558 +msgid "" +"We now have the class ``FlowerClient`` which defines client-side " +"training/evaluation and ``client_fn`` which allows Flower to create " +"``FlowerClient`` instances whenever it needs to call ``fit`` or " +"``evaluate`` on one particular client. The last step is to start the " +"actual simulation using ``flwr.simulation.start_simulation``." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 -msgid "Step 0: Initialize global model" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:560 +msgid "" +"The function ``start_simulation`` accepts a number of arguments, amongst " +"them the ``client_fn`` used to create ``FlowerClient`` instances, the " +"number of clients to simulate (``num_clients``), the number of federated " +"learning rounds (``num_rounds``), and the strategy. The strategy " +"encapsulates the federated learning approach/algorithm, for example, " +"*Federated Averaging* (FedAvg)." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:562 msgid "" -"We start by initializing the model on the server. This is exactly the " -"same in classic centralized learning: we initialize the model parameters," -" either randomly or from a previously saved checkpoint." +"Flower has a number of built-in strategies, but we can also use our own " +"strategy implementations to customize nearly all aspects of the federated" +" learning approach. For this example, we use the built-in ``FedAvg`` " +"implementation and customize it using a few basic parameters. The last " +"step is the actual call to ``start_simulation`` which - you guessed it - " +"starts the simulation:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|ba47ffb421814b0f8f9fa5719093d839|" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:608 +msgid "Behind the scenes" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 -msgid "Initialize global model" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:610 +msgid "So how does this work? How does Flower execute this simulation?" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:612 +#, python-format msgid "" -"Step 1: Send model to a number of connected organizations/devices (client" -" nodes)" +"When we call ``start_simulation``, we tell Flower that there are 10 " +"clients (``num_clients=10``). Flower then goes ahead an asks the " +"``FedAvg`` strategy to select clients. ``FedAvg`` knows that it should " +"select 100% of the available clients (``fraction_fit=1.0``), so it goes " +"ahead and selects 10 random clients (i.e., 100% of 10)." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:614 msgid "" -"Next, we send the parameters of the global model to the connected client " -"nodes (think: edge devices like smartphones or servers belonging to " -"organizations). 
This is to ensure that each participating node starts " -"their local training using the same model parameters. We often use only a" -" few of the connected nodes instead of all nodes. The reason for this is " -"that selecting more and more client nodes has diminishing returns." +"Flower then asks the selected 10 clients to train the model. When the " +"server receives the model parameter updates from the clients, it hands " +"those updates over to the strategy (*FedAvg*) for aggregation. The " +"strategy aggregates those updates and returns the new global model, which" +" then gets used in the next round of federated learning." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|aeac5bf79cbf497082e979834717e01b|" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:626 +msgid "Where's the accuracy?" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 -msgid "Send global model" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 +msgid "" +"You may have noticed that all metrics except for ``losses_distributed`` " +"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 msgid "" -"Step 2: Train model locally on the data of each organization/device " -"(client node)" +"Flower can automatically aggregate losses returned by individual clients," +" but it cannot do the same for metrics in the generic metrics dictionary " +"(the one with the ``accuracy`` key). Metrics dictionaries can contain " +"very different kinds of metrics and even key/value pairs that are not " +"metrics at all, so the framework does not (and can not) know how to " +"handle these automatically." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632 msgid "" -"Now that all (selected) client nodes have the latest version of the " -"global model parameters, they start the local training. They use their " -"own local dataset to train their own local model. They don't train the " -"model until full convergence, but they only train for a little while. " -"This could be as little as one epoch on the local data, or even just a " -"few steps (mini-batches)." +"As users, we need to tell the framework how to handle/aggregate these " +"custom metrics, and we do so by passing metric aggregation functions to " +"the strategy. The strategy will then call these functions whenever it " +"receives fit or evaluate metrics from clients. The two possible functions" +" are ``fit_metrics_aggregation_fn`` and " +"``evaluate_metrics_aggregation_fn``." 
msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|ce27ed4bbe95459dba016afc42486ba2|" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 +msgid "" +"Let's create a simple weighted averaging function to aggregate the " +"``accuracy`` metric we return from ``evaluate``:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 -msgid "Train on local data" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:660 +msgid "" +"The only thing left to do is to tell the strategy to call this function " +"whenever it receives evaluation metric dictionaries from the clients:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 -msgid "Step 3: Return model updates back to the server" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:697 +msgid "" +"We now have a full system that performs federated training and federated " +"evaluation. It uses the ``weighted_average`` function to aggregate custom" +" evaluation metrics and calculates a single ``accuracy`` metric across " +"all clients on the server side." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:699 msgid "" -"After local training, each client node has a slightly different version " -"of the model parameters they originally received. The parameters are all " -"different because each client node has different examples in its local " -"dataset. The client nodes then send those model updates back to the " -"server. The model updates they send can either be the full model " -"parameters or just the gradients that were accumulated during local " -"training." +"The other two categories of metrics (``losses_centralized`` and " +"``metrics_centralized``) are still empty because they only apply when " +"centralized evaluation is being used. Part two of the Flower tutorial " +"will cover centralized evaluation." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|ae94a7f71dda443cbec2385751427d41|" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 +msgid "Final remarks" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 -msgid "Send model updates" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +msgid "" +"Congratulations, you just trained a convolutional neural network, " +"federated over 10 clients! With that, you understand the basics of " +"federated learning with Flower. The same approach you've seen can be used" +" with other machine learning frameworks (not just PyTorch) and tasks (not" +" just CIFAR-10 images classification), for example NLP with Hugging Face " +"Transformers or speech with SpeechBrain." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 -msgid "Step 4: Aggregate model updates into a new global model" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:715 +msgid "" +"In the next notebook, we're going to cover some more advanced concepts. " +"Want to customize your strategy? Initialize parameters on the server " +"side? Or evaluate the aggregated model on the server side? We'll cover " +"all this and more in the next tutorial." 
msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 msgid "" -"The server receives model updates from the selected client nodes. If it " -"selected 100 client nodes, it now has 100 slightly different versions of " -"the original global model, each trained on the local data of one client. " -"But didn't we want to have one model that contains the learnings from the" -" data of all 100 client nodes?" +"The `Flower Federated Learning Tutorial - Part 2 " +"`__ goes into more depth about strategies and all " +"the advanced things you can build with them." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 +msgid "Use a federated learning strategy" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 msgid "" -"In order to get one single model, we have to combine all the model " -"updates we received from the client nodes. This process is called " -"*aggregation*, and there are many different ways to do it. The most basic" -" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " -"`__), often abbreviated as *FedAvg*. " -"*FedAvg* takes the 100 model updates and, as the name suggests, averages " -"them. To be more precise, it takes the *weighted average* of the model " -"updates, weighted by the number of examples each client used for " -"training. The weighting is important to make sure that each data example " -"has the same \"influence\" on the resulting global model. If one client " -"has 10 examples, and another client has 100 examples, then - without " -"weighting - each of the 10 examples would influence the global model ten " -"times as much as each of the 100 examples." +"Welcome to the next part of the federated learning tutorial. In previous " +"parts of this tutorial, we introduced federated learning with PyTorch and" +" Flower (`part 1 `__)." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll begin to customize the federated learning system " +"we built in the introductory notebook (again, using `Flower " +"`__ and `PyTorch `__)." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 -msgid "Aggregate model updates" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 +msgid "Let's move beyond FedAvg with Flower strategies!" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 -msgid "Step 5: Repeat steps 1 to 4 until the model converges" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:309 +msgid "Strategy customization" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:311 msgid "" -"Steps 1 to 4 are what we call a single round of federated learning. The " -"global model parameters get sent to the participating client nodes (step " -"1), the client nodes train on their local data (step 2), they send their " -"updated models to the server (step 3), and the server then aggregates the" -" model updates to get a new version of the global model (step 4)." 
+"So far, everything should look familiar if you've worked through the " +"introductory notebook. With that, we're ready to introduce a number of " +"new features." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 +msgid "Server-side parameter **initialization**" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:325 msgid "" -"During a single round, each client node that participates in that " -"iteration only trains for a little while. This means that after the " -"aggregation step (step 4), we have a model that has been trained on all " -"the data of all participating client nodes, but only for a little while. " -"We then have to repeat this training process over and over again to " -"eventually arrive at a fully trained model that performs well across the " -"data of all client nodes." +"Flower, by default, initializes the global model by asking one random " +"client for the initial parameters. In many cases, we want more control " +"over parameter initialization though. Flower therefore allows you to " +"directly pass the initial parameters to the Strategy:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:370 msgid "" -"Congratulations, you now understand the basics of federated learning. " -"There's a lot more to discuss, of course, but that was federated learning" -" in a nutshell. In later parts of this tutorial, we will go into more " -"detail. Interesting questions include: How can we select the best client " -"nodes that should participate in the next round? What's the best way to " -"aggregate model updates? How can we handle failing client nodes " -"(stragglers)?" +"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" +" from asking one of the clients for the initial parameters. If we look " +"closely, we can see that the logs do not show any calls to the " +"``FlowerClient.get_parameters`` method." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:382 +msgid "Starting with a customized strategy" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:384 msgid "" -"Just like we can train a model on the decentralized data of different " -"client nodes, we can also evaluate the model on that data to receive " -"valuable metrics. This is called federated evaluation, sometimes " -"abbreviated as FE. In fact, federated evaluation is an integral part of " -"most federated learning systems." +"We've seen the function ``start_simulation`` before. It accepts a number " +"of arguments, amongst them the ``client_fn`` used to create " +"``FlowerClient`` instances, the number of clients to simulate " +"``num_clients``, the number of rounds ``num_rounds``, and the strategy." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:386 +msgid "" +"The strategy encapsulates the federated learning approach/algorithm, for " +"example, ``FedAvg`` or ``FedAdagrad``. 
Let's try to use a different " +"strategy this time:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:424 +msgid "Server-side parameter **evaluation**" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:426 +msgid "" +"Flower can evaluate the aggregated model on the server-side or on the " +"client-side. Client-side and server-side evaluation are similar in some " +"ways, but different in others." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:428 +msgid "" +"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " +"simple: it works the same way that evaluation in centralized machine " +"learning does. If there is a server-side dataset that can be used for " +"evaluation purposes, then that's great. We can evaluate the newly " +"aggregated model after each round of training without having to send the " +"model to clients. We're also fortunate in the sense that our entire " +"evaluation dataset is available at all times." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:430 +msgid "" +"**Federated Evaluation** (or *client-side evaluation*) is more complex, " +"but also more powerful: it doesn't require a centralized dataset and " +"allows us to evaluate models over a larger set of data, which often " +"yields more realistic evaluation results. In fact, many scenarios require" +" us to use **Federated Evaluation** if we want to get representative " +"evaluation results at all. But this power comes at a cost: once we start " +"to evaluate on the client side, we should be aware that our evaluation " +"dataset can change over consecutive rounds of learning if those clients " +"are not always available. Moreover, the dataset held by each client can " +"also change over consecutive rounds. This can lead to evaluation results " +"that are not stable, so even if we would not change the model, we'd see " +"our evaluation results fluctuate over consecutive rounds." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:433 +msgid "" +"We've seen how federated evaluation works on the client side (i.e., by " +"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " +"how we can evaluate aggregated model parameters on the server-side:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:490 +msgid "Sending/receiving arbitrary values to/from clients" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:492 +msgid "" +"In some situations, we want to configure client-side execution (training," +" evaluation) from the server-side. One example for that is the server " +"asking the clients to train for a certain number of local epochs. Flower " +"provides a way to send configuration values from the server to the " +"clients using a dictionary. Let's look at an example where the clients " +"receive values from the server through the ``config`` parameter in " +"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " +"method receives the configuration dictionary through the ``config`` " +"parameter and can then read values from this dictionary. 
In this example," +" it reads ``server_round`` and ``local_epochs`` and uses those values to " +"improve the logging and configure the number of local training epochs:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:546 +msgid "" +"So how can we send this config dictionary from server to clients? The " +"built-in Flower Strategies provide way to do this, and it works similarly" +" to the way server-side evaluation works. We provide a function to the " +"strategy, and the strategy calls this function for every round of " +"federated learning:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 -msgid "Federated analytics" -msgstr "" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:576 +msgid "" +"Next, we'll just pass this function to the FedAvg strategy before " +"starting the simulation:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +msgid "" +"As we can see, the client logs now include the current round of federated" +" learning (which they read from the ``config`` dictionary). We can also " +"configure local training to run for one epoch during the first and second" +" round of federated learning, and then for two epochs during the third " +"round." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +msgid "" +"Clients can also return arbitrary values to the server. To do so, they " +"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " +"used this concept throughout this notebook without mentioning it " +"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" +" key/value pair as the third return value in ``evaluate``." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:627 +msgid "Scaling federated learning" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:629 +msgid "" +"As a last step in this notebook, let's see how we can use Flower to " +"experiment with a large number of clients." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:651 +#, python-format +msgid "" +"We now have 1000 partitions, each holding 45 training and 5 validation " +"examples. Given that the number of training examples on each client is " +"quite small, we should probably train the model a bit longer, so we " +"configure the clients to perform 3 local training epochs. We should also " +"adjust the fraction of clients selected for training during each round " +"(we don't want all 1000 clients participating in every round), so we " +"adjust ``fraction_fit`` to ``0.05``, which means that only 5% of " +"available clients (so 50 clients) will be selected for training each " +"round:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:699 +msgid "" +"In this notebook, we've seen how we can gradually enhance our system by " +"customizing the strategy, initializing parameters on the server side, " +"choosing a different strategy, and evaluating models on the server-side. " +"That's quite a bit of flexibility with so little code, right?" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:701 +msgid "" +"In the later sections, we've seen how we can communicate arbitrary values" +" between server and clients to fully customize client-side execution. 
" +"With that capability, we built a large-scale Federated Learning " +"simulation using the Flower Virtual Client Engine and ran an experiment " +"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 +msgid "" +"The `Flower Federated Learning Tutorial - Part 3 " +"`__ shows how to build a fully custom ``Strategy`` from " +"scratch." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 +msgid "What is Federated Learning?" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 +msgid "" +"In this tutorial, you will learn what federated learning is, build your " +"first system in Flower, and gradually extend it. If you work through all " +"parts of the tutorial, you will be able to build advanced federated " +"learning systems that approach the current state of the art in the field." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +msgid "" +"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " +"federated learning. Only a basic understanding of data science and Python" +" programming is assumed." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the open-source Flower community on Slack to connect, ask questions, and " +"get help: `Join Slack `__ 🌼 We'd love to " +"hear from you in the ``#introductions`` channel! And if anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 +msgid "Let's get started!" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 +msgid "Classic machine learning" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +msgid "" +"Before we begin to discuss federated learning, let us quickly recap how " +"most machine learning works today." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +msgid "" +"In machine learning, we have a model, and we have data. The model could " +"be a neural network (as depicted here), or something else, like classical" +" linear regression." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 +msgid "|2b5c62c529f6416f840c594cce062fbb|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 +msgid "Model and data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +msgid "" +"We train the model using the data to perform a useful task. A task could " +"be to detect objects in images, transcribe an audio recording, or play a " +"game like Go." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 +msgid "|90b334680cb7467d9a04d39b8e8dca9f|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 +msgid "Train model using data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 +msgid "" +"Now, in practice, the training data we work with doesn't originate on the" +" machine we train the model on. It gets created somewhere else." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 +msgid "" +"It originates on a smartphone by the user interacting with an app, a car " +"collecting sensor data, a laptop receiving input via the keyboard, or a " +"smart speaker listening to someone trying to sing a song." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 +msgid "|65764ceee89f4335bfd93fd0b115e831|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 +msgid "Data on a phone" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +msgid "" +"What's also important to mention, this \"somewhere else\" is usually not " +"just one place, it's many places. It could be several devices all running" +" the same app. But it could also be several organizations, all generating" +" data for the same task." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 +msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 +msgid "Data is on many devices" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +msgid "" +"So to use machine learning, or any kind of data analysis, the approach " +"that has been used in the past was to collect all data on a central " +"server. This server can be somewhere in a data center, or somewhere in " +"the cloud." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 +msgid "|11e95ac83a8548d8b3505b4663187d07|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 +msgid "Central data collection" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 +msgid "" +"Once all the data is collected in one place, we can finally use machine " +"learning algorithms to train our model on the data. This is the machine " +"learning approach that we've basically always relied on." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 +msgid "|1dab2f3a23674abc8a6731f20fa10730|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 +msgid "Central model training" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 +msgid "Challenges of classical machine learning" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +msgid "" +"The classic machine learning approach we've just seen can be used in some" +" cases. Great examples include categorizing holiday photos, or analyzing " +"web traffic. Cases, where all the data is naturally available on a " +"centralized server." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 +msgid "|7f0ee162da38450788493a21627306f7|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 +msgid "Centralized possible" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +msgid "" +"But the approach can not be used in many other cases. Cases, where the " +"data is not available on a centralized server, or cases where the data " +"available on one server is not enough to train a good model." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 +msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 +msgid "Centralized impossible" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 +msgid "" +"There are many reasons why the classic centralized machine learning " +"approach does not work for a large number of highly important real-world " +"use cases. Those reasons include:" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +msgid "" +"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " +"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " +"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " +"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " +"protect sensitive data from being moved. In fact, those regulations " +"sometimes even prevent single organizations from combining their own " +"users' data for artificial intelligence training because those users live" +" in different parts of the world, and their data is governed by different" +" data protection regulations." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +msgid "" +"**User preference**: In addition to regulation, there are use cases where" +" users just expect that no data leaves their device, ever. If you type " +"your passwords and credit card info into the digital keyboard of your " +"phone, you don't expect those passwords to end up on the server of the " +"company that developed that keyboard, do you? In fact, that use case was " +"the reason federated learning was invented in the first place." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 +msgid "" +"**Data volume**: Some sensors, like cameras, produce such a high data " +"volume that it is neither feasible nor economic to collect all the data " +"(due to, for example, bandwidth or communication efficiency). Think about" +" a national rail service with hundreds of train stations across the " +"country. If each of these train stations is outfitted with a number of " +"security cameras, the volume of raw on-device data they produce requires " +"incredibly powerful and exceedingly expensive infrastructure to process " +"and store. And most of the data isn't even useful." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 +msgid "Examples where centralized machine learning does not work include:" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +msgid "" +"Sensitive healthcare records from multiple hospitals to train cancer " +"detection models" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +msgid "" +"Financial information from different organizations to detect financial " +"fraud" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 +msgid "Location data from your electric car to make better range prediction" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 +msgid "End-to-end encrypted messages to train better auto-complete models" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 +msgid "" +"The popularity of privacy-enhancing systems like the `Brave " +"`__ browser or the `Signal `__ " +"messenger shows that users care about privacy. 
In fact, they choose the " +"privacy-enhancing version over other alternatives, if such an alternative" +" exists. But what can we do to apply machine learning and data science to" +" these cases to utilize private data? After all, these are all areas that" +" would benefit significantly from recent advances in AI." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 +msgid "Federated learning" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +msgid "" +"Federated learning simply reverses this approach. It enables machine " +"learning on distributed data by moving the training to the data, instead " +"of moving the data to the training. Here's the single-sentence " +"explanation:" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 +msgid "Central machine learning: move the data to the computation" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 +msgid "Federated (machine) learning: move the computation to the data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +msgid "" +"By doing so, it enables us to use machine learning (and other data " +"science approaches) in areas where it wasn't possible before. We can now " +"train excellent medical AI models by enabling different hospitals to work" +" together. We can solve financial fraud by training AI models on the data" +" of different financial institutions. We can build novel privacy-" +"enhancing applications (such as secure messaging) that have better built-" +"in AI than their non-privacy-enhancing alternatives. And those are just a" +" few of the examples that come to mind. As we deploy federated learning, " +"we discover more and more areas that can suddenly be reinvented because " +"they now have access to vast amounts of previously inaccessible data." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +msgid "" +"So how does federated learning work, exactly? Let's start with an " +"intuitive explanation." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 +msgid "Federated learning in five steps" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 +msgid "Step 0: Initialize global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 +msgid "" +"We start by initializing the model on the server. This is exactly the " +"same in classic centralized learning: we initialize the model parameters," +" either randomly or from a previously saved checkpoint." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 +msgid "|5b1408eec0d746cdb91162a9107b6089|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 +msgid "Initialize global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +msgid "" +"Step 1: Send model to a number of connected organizations/devices (client" +" nodes)" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 +msgid "" +"Next, we send the parameters of the global model to the connected client " +"nodes (think: edge devices like smartphones or servers belonging to " +"organizations). This is to ensure that each participating node starts " +"their local training using the same model parameters. We often use only a" +" few of the connected nodes instead of all nodes. 
The reason for this is " +"that selecting more and more client nodes has diminishing returns." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 +msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 +msgid "Send global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +msgid "" +"Step 2: Train model locally on the data of each organization/device " +"(client node)" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +msgid "" +"Now that all (selected) client nodes have the latest version of the " +"global model parameters, they start the local training. They use their " +"own local dataset to train their own local model. They don't train the " +"model until full convergence, but they only train for a little while. " +"This could be as little as one epoch on the local data, or even just a " +"few steps (mini-batches)." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 +msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 +msgid "Train on local data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 +msgid "Step 3: Return model updates back to the server" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 +msgid "" +"After local training, each client node has a slightly different version " +"of the model parameters they originally received. The parameters are all " +"different because each client node has different examples in its local " +"dataset. The client nodes then send those model updates back to the " +"server. The model updates they send can either be the full model " +"parameters or just the gradients that were accumulated during local " +"training." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 +msgid "|ec1fe880237247e0975f52766775ab84|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 +msgid "Send model updates" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 +msgid "Step 4: Aggregate model updates into a new global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 +msgid "" +"The server receives model updates from the selected client nodes. If it " +"selected 100 client nodes, it now has 100 slightly different versions of " +"the original global model, each trained on the local data of one client. " +"But didn't we want to have one model that contains the learnings from the" +" data of all 100 client nodes?" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 +msgid "" +"In order to get one single model, we have to combine all the model " +"updates we received from the client nodes. This process is called " +"*aggregation*, and there are many different ways to do it. The most basic" +" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " +"`__), often abbreviated as *FedAvg*. " +"*FedAvg* takes the 100 model updates and, as the name suggests, averages " +"them. To be more precise, it takes the *weighted average* of the model " +"updates, weighted by the number of examples each client used for " +"training. The weighting is important to make sure that each data example " +"has the same \"influence\" on the resulting global model. 
If one client " +"has 10 examples, and another client has 100 examples, then - without " +"weighting - each of the 10 examples would influence the global model ten " +"times as much as each of the 100 examples." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 +msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 +msgid "Aggregate model updates" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 +msgid "Step 5: Repeat steps 1 to 4 until the model converges" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 +msgid "" +"Steps 1 to 4 are what we call a single round of federated learning. The " +"global model parameters get sent to the participating client nodes (step " +"1), the client nodes train on their local data (step 2), they send their " +"updated models to the server (step 3), and the server then aggregates the" +" model updates to get a new version of the global model (step 4)." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 +msgid "" +"During a single round, each client node that participates in that " +"iteration only trains for a little while. This means that after the " +"aggregation step (step 4), we have a model that has been trained on all " +"the data of all participating client nodes, but only for a little while. " +"We then have to repeat this training process over and over again to " +"eventually arrive at a fully trained model that performs well across the " +"data of all client nodes." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 +msgid "" +"Congratulations, you now understand the basics of federated learning. " +"There's a lot more to discuss, of course, but that was federated learning" +" in a nutshell. In later parts of this tutorial, we will go into more " +"detail. Interesting questions include: How can we select the best client " +"nodes that should participate in the next round? What's the best way to " +"aggregate model updates? How can we handle failing client nodes " +"(stragglers)?" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 +msgid "" +"Just like we can train a model on the decentralized data of different " +"client nodes, we can also evaluate the model on that data to receive " +"valuable metrics. This is called federated evaluation, sometimes " +"abbreviated as FE. In fact, federated evaluation is an integral part of " +"most federated learning systems." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 +msgid "Federated analytics" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 +msgid "" +"In many cases, machine learning isn't necessary to derive value from " +"data. Data analysis can yield valuable insights, but again, there's often" +" not enough data to get a clear answer. What's the average age at which " +"people develop a certain type of health condition? Federated analytics " +"enables such queries over multiple client nodes. It is usually used in " +"conjunction with other privacy-enhancing technologies like secure " +"aggregation to prevent the server from seeing the results submitted by " +"individual client nodes." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 +msgid "" +"Differential privacy (DP) is often mentioned in the context of Federated " +"Learning. 
It is a privacy-preserving method used when analyzing and "
+"sharing statistical data, ensuring the privacy of individual "
+"participants. DP achieves this by adding statistical noise to the model "
+"updates, ensuring any individual participants’ information cannot be "
+"distinguished or re-identified. This technique can be considered an "
+"optimization that provides a quantifiable privacy protection measure."
+msgstr ""
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326
+msgid "Flower"
+msgstr ""
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328
+msgid ""
+"Federated learning, federated evaluation, and federated analytics require"
+" infrastructure to move machine learning models back and forth, train and"
+" evaluate them on local data, and then aggregate the updated models. "
+"Flower provides the infrastructure to do exactly that in an easy, "
+"scalable, and secure way. In short, Flower presents a unified approach to"
+" federated learning, analytics, and evaluation. It allows the user to "
+"federate any workload, any ML framework, and any programming language."
+msgstr ""
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334
+msgid "|ff726bc5505e432388ee2fdd6ef420b9|"
+msgstr ""
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340
+msgid ""
+"Flower federated learning server and client nodes (car, scooter, personal"
+" computer, roomba, and phone)"
+msgstr ""
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353
+msgid ""
+"Congratulations, you just learned the basics of federated learning and "
+"how it relates to the classic (centralized) machine learning!"
+msgstr ""
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355
+msgid ""
+"In the next part of this tutorial, we are going to build a first "
+"federated learning system with Flower."
+msgstr ""
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373
+msgid ""
+"The `Flower Federated Learning Tutorial - Part 1 "
+"`__ shows how to build a simple federated learning system "
+"with PyTorch and Flower."
+msgstr ""
+
+#~ msgid ""
+#~ "Configuring and setting up the "
+#~ ":code:`Dockerfile` as well as the configuration"
+#~ " for the devcontainer can be a "
+#~ "bit more involved. The good thing "
+#~ "is you won't have to do it. "
+#~ "Usually it should be enough to "
+#~ "install Docker on your system and "
+#~ "ensure it's available on your command "
+#~ "line. Additionally, install the `VSCode "
+#~ "Containers Extension `_."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "``flwr = { path = "
+#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\" }`` "
+#~ "(without extras)"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "``flwr = { path = "
+#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\", extras ="
+#~ " [\"simulation\"] }`` (with extras)"
+#~ msgstr ""
+
+#~ msgid "Upload the whl (e.g., ``flwr-1.7.0-py3-none-any.whl``)"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Change ``!pip install -q 'flwr[simulation]'"
+#~ " torch torchvision matplotlib`` to ``!pip"
+#~ " install -q 'flwr-1.7.0-py3-none-"
+#~ "any.whl[simulation]' torch torchvision matplotlib``"
+#~ msgstr ""
+
+#~ msgid "Before the release"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Update the changelog (``changelog.md``) with"
+#~ " all relevant changes that happened "
+#~ "after the last release. 
If the " +#~ "last release was tagged ``v1.2.0``, you" +#~ " can use the following URL to " +#~ "see all commits that got merged " +#~ "into ``main`` since then:" +#~ msgstr "" + +#~ msgid "" +#~ "`GitHub: Compare v1.2.0...main " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "Thank the authors who contributed since" +#~ " the last release. This can be " +#~ "done by running the ``./dev/add-" +#~ "shortlog.sh`` convenience script (it can " +#~ "be ran multiple times and will " +#~ "update the names in the list if" +#~ " new contributors were added in the" +#~ " meantime)." +#~ msgstr "" + +#~ msgid "" +#~ "Update the ``changelog.md`` section header " +#~ "``Unreleased`` to contain the version " +#~ "number and date for the release " +#~ "you are building. Create a pull " +#~ "request with the change." +#~ msgstr "" + +#~ msgid "" +#~ "Tag the release commit with the " +#~ "version number as soon as the PR" +#~ " is merged: ``git tag v0.12.3``, then" +#~ " ``git push --tags``. This will " +#~ "create a draft release on GitHub " +#~ "containing the correct artifacts and the" +#~ " relevant part of the changelog." +#~ msgstr "" + +#~ msgid "" +#~ "Note that, in order to build the" +#~ " documentation locally (with ``poetry run" +#~ " make html``, like described below), " +#~ "`Pandoc _` needs " +#~ "to be installed on the system." +#~ msgstr "" + +#~ msgid "" +#~ "If you're familiar with how contributing" +#~ " on GitHub works, you can directly" +#~ " checkout our `getting started guide " +#~ "for contributors `_ and examples " +#~ "of `good first contributions " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "This will create a `flower/` (or " +#~ "the name of your fork if you " +#~ "renamed it) folder in the current " +#~ "working directory." +#~ msgstr "" + +#~ msgid "Otherwise you can always find this option in the `Branches` page." +#~ msgstr "" + +#~ msgid "" +#~ "Once you click the `Compare & pull" +#~ " request` button, you should see " +#~ "something similar to this:" +#~ msgstr "" + +#~ msgid "Find the source file in `doc/source`" +#~ msgstr "" + +#~ msgid "" +#~ "Make the change in the `.rst` file" +#~ " (beware, the dashes under the title" +#~ " should be the same length as " +#~ "the title itself)" +#~ msgstr "" + +#~ msgid "Change the file name to `save-progress.rst`" +#~ msgstr "" + +#~ msgid "Add a redirect rule to `doc/source/conf.py`" +#~ msgstr "" + +#~ msgid "" +#~ "This will cause a redirect from " +#~ "`saving-progress.html` to `save-progress.html`," +#~ " old links will continue to work." +#~ msgstr "" + +#~ msgid "" +#~ "For the lateral navigation bar to " +#~ "work properly, it is very important " +#~ "to update the `index.rst` file as " +#~ "well. This is where we define the" +#~ " whole arborescence of the navbar." +#~ msgstr "" + +#~ msgid "Find and modify the file name in `index.rst`" +#~ msgstr "" + +#~ msgid "Add CI job to deploy the staging system when the `main` branch changes" +#~ msgstr "" + +#~ msgid "`Python 3.7 `_ or above" +#~ msgstr "" + +#~ msgid "" +#~ "First, clone the `Flower repository " +#~ "`_ from GitHub::" +#~ msgstr "" + +#~ msgid "" +#~ "Second, create a virtual environment " +#~ "(and activate it). 
If you chose to" +#~ " use :code:`pyenv` (with the :code" +#~ ":`pyenv-virtualenv` plugin) and already " +#~ "have it installed , you can use" +#~ " the following convenience script (by " +#~ "default it will use :code:`Python " +#~ "3.8.17`, but you can change it by" +#~ " providing a specific :code:``)::" +#~ msgstr "" + +#~ msgid "" +#~ "If you don't have :code:`pyenv` " +#~ "installed, you can use the following " +#~ "script that will install pyenv, set " +#~ "it up and create the virtual " +#~ "environment (with :code:`Python 3.8.17` by " +#~ "default)::" +#~ msgstr "" + +#~ msgid "" +#~ "Third, install the Flower package in " +#~ "development mode (think :code:`pip install " +#~ "-e`) along with all necessary " +#~ "dependencies::" +#~ msgstr "" + +#~ msgid "" +#~ "Developers could run the full set " +#~ "of Github Actions workflows under their" +#~ " local environment by using `Act " +#~ "_`. Please refer to" +#~ " the installation instructions under the" +#~ " linked repository and run the next" +#~ " command under Flower main cloned " +#~ "repository folder::" +#~ msgstr "" + +#~ msgid "" +#~ "Please note that these components are" +#~ " still experimental, the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." +#~ msgstr "" + +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. Therefore, we use " +#~ "an adaptive approach [andrew]_ that " +#~ "continuously adjusts the clipping threshold" +#~ " to track a prespecified quantile of" +#~ " the update norm distribution." +#~ msgstr "" + +#~ msgid "" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realises the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." +#~ msgstr "" + +#~ msgid "" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." +#~ msgstr "" + +#~ msgid "" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. 
On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." +#~ msgstr "" + +#~ msgid "" +#~ "McMahan, H. Brendan, et al. \"Learning" +#~ " differentially private recurrent language " +#~ "models.\" arXiv preprint arXiv:1710.06963 " +#~ "(2017)." +#~ msgstr "" + +#~ msgid "" +#~ "Andrew, Galen, et al. \"Differentially " +#~ "private learning with adaptive clipping.\" " +#~ "Advances in Neural Information Processing " +#~ "Systems 34 (2021): 17455-17466." +#~ msgstr "" + +#~ msgid "" +#~ "The following command can be used " +#~ "to verfiy if Flower was successfully " +#~ "installed. If everything worked, it " +#~ "should print the version of Flower " +#~ "to the command line::" +#~ msgstr "" + +#~ msgid "flwr (Python API reference)" +#~ msgstr "" + +#~ msgid "start_client" +#~ msgstr "" + +#~ msgid "start_numpy_client" +#~ msgstr "" + +#~ msgid "start_simulation" +#~ msgstr "" + +#~ msgid "server.start_server" +#~ msgstr "" + +#~ msgid "server.strategy" +#~ msgstr "" + +#~ msgid "server.strategy.Strategy" +#~ msgstr "" + +#~ msgid "server.strategy.FedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.FedAvgM" +#~ msgstr "" + +#~ msgid "server.strategy.FedMedian" +#~ msgstr "" + +#~ msgid "server.strategy.QFedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.FaultTolerantFedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.FedOpt" +#~ msgstr "" + +#~ msgid "server.strategy.FedProx" +#~ msgstr "" + +#~ msgid "server.strategy.FedAdagrad" +#~ msgstr "" + +#~ msgid "server.strategy.FedAdam" +#~ msgstr "" + +#~ msgid "server.strategy.FedYogi" +#~ msgstr "" + +#~ msgid "server.strategy.FedTrimmedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.Krum" +#~ msgstr "" + +#~ msgid "server.strategy.FedXgbNnAvg" +#~ msgstr "" + +#~ msgid "server.strategy.DPFedAvgAdaptive" +#~ msgstr "" + +#~ msgid "server.strategy.DPFedAvgFixed" +#~ msgstr "" + +#~ msgid "" +#~ "**Fix the incorrect return types of " +#~ "Strategy** " +#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ msgstr "" + +#~ msgid "" +#~ "The types of the return values in" +#~ " the docstrings in two methods " +#~ "(`aggregate_fit` and `aggregate_evaluate`) now " +#~ "match the hint types in the code." +#~ msgstr "" + +#~ msgid "" +#~ "Using the `client_fn`, Flower clients " +#~ "can interchangeably run as standalone " +#~ "processes (i.e. via `start_client`) or " +#~ "in simulation (i.e. via `start_simulation`)" +#~ " without requiring changes to how the" +#~ " client class is defined and " +#~ "instantiated. Calling `start_numpy_client` is " +#~ "now deprecated." 
+#~ msgstr "" + +#~ msgid "" +#~ "**Update Flower Examples** " +#~ "([#2384](https://github.com/adap/flower/pull/2384)), " +#~ "([#2425](https://github.com/adap/flower/pull/2425))" +#~ msgstr "" + +#~ msgid "" +#~ "**General updates to baselines** " +#~ "([#2301](https://github.com/adap/flower/pull/2301), " +#~ "[#2305](https://github.com/adap/flower/pull/2305), " +#~ "[#2307](https://github.com/adap/flower/pull/2307), " +#~ "[#2327](https://github.com/adap/flower/pull/2327), " +#~ "[#2435](https://github.com/adap/flower/pull/2435))" +#~ msgstr "" + +#~ msgid "" +#~ "**General updates to the simulation " +#~ "engine** ([#2331](https://github.com/adap/flower/pull/2331), " +#~ "[#2447](https://github.com/adap/flower/pull/2447), " +#~ "[#2448](https://github.com/adap/flower/pull/2448))" +#~ msgstr "" + +#~ msgid "" +#~ "**General improvements** " +#~ "([#2309](https://github.com/adap/flower/pull/2309), " +#~ "[#2310](https://github.com/adap/flower/pull/2310), " +#~ "[2313](https://github.com/adap/flower/pull/2313), " +#~ "[#2316](https://github.com/adap/flower/pull/2316), " +#~ "[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," +#~ " [#2360](https://github.com/adap/flower/pull/2360), " +#~ "[#2402](https://github.com/adap/flower/pull/2402), " +#~ "[#2446](https://github.com/adap/flower/pull/2446))" +#~ msgstr "" + +#~ msgid "" +#~ "`flower-superlink --driver-api-address " +#~ "\"0.0.0.0:8081\" --fleet-api-address " +#~ "\"0.0.0.0:8086\"`" +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" + +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using XGBoost and " +#~ "Flower!" +#~ msgstr "" + +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." +#~ msgstr "" + +#~ msgid "" +#~ "In this notebook, we'll build a " +#~ "federated learning system using Flower " +#~ "and PyTorch. In part 1, we use " +#~ "PyTorch for the model training pipeline" +#~ " and data loading. In part 2, " +#~ "we continue to federate the PyTorch-" +#~ "based pipeline using Flower." 
+#~ msgstr "" + +#~ msgid "" +#~ "Next, we install the necessary packages" +#~ " for PyTorch (``torch`` and " +#~ "``torchvision``) and Flower (``flwr``):" +#~ msgstr "" + +#~ msgid "" +#~ "Federated learning can be applied to " +#~ "many different types of tasks across " +#~ "different domains. In this tutorial, we" +#~ " introduce federated learning by training" +#~ " a simple convolutional neural network " +#~ "(CNN) on the popular CIFAR-10 dataset." +#~ " CIFAR-10 can be used to train " +#~ "image classifiers that distinguish between " +#~ "images from ten different classes:" +#~ msgstr "" + +#~ msgid "" +#~ "Each organization will act as a " +#~ "client in the federated learning system." +#~ " So having ten organizations participate" +#~ " in a federation means having ten " +#~ "clients connected to the federated " +#~ "learning server:" +#~ msgstr "" + +#~ msgid "" +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap the resulting partitions by " +#~ "creating a PyTorch ``DataLoader`` for " +#~ "each of them:" +#~ msgstr "" + +#~ msgid "|ed6498a023f2477a9ccd57ee4514bda4|" +#~ msgstr "" + +#~ msgid "|5a4f742489ac4f819afefdd4dc9ab272|" +#~ msgstr "" + +#~ msgid "|3331c80cd05045f6a56524d8e3e76d0c|" +#~ msgstr "" + +#~ msgid "|4987b26884ec4b2c8f06c1264bcebe60|" +#~ msgstr "" + +#~ msgid "|ec8ae2d778aa493a986eb2fa29c220e5|" +#~ msgstr "" + +#~ msgid "|b8949d0669fe4f8eadc9a4932f4e9c57|" +#~ msgstr "" + +#~ msgid "|94ff30bdcd09443e8488b5f29932a541|" +#~ msgstr "" + +#~ msgid "|48dccf1d6d0544bba8917d2783a47719|" +#~ msgstr "" + +#~ msgid "|0366618db96b4f329f0d4372d1150fde|" +#~ msgstr "" + +#~ msgid "|ac80eddc76e6478081b1ca35eed029c0|" +#~ msgstr "" + +#~ msgid "|1ac94140c317450e89678db133c7f3c2|" +#~ msgstr "" + +#~ msgid "|f8850c6e96fc4430b55e53bba237a7c0|" +#~ msgstr "" + +#~ msgid "|4a368fdd3fc34adabd20a46752a68582|" +#~ msgstr "" + +#~ msgid "|40f69c17bb444652a7c8dfe577cd120e|" +#~ msgstr "" + +#~ msgid "" +#~ "Please follow the first section on " +#~ "`Run Flower using Docker " +#~ "`_ which covers this" +#~ " step in more detail." +#~ msgstr "" + +#~ msgid "" +#~ "Since `Flower 1.5 `_ we have " +#~ "introduced translations to our doc " +#~ "pages, but, as you might have " +#~ "noticed, the translations are often " +#~ "imperfect. If you speak languages other" +#~ " than English, you might be able " +#~ "to help us in our effort to " +#~ "make Federated Learning accessible to as" +#~ " many people as possible by " +#~ "contributing to those translations! This " +#~ "might also be a great opportunity " +#~ "for those wanting to become open " +#~ "source contributors with little prerequistes." +#~ msgstr "" + +#~ msgid "" +#~ "You input your translation in the " +#~ "textbox at the top and then, once" +#~ " you are happy with it, you " +#~ "either press ``Save and continue`` (to" +#~ " save the translation and go to " +#~ "the next untranslated string), ``Save " +#~ "and stay`` (to save the translation " +#~ "and stay on the same page), " +#~ "``Suggest`` (to add your translation to" +#~ " suggestions for other users to " +#~ "view), or ``Skip`` (to go to the" +#~ " next untranslated string without saving" +#~ " anything)." +#~ msgstr "" + +#~ msgid "" +#~ "The first thing we need to do " +#~ "is to define a message type for" +#~ " the RPC system in :code:`transport.proto`." 
+#~ " Note that we have to do it " +#~ "for both the request and response " +#~ "messages. For more details on the " +#~ "syntax of proto3, please see the " +#~ "`official documentation `_." +#~ msgstr "" + +#~ msgid "" +#~ "Source: `Official VSCode documentation " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "`Developing inside a Container " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "`Remote development in Containers " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "If you are not familiar with " +#~ "Flower Baselines, you should probably " +#~ "check-out our `contributing guide for " +#~ "baselines `_." +#~ msgstr "" + +#~ msgid "" +#~ "You should then check out the open" +#~ " `issues " +#~ "`_" +#~ " for baseline requests. If you find" +#~ " a baseline that you'd like to " +#~ "work on and that has no assignes," +#~ " feel free to assign it to " +#~ "yourself and start working on it!" +#~ msgstr "" + +#~ msgid "" +#~ "If you're familiar with how contributing" +#~ " on GitHub works, you can directly" +#~ " checkout our `getting started guide " +#~ "for contributors `_." +#~ msgstr "" + +#~ msgid "" +#~ "Git is a distributed version control " +#~ "tool. This allows for an entire " +#~ "codebase's history to be stored and " +#~ "every developer's machine. It is a " +#~ "software that will need to be " +#~ "installed on your local machine, you " +#~ "can follow this `guide " +#~ "`_ to set it up." +#~ msgstr "" + +#~ msgid "" +#~ "A fork is a personal copy of " +#~ "a GitHub repository. To create one " +#~ "for Flower, you must navigate to " +#~ "https://github.com/adap/flower (while connected to" +#~ " your GitHub account) and click the" +#~ " ``Fork`` button situated on the top" +#~ " right of the page." +#~ msgstr "" + +#~ msgid "" +#~ "Now we will add an upstream " +#~ "address to our repository. Still in " +#~ "the same directroy, we must run " +#~ "the following command:" +#~ msgstr "" + +#~ msgid "" +#~ "This can be achieved by following " +#~ "this `getting started guide for " +#~ "contributors`_ (note that you won't need" +#~ " to clone the repository). Once you" +#~ " are able to write code and " +#~ "test it, you can finally start " +#~ "making changes!" +#~ msgstr "" + +#~ msgid "" +#~ "For our documentation, we’ve started to" +#~ " use the `Diàtaxis framework " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "Our “How to” guides should have " +#~ "titles that continue the sencence “How" +#~ " to …”, for example, “How to " +#~ "upgrade to Flower 1.0”." +#~ msgstr "" + +#~ msgid "" +#~ "This issue is about changing the " +#~ "title of a doc from present " +#~ "continious to present simple." +#~ msgstr "" + +#~ msgid "" +#~ "Let's take the example of “Saving " +#~ "Progress” which we changed to “Save " +#~ "Progress”. Does this pass our check?" +#~ msgstr "" + +#~ msgid "Before: ”How to saving progress” ❌" +#~ msgstr "" + +#~ msgid "After: ”How to save progress” ✅" +#~ msgstr "" + +#~ msgid "" +#~ "This is a tiny change, but it’ll" +#~ " allow us to test your end-" +#~ "to-end setup. 
After cloning and "
+#~ "setting up the Flower repo, here’s "
+#~ "what you should do:"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Build the docs and check the "
+#~ "result: ``_"
+#~ msgstr ""
+
+#~ msgid "Here’s how to change the file name:"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Commit the changes (commit messages are"
+#~ " always imperative: “Do something”, in "
+#~ "this case “Change …”)"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "`Good first contributions "
+#~ "`_, where you should"
+#~ " particularly look into the "
+#~ ":code:`baselines` contributions."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "If the section is completely empty "
+#~ "(without any token) or non-existent, "
+#~ "the changelog will just contain the "
+#~ "title of the PR for the changelog"
+#~ " entry, without any description."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Flower uses :code:`pyproject.toml` to manage"
+#~ " dependencies and configure development "
+#~ "tools (the ones which support it). "
+#~ "Poetry is a build tool which "
+#~ "supports `PEP 517 "
+#~ "`_."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "This tutorial will show you how to"
+#~ " use Flower to build a federated "
+#~ "version of an existing machine learning"
+#~ " workload with `FedBN `_, a federated training strategy"
+#~ " designed for non-iid data. We "
+#~ "are using PyTorch to train a "
+#~ "Convolutional Neural Network (with Batch "
+#~ "Normalization layers) on the CIFAR-10 "
+#~ "dataset. When applying FedBN, only a few"
+#~ " changes are needed compared to `Example: "
+#~ "PyTorch - From Centralized To Federated"
+#~ " `_."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "All files are revised based on "
+#~ "`Example: PyTorch - From Centralized To"
+#~ " Federated `_. The "
+#~ "only thing to do is to modify the"
+#~ " file called :code:`cifar.py`; the revised part"
+#~ " is shown below:"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "So far this should all look fairly"
+#~ " familiar if you've used PyTorch "
+#~ "before. Let's take the next step "
+#~ "and use what we've built to create"
+#~ " a federated learning system with "
+#~ "FedBN; the system consists of one "
+#~ "server and two clients."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "If you have read `Example: PyTorch "
+#~ "- From Centralized To Federated "
+#~ "`_, the following"
+#~ " parts are easy to follow; only the "
+#~ ":code:`get_parameters` and :code:`set_parameters` "
+#~ "functions in :code:`client.py` need to be "
+#~ "revised. If not, please read the "
+#~ "`Example: PyTorch - From Centralized To"
+#~ " Federated `_ first."
+#~ msgstr ""
+
+#~ msgid "Example: Walk-Through PyTorch & MNIST"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "In this tutorial we will learn "
+#~ "how to train a Convolutional Neural "
+#~ "Network on MNIST using Flower and "
+#~ "PyTorch."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Since we want to use PyTorch to"
+#~ " solve a computer vision task, let's"
+#~ " go ahead and install PyTorch and "
+#~ "the **torchvision** library:"
+#~ msgstr ""
+
+#~ msgid "Ready... Set... Train!"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Now that we have all our "
+#~ "dependencies installed, let's run a "
+#~ "simple distributed training with two "
+#~ "clients and one server. Our training "
+#~ "procedure and network architecture are "
+#~ "based on PyTorch's `Basic MNIST Example"
+#~ " `_. "
+#~ "This will allow you to see how easy"
+#~ " it is to wrap your code with"
+#~ " Flower and begin training in a "
+#~ "federated way. We provide you with "
+#~ "two helper scripts, namely *run-"
+#~ "server.sh*, and *run-clients.sh*. 
Don't " +#~ "be afraid to look inside, they are" +#~ " simple enough =)." +#~ msgstr "" + +#~ msgid "" +#~ "Go ahead and launch on a terminal" +#~ " the *run-server.sh* script first as" +#~ " follows:" +#~ msgstr "" + +#~ msgid "Now that the server is up and running, go ahead and launch the clients." +#~ msgstr "" + +#~ msgid "" +#~ "Et voilà! You should be seeing the" +#~ " training procedure and, after a few" +#~ " iterations, the test accuracy for " +#~ "each client." +#~ msgstr "" + +#~ msgid "Now, let's see what is really happening inside." +#~ msgstr "" + +#~ msgid "" +#~ "Inside the server helper script *run-" +#~ "server.sh* you will find the following" +#~ " code that basically runs the " +#~ ":code:`server.py`" +#~ msgstr "" + +#~ msgid "" +#~ "We can go a bit deeper and " +#~ "see that :code:`server.py` simply launches " +#~ "a server that will coordinate three " +#~ "rounds of training. Flower Servers are" +#~ " very customizable, but for simple " +#~ "workloads, we can start a server " +#~ "using the :ref:`start_server ` function and leave " +#~ "all the configuration possibilities at " +#~ "their default values, as seen below." +#~ msgstr "" + +#~ msgid "" +#~ "Next, let's take a look at the " +#~ "*run-clients.sh* file. You will see " +#~ "that it contains the main loop " +#~ "that starts a set of *clients*." +#~ msgstr "" + +#~ msgid "" +#~ "**cid**: is the client ID. It is" +#~ " an integer that uniquely identifies " +#~ "client identifier." +#~ msgstr "" + +#~ msgid "**sever_address**: String that identifies IP and port of the server." +#~ msgstr "" + +#~ msgid "" +#~ "**nb_clients**: This defines the number " +#~ "of clients being created. This piece " +#~ "of information is not required by " +#~ "the client, but it helps us " +#~ "partition the original MNIST dataset to" +#~ " make sure that every client is " +#~ "working on unique subsets of both " +#~ "*training* and *test* sets." +#~ msgstr "" + +#~ msgid "" +#~ "Again, we can go deeper and look" +#~ " inside :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`. After going through the" +#~ " argument parsing code at the " +#~ "beginning of our :code:`main` function, " +#~ "you will find a call to " +#~ ":code:`mnist.load_data`. This function is " +#~ "responsible for partitioning the original " +#~ "MNIST datasets (*training* and *test*) " +#~ "and returning a :code:`torch.utils.data.DataLoader`" +#~ " s for each of them. We then" +#~ " instantiate a :code:`PytorchMNISTClient` object" +#~ " with our client ID, our DataLoaders," +#~ " the number of epochs in each " +#~ "round, and which device we want to" +#~ " use for training (CPU or GPU)." +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`PytorchMNISTClient` object when " +#~ "finally passed to :code:`fl.client.start_client` " +#~ "along with the server's address as " +#~ "the training process begins." +#~ msgstr "" + +#~ msgid "A Closer Look" +#~ msgstr "" + +#~ msgid "" +#~ "Now, let's look closely into the " +#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" +#~ ".quickstart-pytorch.mnist` and see what it" +#~ " is doing:" +#~ msgstr "" + +#~ msgid "" +#~ "The first thing to notice is that" +#~ " :code:`PytorchMNISTClient` instantiates a CNN" +#~ " model inside its constructor" +#~ msgstr "" + +#~ msgid "" +#~ "The code for the CNN is available" +#~ " under :code:`quickstart-pytorch.mnist` and " +#~ "it is reproduced below. It is the" +#~ " same network found in `Basic MNIST" +#~ " Example " +#~ "`_." 
+#~ msgstr "" + +#~ msgid "" +#~ "The second thing to notice is that" +#~ " :code:`PytorchMNISTClient` class inherits from" +#~ " the :code:`fl.client.Client`, and hence it" +#~ " must implement the following methods:" +#~ msgstr "" + +#~ msgid "" +#~ "When comparing the abstract class to " +#~ "its derived class :code:`PytorchMNISTClient` " +#~ "you will notice that :code:`fit` calls" +#~ " a :code:`train` function and that " +#~ ":code:`evaluate` calls a :code:`test`: " +#~ "function." +#~ msgstr "" + +#~ msgid "" +#~ "These functions can both be found " +#~ "inside the same :code:`quickstart-" +#~ "pytorch.mnist` module:" +#~ msgstr "" + +#~ msgid "" +#~ "Observe that these functions encapsulate " +#~ "regular training and test loops and " +#~ "provide :code:`fit` and :code:`evaluate` with" +#~ " final statistics for each round. You" +#~ " could substitute them with your " +#~ "custom train and test loops and " +#~ "change the network architecture, and the" +#~ " entire example would still work " +#~ "flawlessly. As a matter of fact, " +#~ "why not try and modify the code" +#~ " to an example of your liking?" +#~ msgstr "" + +#~ msgid "Give It a Try" +#~ msgstr "" + +#~ msgid "" +#~ "Looking through the quickstart code " +#~ "description above will have given a " +#~ "good understanding of how *clients* and" +#~ " *servers* work in Flower, how to " +#~ "run a simple experiment, and the " +#~ "internals of a client wrapper. Here " +#~ "are a few things you could try " +#~ "on your own and get more " +#~ "experience with Flower:" +#~ msgstr "" + +#~ msgid "" +#~ "Try and change :code:`PytorchMNISTClient` so" +#~ " it can accept different architectures." +#~ msgstr "" + +#~ msgid "" +#~ "Modify the :code:`train` function so " +#~ "that it accepts different optimizers" +#~ msgstr "" + +#~ msgid "" +#~ "Modify the :code:`test` function so that" +#~ " it proves not only the top-1 " +#~ "(regular accuracy) but also the top-5" +#~ " accuracy?" +#~ msgstr "" + +#~ msgid "" +#~ "Go larger! Try to adapt the code" +#~ " to larger images and datasets. Why" +#~ " not try training on ImageNet with" +#~ " a ResNet-50?" +#~ msgstr "" + +#~ msgid "You are ready now. Enjoy learning in a federated way!" +#~ msgstr "" + +#~ msgid "Differential privacy" +#~ msgstr "" + +#~ msgid "" +#~ "Flower provides differential privacy (DP) " +#~ "wrapper classes for the easy integration" +#~ " of the central DP guarantees " +#~ "provided by DP-FedAvg into training " +#~ "pipelines defined in any of the " +#~ "various ML frameworks that Flower is " +#~ "compatible with." +#~ msgstr "" + +#~ msgid "" +#~ "Please note that these components are" +#~ " still experimental; the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." +#~ msgstr "" + +#~ msgid "" +#~ "The name DP-FedAvg is misleading " +#~ "since it can be applied on top " +#~ "of any FL algorithm that conforms " +#~ "to the general structure prescribed by" +#~ " the FedOpt family of algorithms." +#~ msgstr "" + +#~ msgid "DP-FedAvg" +#~ msgstr "" + +#~ msgid "" +#~ "DP-FedAvg, originally proposed by " +#~ "McMahan et al. [mcmahan]_ and extended" +#~ " by Andrew et al. [andrew]_, is " +#~ "essentially FedAvg with the following " +#~ "modifications." +#~ msgstr "" + +#~ msgid "" +#~ "**Clipping** : The influence of each " +#~ "client's update is bounded by clipping" +#~ " it. This is achieved by enforcing" +#~ " a cap on the L2 norm of " +#~ "the update, scaling it down if " +#~ "needed." 
+#~ msgstr "" + +#~ msgid "" +#~ "**Noising** : Gaussian noise, calibrated " +#~ "to the clipping threshold, is added " +#~ "to the average computed at the " +#~ "server." +#~ msgstr "" + +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. This variability is " +#~ "crucial in understanding its impact on" +#~ " differential privacy guarantees, emphasizing " +#~ "the need for an adaptive approach " +#~ "[andrew]_ that continuously adjusts the " +#~ "clipping threshold to track a " +#~ "prespecified quantile of the update norm" +#~ " distribution." +#~ msgstr "" + +#~ msgid "Simplifying Assumptions" +#~ msgstr "" + +#~ msgid "" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realizes the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." +#~ msgstr "" + +#~ msgid "" +#~ "**Fixed-size subsampling** :Fixed-size " +#~ "subsamples of the clients must be " +#~ "taken at each round, as opposed to" +#~ " variable-sized Poisson subsamples." +#~ msgstr "" + +#~ msgid "" +#~ "**Unweighted averaging** : The contributions" +#~ " from all the clients must weighted" +#~ " equally in the aggregate to " +#~ "eliminate the requirement for the server" +#~ " to know in advance the sum of" +#~ " the weights of all clients available" +#~ " for selection." +#~ msgstr "" + +#~ msgid "" +#~ "**No client failures** : The set " +#~ "of available clients must stay constant" +#~ " across all rounds of training. In" +#~ " other words, clients cannot drop out" +#~ " or fail." +#~ msgstr "" + +#~ msgid "" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold, while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." +#~ msgstr "" + +#~ msgid "" +#~ "These restrictions are in line with " +#~ "constraints imposed by Andrew et al. " +#~ "[andrew]_." +#~ msgstr "" + +#~ msgid "Customizable Responsibility for Noise injection" +#~ msgstr "" + +#~ msgid "" +#~ "In contrast to other implementations " +#~ "where the addition of noise is " +#~ "performed at the server, you can " +#~ "configure the site of noise injection" +#~ " to better match your threat model." +#~ " We provide users with the " +#~ "flexibility to set up the training " +#~ "such that each client independently adds" +#~ " a small amount of noise to the" +#~ " clipped update, with the result that" +#~ " simply aggregating the noisy updates " +#~ "is equivalent to the explicit addition" +#~ " of noise to the non-noisy " +#~ "aggregate at the server." +#~ msgstr "" + +#~ msgid "" +#~ "To be precise, if we let :math:`m`" +#~ " be the number of clients sampled " +#~ "each round and :math:`\\sigma_\\Delta` be " +#~ "the scale of the total Gaussian " +#~ "noise that needs to be added to" +#~ " the sum of the model updates, " +#~ "we can use simple maths to show" +#~ " that this is equivalent to each " +#~ "client adding noise with scale " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ msgstr "" + +#~ msgid "Wrapper-based approach" +#~ msgstr "" + +#~ msgid "" +#~ "Introducing DP to an existing workload" +#~ " can be thought of as adding an" +#~ " extra layer of security around it." 
+#~ " This inspired us to provide the " +#~ "additional server and client-side logic" +#~ " needed to make the training process" +#~ " differentially private as wrappers for " +#~ "instances of the :code:`Strategy` and " +#~ ":code:`NumPyClient` abstract classes respectively." +#~ " This wrapper-based approach has the" +#~ " advantage of being easily composable " +#~ "with other wrappers that someone might" +#~ " contribute to the Flower library in" +#~ " the future, e.g., for secure " +#~ "aggregation. Using Inheritance instead can " +#~ "be tedious because that would require" +#~ " the creation of new sub- classes " +#~ "every time a new class implementing " +#~ ":code:`Strategy` or :code:`NumPyClient` is " +#~ "defined." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 -msgid "" -"In many cases, machine learning isn't necessary to derive value from " -"data. Data analysis can yield valuable insights, but again, there's often" -" not enough data to get a clear answer. What's the average age at which " -"people develop a certain type of health condition? Federated analytics " -"enables such queries over multiple client nodes. It is usually used in " -"conjunction with other privacy-enhancing technologies like secure " -"aggregation to prevent the server from seeing the results submitted by " -"individual client nodes." -msgstr "" +#~ msgid "Server-side logic" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 -msgid "Differential Privacy" -msgstr "" +#~ msgid "" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean-valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 -msgid "" -"Differential privacy (DP) is often mentioned in the context of Federated " -"Learning. It is a privacy-preserving method used when analyzing and " -"sharing statistical data, ensuring the privacy of individual " -"participants. DP achieves this by adding statistical noise to the model " -"updates, ensuring any individual participants’ information cannot be " -"distinguished or re-identified. This technique can be considered an " -"optimization that provides a quantifiable privacy protection measure." -msgstr "" +#~ msgid "" +#~ "The server-side capabilities required " +#~ "for the original version of DP-" +#~ "FedAvg, i.e., the one which performed" +#~ " fixed clipping, can be completely " +#~ "captured with the help of wrapper " +#~ "logic for just the following two " +#~ "methods of the :code:`Strategy` abstract " +#~ "class." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 -msgid "Flower" -msgstr "" +#~ msgid "" +#~ ":code:`configure_fit()` : The config " +#~ "dictionary being sent by the wrapped " +#~ ":code:`Strategy` to each client needs to" +#~ " be augmented with an additional " +#~ "value equal to the clipping threshold" +#~ " (keyed under :code:`dpfedavg_clip_norm`) and," +#~ " if :code:`server_side_noising=true`, another one" +#~ " equal to the scale of the " +#~ "Gaussian noise that needs to be " +#~ "added at the client (keyed under " +#~ ":code:`dpfedavg_noise_stddev`). This entails " +#~ "*post*-processing of the results returned " +#~ "by the wrappee's implementation of " +#~ ":code:`configure_fit()`." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 -msgid "" -"Federated learning, federated evaluation, and federated analytics require" -" infrastructure to move machine learning models back and forth, train and" -" evaluate them on local data, and then aggregate the updated models. " -"Flower provides the infrastructure to do exactly that in an easy, " -"scalable, and secure way. In short, Flower presents a unified approach to" -" federated learning, analytics, and evaluation. It allows the user to " -"federate any workload, any ML framework, and any programming language." -msgstr "" +#~ msgid "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|08cb60859b07461588fe44e55810b050|" -msgstr "" +#~ msgid "" +#~ "We can't directly change the aggregation" +#~ " function of the wrapped strategy to" +#~ " force it to add noise to the" +#~ " aggregate, hence we simulate client-" +#~ "side noising to implement server-side" +#~ " noising." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 -msgid "" -"Flower federated learning server and client nodes (car, scooter, personal" -" computer, roomba, and phone)" -msgstr "" +#~ msgid "" +#~ "These changes have been put together " +#~ "into a class called :code:`DPFedAvgFixed`, " +#~ "whose constructor accepts the strategy " +#~ "being decorated, the clipping threshold " +#~ "and the number of clients sampled " +#~ "every round as compulsory arguments. The" +#~ " user is expected to specify the " +#~ "clipping threshold since the order of" +#~ " magnitude of the update norms is " +#~ "highly dependent on the model being " +#~ "trained and providing a default value" +#~ " would be misleading. 
The number of" +#~ " clients sampled at every round is" +#~ " required to calculate the amount of" +#~ " noise that must be added to " +#~ "each individual update, either by the" +#~ " server or the clients." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 -msgid "" -"Congratulations, you just learned the basics of federated learning and " -"how it relates to the classic (centralized) machine learning!" -msgstr "" +#~ msgid "" +#~ "The additional functionality required to " +#~ "facilitate adaptive clipping has been " +#~ "provided in :code:`DPFedAvgAdaptive`, a " +#~ "subclass of :code:`DPFedAvgFixed`. It " +#~ "overrides the above-mentioned methods to" +#~ " do the following." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 -msgid "" -"In the next part of this tutorial, we are going to build a first " -"federated learning system with Flower." -msgstr "" +#~ msgid "" +#~ ":code:`configure_fit()` : It intercepts the" +#~ " config dict returned by " +#~ ":code:`super.configure_fit()` to add the " +#~ "key-value pair " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " +#~ "which the client interprets as an " +#~ "instruction to include an indicator bit" +#~ " (1 if update norm <= clipping " +#~ "threshold, 0 otherwise) in the results" +#~ " returned by it." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 -msgid "" -"The `Flower Federated Learning Tutorial - Part 1 " -"`__ shows how to build a simple federated learning system " -"with PyTorch and Flower." -msgstr "" +#~ msgid "" +#~ ":code:`aggregate_fit()` : It follows a " +#~ "call to :code:`super.aggregate_fit()` with one" +#~ " to :code:`__update_clip_norm__()`, a procedure" +#~ " which adjusts the clipping threshold " +#~ "on the basis of the indicator bits" +#~ " received from the sampled clients." +#~ msgstr "" + +#~ msgid "Client-side logic" +#~ msgstr "" #~ msgid "" -#~ "Configuring and setting up the " -#~ ":code:`Dockerfile` as well the configuration" -#~ " for the devcontainer can be a " -#~ "bit more involved. The good thing " -#~ "is you want have to do it. " -#~ "Usually it should be enough to " -#~ "install Docker on your system and " -#~ "ensure its available on your command " -#~ "line. Additionally, install the `VSCode " -#~ "Containers Extension `_." +#~ "The client-side capabilities required " +#~ "can be completely captured through " +#~ "wrapper logic for just the :code:`fit()`" +#~ " method of the :code:`NumPyClient` abstract" +#~ " class. To be precise, we need " +#~ "to *post-process* the update computed" +#~ " by the wrapped client to clip " +#~ "it, if necessary, to the threshold " +#~ "value supplied by the server as " +#~ "part of the config dictionary. In " +#~ "addition to this, it may need to" +#~ " perform some extra work if either" +#~ " (or both) of the following keys " +#~ "are also present in the dict." #~ msgstr "" #~ msgid "" -#~ "``flwr = { path = " -#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\" }`` " -#~ "(without extras)" +#~ ":code:`dpfedavg_noise_stddev` : Generate and " +#~ "add the specified amount of noise " +#~ "to the clipped update." 
#~ msgstr "" #~ msgid "" -#~ "``flwr = { path = " -#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\", extras =" -#~ " [\"simulation\"] }`` (with extras)" +#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" +#~ " metrics dict in the :code:`FitRes` " +#~ "object being returned to the server " +#~ "with an indicator bit, calculated as " +#~ "described earlier." #~ msgstr "" -#~ msgid "Upload the whl (e.g., ``flwr-1.7.0-py3-none-any.whl``)" +#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" #~ msgstr "" #~ msgid "" -#~ "Change ``!pip install -q 'flwr[simulation]'" -#~ " torch torchvision matplotlib`` to ``!pip" -#~ " install -q 'flwr-1.7.0-py3-none-" -#~ "any.whl[simulation]' torch torchvision matplotlib``" +#~ "Assume you have trained for :math:`n`" +#~ " rounds with sampling fraction :math:`q`" +#~ " and noise multiplier :math:`z`. In " +#~ "order to calculate the :math:`\\epsilon` " +#~ "value this would result in for a" +#~ " particular :math:`\\delta`, the following " +#~ "script may be used." #~ msgstr "" -#~ msgid "Before the release" +#~ msgid "" +#~ "McMahan et al. \"Learning Differentially " +#~ "Private Recurrent Language Models.\" " +#~ "International Conference on Learning " +#~ "Representations (ICLR), 2017." #~ msgstr "" #~ msgid "" -#~ "Update the changelog (``changelog.md``) with" -#~ " all relevant changes that happened " -#~ "after the last release. If the " -#~ "last release was tagged ``v1.2.0``, you" -#~ " can use the following URL to " -#~ "see all commits that got merged " -#~ "into ``main`` since then:" +#~ "Andrew, Galen, et al. \"Differentially " +#~ "Private Learning with Adaptive Clipping.\" " +#~ "Advances in Neural Information Processing " +#~ "Systems (NeurIPS), 2021." #~ msgstr "" #~ msgid "" -#~ "`GitHub: Compare v1.2.0...main " -#~ "`_" +#~ "This can be achieved by customizing " +#~ "an existing strategy or by `implementing" +#~ " a custom strategy from scratch " +#~ "`_. Here's a nonsensical " +#~ "example that customizes :code:`FedAvg` by " +#~ "adding a custom ``\"hello\": \"world\"`` " +#~ "configuration key/value pair to the " +#~ "config dict of a *single client* " +#~ "(only the first client in the " +#~ "list, the other clients in this " +#~ "round to not receive this \"special\"" +#~ " config value):" #~ msgstr "" #~ msgid "" -#~ "Thank the authors who contributed since" -#~ " the last release. This can be " -#~ "done by running the ``./dev/add-" -#~ "shortlog.sh`` convenience script (it can " -#~ "be ran multiple times and will " -#~ "update the names in the list if" -#~ " new contributors were added in the" -#~ " meantime)." +#~ "More sophisticated implementations can use " +#~ ":code:`configure_fit` to implement custom " +#~ "client selection logic. A client will" +#~ " only participate in a round if " +#~ "the corresponding :code:`ClientProxy` is " +#~ "included in the the list returned " +#~ "from :code:`configure_fit`." #~ msgstr "" #~ msgid "" -#~ "Update the ``changelog.md`` section header " -#~ "``Unreleased`` to contain the version " -#~ "number and date for the release " -#~ "you are building. Create a pull " -#~ "request with the change." +#~ "More sophisticated implementations can use " +#~ ":code:`configure_evaluate` to implement custom " +#~ "client selection logic. A client will" +#~ " only participate in a round if " +#~ "the corresponding :code:`ClientProxy` is " +#~ "included in the the list returned " +#~ "from :code:`configure_evaluate`." 
#~ msgstr "" #~ msgid "" -#~ "Tag the release commit with the " -#~ "version number as soon as the PR" -#~ " is merged: ``git tag v0.12.3``, then" -#~ " ``git push --tags``. This will " -#~ "create a draft release on GitHub " -#~ "containing the correct artifacts and the" -#~ " relevant part of the changelog." +#~ "`How to run Flower using Docker " +#~ "`_" #~ msgstr "" #~ msgid "" -#~ "Note that, in order to build the" -#~ " documentation locally (with ``poetry run" -#~ " make html``, like described below), " -#~ "`Pandoc _` needs " -#~ "to be installed on the system." +#~ "Ray Dashboard: ``_" #~ msgstr "" #~ msgid "" -#~ "If you're familiar with how contributing" -#~ " on GitHub works, you can directly" -#~ " checkout our `getting started guide " -#~ "for contributors `_ and examples " -#~ "of `good first contributions " -#~ "`_." +#~ "Ray Metrics: ``_" +#~ msgstr "" + +#~ msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" #~ msgstr "" #~ msgid "" -#~ "This will create a `flower/` (or " -#~ "the name of your fork if you " -#~ "renamed it) folder in the current " -#~ "working directory." +#~ ":py:obj:`ClientApp `\\ " +#~ "\\(client\\_fn\\[\\, mods\\]\\)" #~ msgstr "" -#~ msgid "Otherwise you can always find this option in the `Branches` page." +#~ msgid ":py:obj:`flwr.server.driver `\\" +#~ msgstr "" + +#~ msgid "Flower driver SDK." +#~ msgstr "" + +#~ msgid "driver" #~ msgstr "" #~ msgid "" -#~ "Once you click the `Compare & pull" -#~ " request` button, you should see " -#~ "something similar to this:" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" #~ msgstr "" -#~ msgid "Find the source file in `doc/source`" +#~ msgid "" +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" #~ msgstr "" #~ msgid "" -#~ "Make the change in the `.rst` file" -#~ " (beware, the dashes under the title" -#~ " should be the same length as " -#~ "the title itself)" +#~ ":py:obj:`GrpcDriver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" #~ msgstr "" -#~ msgid "Change the file name to `save-progress.rst`" +#~ msgid "`GrpcDriver` provides access to the gRPC Driver API/service." #~ msgstr "" -#~ msgid "Add a redirect rule to `doc/source/conf.py`" +#~ msgid ":py:obj:`get_nodes `\\ \\(\\)" #~ msgstr "" #~ msgid "" -#~ "This will cause a redirect from " -#~ "`saving-progress.html` to `save-progress.html`," -#~ " old links will continue to work." +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(task\\_ids\\)" +#~ msgstr "" + +#~ msgid "Get task results." #~ msgstr "" #~ msgid "" -#~ "For the lateral navigation bar to " -#~ "work properly, it is very important " -#~ "to update the `index.rst` file as " -#~ "well. This is where we define the" -#~ " whole arborescence of the navbar." +#~ ":py:obj:`push_task_ins " +#~ "`\\ " +#~ "\\(task\\_ins\\_list\\)" #~ msgstr "" -#~ msgid "Find and modify the file name in `index.rst`" +#~ msgid "Schedule tasks." #~ msgstr "" -#~ msgid "Add CI job to deploy the staging system when the `main` branch changes" +#~ msgid "GrpcDriver" #~ msgstr "" -#~ msgid "`Python 3.7 `_ or above" +#~ msgid ":py:obj:`connect `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Connect to the Driver API." #~ msgstr "" #~ msgid "" -#~ "First, clone the `Flower repository " -#~ "`_ from GitHub::" +#~ ":py:obj:`create_run " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "Request for run ID." #~ msgstr "" #~ msgid "" -#~ "Second, create a virtual environment " -#~ "(and activate it). 
If you chose to" -#~ " use :code:`pyenv` (with the :code" -#~ ":`pyenv-virtualenv` plugin) and already " -#~ "have it installed , you can use" -#~ " the following convenience script (by " -#~ "default it will use :code:`Python " -#~ "3.8.17`, but you can change it by" -#~ " providing a specific :code:``)::" +#~ ":py:obj:`disconnect " +#~ "`\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Disconnect from the Driver API." #~ msgstr "" #~ msgid "" -#~ "If you don't have :code:`pyenv` " -#~ "installed, you can use the following " -#~ "script that will install pyenv, set " -#~ "it up and create the virtual " -#~ "environment (with :code:`Python 3.8.17` by " -#~ "default)::" +#~ ":py:obj:`get_nodes `\\" +#~ " \\(req\\)" +#~ msgstr "" + +#~ msgid "Get client IDs." #~ msgstr "" #~ msgid "" -#~ "Third, install the Flower package in " -#~ "development mode (think :code:`pip install " -#~ "-e`) along with all necessary " -#~ "dependencies::" +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(req\\)" #~ msgstr "" #~ msgid "" -#~ "Developers could run the full set " -#~ "of Github Actions workflows under their" -#~ " local environment by using `Act " -#~ "_`. Please refer to" -#~ " the installation instructions under the" -#~ " linked repository and run the next" -#~ " command under Flower main cloned " -#~ "repository folder::" +#~ ":py:obj:`push_task_ins " +#~ "`\\ \\(req\\)" #~ msgstr "" #~ msgid "" -#~ "Please note that these components are" -#~ " still experimental, the correct " -#~ "configuration of DP for a specific " -#~ "task is still an unsolved problem." +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "running the clients' jobs (i.e. their" +#~ " `fit()` method)." #~ msgstr "" #~ msgid "" -#~ "The distribution of the update norm " -#~ "has been shown to vary from " -#~ "task-to-task and to evolve as " -#~ "training progresses. Therefore, we use " -#~ "an adaptive approach [andrew]_ that " -#~ "continuously adjusts the clipping threshold" -#~ " to track a prespecified quantile of" -#~ " the update norm distribution." +#~ "Much effort went into a completely " +#~ "restructured Flower docs experience. The " +#~ "documentation on [flower.ai/docs](flower.ai/docs) is" +#~ " now divided into Flower Framework, " +#~ "Flower Baselines, Flower Android SDK, " +#~ "Flower iOS SDK, and code example " +#~ "projects." #~ msgstr "" #~ msgid "" -#~ "We make (and attempt to enforce) a" -#~ " number of assumptions that must be" -#~ " satisfied to ensure that the " -#~ "training process actually realises the " -#~ ":math:`(\\epsilon, \\delta)` guarantees the " -#~ "user has in mind when configuring " -#~ "the setup." +#~ "The first preview release of Flower " +#~ "Baselines has arrived! We're kickstarting " +#~ "Flower Baselines with implementations of " +#~ "FedOpt (FedYogi, FedAdam, FedAdagrad), FedBN," +#~ " and FedAvgM. Check the documentation " +#~ "on how to use [Flower " +#~ "Baselines](https://flower.ai/docs/using-baselines.html). " +#~ "With this first preview release we're" +#~ " also inviting the community to " +#~ "[contribute their own " +#~ "baselines](https://flower.ai/docs/contributing-baselines.html)." #~ msgstr "" #~ msgid "" -#~ "The first two are useful for " -#~ "eliminating a multitude of complications " -#~ "associated with calibrating the noise to" -#~ " the clipping threshold while the " -#~ "third one is required to comply " -#~ "with the assumptions of the privacy " -#~ "analysis." 
+#~ "Flower usage examples used to be " +#~ "bundled with Flower in a package " +#~ "called ``flwr_example``. We are migrating " +#~ "those examples to standalone projects to" +#~ " make them easier to use. All " +#~ "new examples are based in the " +#~ "directory `examples " +#~ "`_." #~ msgstr "" -#~ msgid "" -#~ "The first version of our solution " -#~ "was to define a decorator whose " -#~ "constructor accepted, among other things, " -#~ "a boolean valued variable indicating " -#~ "whether adaptive clipping was to be " -#~ "enabled or not. We quickly realized " -#~ "that this would clutter its " -#~ ":code:`__init__()` function with variables " -#~ "corresponding to hyperparameters of adaptive" -#~ " clipping that would remain unused " -#~ "when it was disabled. A cleaner " -#~ "implementation could be achieved by " -#~ "splitting the functionality into two " -#~ "decorators, :code:`DPFedAvgFixed` and " -#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" -#~ " classing the former. The constructors " -#~ "for both classes accept a boolean " -#~ "parameter :code:`server_side_noising`, which, as " -#~ "the name suggests, determines where " -#~ "noising is to be performed." +#~ msgid "The following examples are available as standalone projects." #~ msgstr "" -#~ msgid "" -#~ ":code:`aggregate_fit()`: We check whether any" -#~ " of the sampled clients dropped out" -#~ " or failed to upload an update " -#~ "before the round timed out. In " -#~ "that case, we need to abort the" -#~ " current round, discarding any successful" -#~ " updates that were received, and move" -#~ " on to the next one. On the " -#~ "other hand, if all clients responded " -#~ "successfully, we must force the " -#~ "averaging of the updates to happen " -#~ "in an unweighted manner by intercepting" -#~ " the :code:`parameters` field of " -#~ ":code:`FitRes` for each received update " -#~ "and setting it to 1. Furthermore, " -#~ "if :code:`server_side_noising=true`, each update " -#~ "is perturbed with an amount of " -#~ "noise equal to what it would have" -#~ " been subjected to had client-side" -#~ " noising being enabled. This entails " -#~ "*pre*-processing of the arguments to " -#~ "this method before passing them on " -#~ "to the wrappee's implementation of " -#~ ":code:`aggregate_fit()`." +#~ msgid "Quickstart TensorFlow/Keras" #~ msgstr "" #~ msgid "" -#~ "McMahan, H. Brendan, et al. \"Learning" -#~ " differentially private recurrent language " -#~ "models.\" arXiv preprint arXiv:1710.06963 " -#~ "(2017)." +#~ "`Quickstart TensorFlow (Tutorial) " +#~ "`_" #~ msgstr "" #~ msgid "" -#~ "Andrew, Galen, et al. \"Differentially " -#~ "private learning with adaptive clipping.\" " -#~ "Advances in Neural Information Processing " -#~ "Systems 34 (2021): 17455-17466." +#~ "`Quickstart PyTorch (Tutorial) " +#~ "`_" #~ msgstr "" #~ msgid "" -#~ "The following command can be used " -#~ "to verfiy if Flower was successfully " -#~ "installed. If everything worked, it " -#~ "should print the version of Flower " -#~ "to the command line::" +#~ "`PyTorch: From Centralized To Federated " +#~ "(Tutorial) `_" #~ msgstr "" -#~ msgid "flwr (Python API reference)" +#~ msgid "Legacy Examples (`flwr_example`)" #~ msgstr "" -#~ msgid "start_client" +#~ msgid "" +#~ "The useage examples in `flwr_example` " +#~ "are deprecated and will be removed " +#~ "in the future. New examples are " +#~ "provided as standalone projects in " +#~ "`examples `_." 
#~ msgstr "" -#~ msgid "start_numpy_client" +#~ msgid "Extra Dependencies" #~ msgstr "" -#~ msgid "start_simulation" +#~ msgid "" +#~ "The core Flower framework keeps a " +#~ "minimal set of dependencies. The " +#~ "examples demonstrate Flower in the " +#~ "context of different machine learning " +#~ "frameworks, so additional dependencies need" +#~ " to be installed before an example" +#~ " can be run." #~ msgstr "" -#~ msgid "server.start_server" +#~ msgid "For PyTorch examples::" #~ msgstr "" -#~ msgid "server.strategy" +#~ msgid "For TensorFlow examples::" #~ msgstr "" -#~ msgid "server.strategy.Strategy" +#~ msgid "For both PyTorch and TensorFlow examples::" #~ msgstr "" -#~ msgid "server.strategy.FedAvg" +#~ msgid "" +#~ "Please consult :code:`pyproject.toml` for a" +#~ " full list of possible extras " +#~ "(section :code:`[tool.poetry.extras]`)." #~ msgstr "" -#~ msgid "server.strategy.FedAvgM" +#~ msgid "PyTorch Examples" #~ msgstr "" -#~ msgid "server.strategy.FedMedian" +#~ msgid "" +#~ "Our PyTorch examples are based on " +#~ "PyTorch 1.7. They should work with " +#~ "other releases as well. So far, we" +#~ " provide the following examples." #~ msgstr "" -#~ msgid "server.strategy.QFedAvg" +#~ msgid "CIFAR-10 Image Classification" #~ msgstr "" -#~ msgid "server.strategy.FaultTolerantFedAvg" +#~ msgid "" +#~ "`CIFAR-10 and CIFAR-100 " +#~ "`_ are " +#~ "popular RGB image datasets. The Flower" +#~ " CIFAR-10 example uses PyTorch to " +#~ "train a simple CNN classifier in a" +#~ " federated learning setup with two " +#~ "clients." #~ msgstr "" -#~ msgid "server.strategy.FedOpt" +#~ msgid "First, start a Flower server:" #~ msgstr "" -#~ msgid "server.strategy.FedProx" +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" #~ msgstr "" -#~ msgid "server.strategy.FedAdagrad" +#~ msgid "Then, start the two clients in a new terminal window:" #~ msgstr "" -#~ msgid "server.strategy.FedAdam" +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" #~ msgstr "" -#~ msgid "server.strategy.FedYogi" +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." #~ msgstr "" -#~ msgid "server.strategy.FedTrimmedAvg" +#~ msgid "ImageNet-2012 Image Classification" #~ msgstr "" -#~ msgid "server.strategy.Krum" +#~ msgid "" +#~ "`ImageNet-2012 `_ is " +#~ "one of the major computer vision " +#~ "datasets. The Flower ImageNet example " +#~ "uses PyTorch to train a ResNet-18 " +#~ "classifier in a federated learning setup" +#~ " with ten clients." #~ msgstr "" -#~ msgid "server.strategy.FedXgbNnAvg" +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" #~ msgstr "" -#~ msgid "server.strategy.DPFedAvgAdaptive" +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" #~ msgstr "" -#~ msgid "server.strategy.DPFedAvgFixed" +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." #~ msgstr "" -#~ msgid "" -#~ "**Fix the incorrect return types of " -#~ "Strategy** " -#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ msgid "TensorFlow Examples" #~ msgstr "" #~ msgid "" -#~ "The types of the return values in" -#~ " the docstrings in two methods " -#~ "(`aggregate_fit` and `aggregate_evaluate`) now " -#~ "match the hint types in the code." +#~ "Our TensorFlow examples are based on " +#~ "TensorFlow 2.0 or newer. So far, " +#~ "we provide the following examples." #~ msgstr "" -#~ msgid "" -#~ "Using the `client_fn`, Flower clients " -#~ "can interchangeably run as standalone " -#~ "processes (i.e. 
via `start_client`) or " -#~ "in simulation (i.e. via `start_simulation`)" -#~ " without requiring changes to how the" -#~ " client class is defined and " -#~ "instantiated. Calling `start_numpy_client` is " -#~ "now deprecated." +#~ msgid "Fashion-MNIST Image Classification" #~ msgstr "" #~ msgid "" -#~ "**Update Flower Examples** " -#~ "([#2384](https://github.com/adap/flower/pull/2384)), " -#~ "([#2425](https://github.com/adap/flower/pull/2425))" +#~ "`Fashion-MNIST `_ is often used as " +#~ "the \"Hello, world!\" of machine " +#~ "learning. We follow this tradition and" +#~ " provide an example which samples " +#~ "random local datasets from Fashion-MNIST" +#~ " and trains a simple image " +#~ "classification model over those partitions." #~ msgstr "" -#~ msgid "" -#~ "**General updates to baselines** " -#~ "([#2301](https://github.com/adap/flower/pull/2301), " -#~ "[#2305](https://github.com/adap/flower/pull/2305), " -#~ "[#2307](https://github.com/adap/flower/pull/2307), " -#~ "[#2327](https://github.com/adap/flower/pull/2327), " -#~ "[#2435](https://github.com/adap/flower/pull/2435))" +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" #~ msgstr "" -#~ msgid "" -#~ "**General updates to the simulation " -#~ "engine** ([#2331](https://github.com/adap/flower/pull/2331), " -#~ "[#2447](https://github.com/adap/flower/pull/2447), " -#~ "[#2448](https://github.com/adap/flower/pull/2448))" +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" #~ msgstr "" #~ msgid "" -#~ "**General improvements** " -#~ "([#2309](https://github.com/adap/flower/pull/2309), " -#~ "[#2310](https://github.com/adap/flower/pull/2310), " -#~ "[2313](https://github.com/adap/flower/pull/2313), " -#~ "[#2316](https://github.com/adap/flower/pull/2316), " -#~ "[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," -#~ " [#2360](https://github.com/adap/flower/pull/2360), " -#~ "[#2402](https://github.com/adap/flower/pull/2402), " -#~ "[#2446](https://github.com/adap/flower/pull/2446))" +#~ "For more details, see " +#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +#~ msgstr "" + +#~ msgid ":fa:`eye,mr-1` Can Flower run on Juptyter Notebooks / Google Colab?" #~ msgstr "" #~ msgid "" -#~ "`flower-superlink --driver-api-address " -#~ "\"0.0.0.0:8081\" --fleet-api-address " -#~ "\"0.0.0.0:8086\"`" +#~ "`Flower meets KOSMoS `_." #~ msgstr "" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the full code example: " +#~ "[https://github.com/adap/flower/tree/main/examples/quickstart-" +#~ "huggingface](https://github.com/adap/flower/tree/main/examples" +#~ "/quickstart-huggingface)." #~ msgstr "" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. 
The string " -#~ ":code:`\"[::]:8080\"` tells the client which" -#~ " server to connect to. In our " -#~ "case we can run the server and " -#~ "the client on the same machine, " -#~ "therefore we use :code:`\"[::]:8080\"`. If " -#~ "we run a truly federated workload " -#~ "with the server and clients running " -#~ "on different machines, all that needs" -#~ " to change is the :code:`server_address`" -#~ " we point the client at." +#~ "First of all, for running the " +#~ "Flower Python server, it is recommended" +#~ " to create a virtual environment and" +#~ " run everything within a `virtualenv " +#~ "`_. " +#~ "For the Flower client implementation in" +#~ " iOS, it is recommended to use " +#~ "Xcode as our IDE." #~ msgstr "" #~ msgid "" -#~ "Let's build a horizontal federated " -#~ "learning system using XGBoost and " -#~ "Flower!" +#~ "Since CoreML does not allow the " +#~ "model parameters to be seen before " +#~ "training, and accessing the model " +#~ "parameters during or after the training" +#~ " can only be done by specifying " +#~ "the layer name, we need to know" +#~ " this informations beforehand, through " +#~ "looking at the model specification, " +#~ "which are written as proto files. " +#~ "The implementation can be seen in " +#~ ":code:`MLModelInspect`." #~ msgstr "" #~ msgid "" -#~ "Please refer to the `full code " -#~ "example `_ to learn " -#~ "more." +#~ "After we have all of the necessary" +#~ " informations, let's create our Flower " +#~ "client." #~ msgstr "" #~ msgid "" -#~ "In this notebook, we'll build a " -#~ "federated learning system using Flower " -#~ "and PyTorch. In part 1, we use " -#~ "PyTorch for the model training pipeline" -#~ " and data loading. In part 2, " -#~ "we continue to federate the PyTorch-" -#~ "based pipeline using Flower." +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongise Flower," +#~ " for example, PyTorch. This tutorial " +#~ "might be removed in future versions " +#~ "of Flower." #~ msgstr "" #~ msgid "" -#~ "Next, we install the necessary packages" -#~ " for PyTorch (``torch`` and " -#~ "``torchvision``) and Flower (``flwr``):" +#~ "It is recommended to create a " +#~ "virtual environment and run everything " +#~ "within this `virtualenv `_." #~ msgstr "" #~ msgid "" -#~ "Federated learning can be applied to " -#~ "many different types of tasks across " -#~ "different domains. In this tutorial, we" -#~ " introduce federated learning by training" -#~ " a simple convolutional neural network " -#~ "(CNN) on the popular CIFAR-10 dataset." -#~ " CIFAR-10 can be used to train " -#~ "image classifiers that distinguish between " -#~ "images from ten different classes:" +#~ "First of all, it is recommended to" +#~ " create a virtual environment and run" +#~ " everything within a `virtualenv " +#~ "`_." +#~ msgstr "" + +#~ msgid "Since we want to use scikt-learn, let's go ahead and install it:" #~ msgstr "" #~ msgid "" -#~ "Each organization will act as a " -#~ "client in the federated learning system." -#~ " So having ten organizations participate" -#~ " in a federation means having ten " -#~ "clients connected to the federated " -#~ "learning server:" +#~ "We load the MNIST dataset from " +#~ "`OpenML `_, a popular" +#~ " image classification dataset of " +#~ "handwritten digits for machine learning. " +#~ "The utility :code:`utils.load_mnist()` downloads " +#~ "the training and test data. 
The " +#~ "training set is split afterwards into" +#~ " 10 partitions with :code:`utils.partition()`." #~ msgstr "" #~ msgid "" -#~ "Let's now load the CIFAR-10 training " -#~ "and test set, partition them into " -#~ "ten smaller datasets (each split into" -#~ " training and validation set), and " -#~ "wrap the resulting partitions by " -#~ "creating a PyTorch ``DataLoader`` for " -#~ "each of them:" +#~ "Now that you have known how " +#~ "federated XGBoost work with Flower, it's" +#~ " time to run some more comprehensive" +#~ " experiments by customising the " +#~ "experimental settings. In the xgboost-" +#~ "comprehensive example (`full code " +#~ "`_), we provide more options " +#~ "to define various experimental setups, " +#~ "including aggregation strategies, data " +#~ "partitioning and centralised/distributed evaluation." +#~ " We also support `Flower simulation " +#~ "`_ making it easy to " +#~ "simulate large client cohorts in a " +#~ "resource-aware manner. Let's take a " +#~ "look!" #~ msgstr "" -#~ msgid "|ed6498a023f2477a9ccd57ee4514bda4|" +#~ msgid "|31e4b1afa87c4b968327bbeafbf184d4|" #~ msgstr "" -#~ msgid "|5a4f742489ac4f819afefdd4dc9ab272|" +#~ msgid "|c9d935b4284e4c389a33d86b33e07c0a|" #~ msgstr "" -#~ msgid "|3331c80cd05045f6a56524d8e3e76d0c|" +#~ msgid "|00727b5faffb468f84dd1b03ded88638|" #~ msgstr "" -#~ msgid "|4987b26884ec4b2c8f06c1264bcebe60|" +#~ msgid "|daf0cf0ff4c24fd29439af78416cf47b|" #~ msgstr "" -#~ msgid "|ec8ae2d778aa493a986eb2fa29c220e5|" +#~ msgid "|9f093007080d471d94ca90d3e9fde9b6|" #~ msgstr "" -#~ msgid "|b8949d0669fe4f8eadc9a4932f4e9c57|" +#~ msgid "|46a26e6150e0479fbd3dfd655f36eb13|" #~ msgstr "" -#~ msgid "|94ff30bdcd09443e8488b5f29932a541|" +#~ msgid "|3daba297595c4c7fb845d90404a6179a|" #~ msgstr "" -#~ msgid "|48dccf1d6d0544bba8917d2783a47719|" +#~ msgid "|5769874fa9c4455b80b2efda850d39d7|" #~ msgstr "" -#~ msgid "|0366618db96b4f329f0d4372d1150fde|" +#~ msgid "|ba47ffb421814b0f8f9fa5719093d839|" #~ msgstr "" -#~ msgid "|ac80eddc76e6478081b1ca35eed029c0|" +#~ msgid "|aeac5bf79cbf497082e979834717e01b|" #~ msgstr "" -#~ msgid "|1ac94140c317450e89678db133c7f3c2|" +#~ msgid "|ce27ed4bbe95459dba016afc42486ba2|" #~ msgstr "" -#~ msgid "|f8850c6e96fc4430b55e53bba237a7c0|" +#~ msgid "|ae94a7f71dda443cbec2385751427d41|" #~ msgstr "" -#~ msgid "|4a368fdd3fc34adabd20a46752a68582|" +#~ msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" #~ msgstr "" -#~ msgid "|40f69c17bb444652a7c8dfe577cd120e|" +#~ msgid "|08cb60859b07461588fe44e55810b050|" #~ msgstr "" diff --git a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po index f22b74db8896..86d96e5e6865 100644 --- a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po +++ b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po @@ -7,18 +7,17 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-02-13 11:23+0100\n" +"POT-Creation-Date: 2024-03-15 14:23+0000\n" "PO-Revision-Date: 2024-02-19 11:37+0000\n" "Last-Translator: Yan Gao \n" -"Language-Team: Chinese (Simplified) \n" "Language: zh_Hans\n" +"Language-Team: Chinese (Simplified) \n" +"Plural-Forms: nplurals=1; plural=0;\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"X-Generator: Weblate 5.4\n" -"Generated-By: Babel 2.13.1\n" +"Generated-By: Babel 2.14.0\n" #: ../../source/contributor-explanation-architecture.rst:2 msgid "Flower Architecture" @@ -85,9 +84,8 @@ msgstr "" #: 
../../source/contributor-how-to-build-docker-images.rst:19 msgid "" -"Please follow the first section on `Run Flower using Docker " -"`_ " -"which covers this step in more detail." +"Please follow the first section on :doc:`Run Flower using Docker ` which covers this step in more detail." msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:23 @@ -293,6 +291,7 @@ msgid "Contribute translations" msgstr "贡献译文" #: ../../source/contributor-how-to-contribute-translations.rst:4 +#, fuzzy msgid "" "Since `Flower 1.5 `_ we have introduced translations to " @@ -301,7 +300,7 @@ msgid "" "to help us in our effort to make Federated Learning accessible to as many" " people as possible by contributing to those translations! This might " "also be a great opportunity for those wanting to become open source " -"contributors with little prerequistes." +"contributors with little prerequisites." msgstr "" "从 `Flower 1.5 `_ " @@ -362,8 +361,9 @@ msgid "This is what the interface looks like:" msgstr "这就是界面的样子:" #: ../../source/contributor-how-to-contribute-translations.rst:47 +#, fuzzy msgid "" -"You input your translation in the textbox at the top and then, once you " +"You input your translation in the text box at the top and then, once you " "are happy with it, you either press ``Save and continue`` (to save the " "translation and go to the next untranslated string), ``Save and stay`` " "(to save the translation and stay on the same page), ``Suggest`` (to add " @@ -408,11 +408,11 @@ msgstr "添加新语言" #: ../../source/contributor-how-to-contribute-translations.rst:69 msgid "" "If you want to add a new language, you will first have to contact us, " -"either on `Slack `_, or by opening an " -"issue on our `GitHub repo `_." +"either on `Slack `_, or by opening an issue" +" on our `GitHub repo `_." msgstr "" -"如果您想添加新语言,请先联系我们,可以在 `Slack `_ 上联系,也可以在我们的" -" `GitHub repo `_ 上提交问题。" +"如果您想添加新语言,请先联系我们,可以在 `Slack `_ 上联系,也可以在我们的 " +"`GitHub repo `_ 上提交问题。" #: ../../source/contributor-how-to-create-new-messages.rst:2 msgid "Creating New Messages" @@ -449,12 +449,13 @@ msgid "Message Types for Protocol Buffers" msgstr "协议缓冲区的信息类型" #: ../../source/contributor-how-to-create-new-messages.rst:32 +#, fuzzy msgid "" "The first thing we need to do is to define a message type for the RPC " "system in :code:`transport.proto`. Note that we have to do it for both " "the request and response messages. For more details on the syntax of " -"proto3, please see the `official documentation " -"`_." +"proto3, please see the `official documentation `_." 
msgstr "" "我们需要做的第一件事是在脚本code:`transport.proto`中定义 RPC " "系统的消息类型。请注意,我们必须对请求信息和响应信息都这样做。有关 proto3 语法的更多详情,请参阅官方文档 " @@ -575,9 +576,10 @@ msgid "" msgstr "工作区文件从本地文件系统加载,或复制或克隆到容器中。扩展在容器内安装和运行,在容器内它们可以完全访问工具、平台和文件系统。这意味着,只需连接到不同的容器,就能无缝切换整个开发环境。" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:11 +#, fuzzy msgid "" "Source: `Official VSCode documentation " -"`_" +"`_" msgstr "来源:`VSCode 官方文档 `_" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:15 @@ -618,18 +620,20 @@ msgid "" msgstr "在某些情况下,您的设置可能更复杂。有关这些情况,请参考以下资料:" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:23 +#, fuzzy msgid "" "`Developing inside a Container " -"`_" msgstr "" "在容器内开发 `_" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:24 +#, fuzzy msgid "" "`Remote development in Containers " -"`_" +"`_" msgstr "容器中的远程开发 `_" #: ../../source/contributor-how-to-install-development-versions.rst:2 @@ -909,8 +913,8 @@ msgstr "在 ``changelog.md`` 中添加新的 ``Unreleased`` 部分。" #: ../../source/contributor-how-to-release-flower.rst:25 msgid "" -"Merge the pull request on the same day (i.e., before a new nightly release" -" gets published to PyPI)." +"Merge the pull request on the same day (i.e., before a new nightly " +"release gets published to PyPI)." msgstr "在同一天合并拉取请求(即在新版本发布到 PyPI 之前)。" #: ../../source/contributor-how-to-release-flower.rst:28 @@ -923,8 +927,8 @@ msgstr "释放前命名" #: ../../source/contributor-how-to-release-flower.rst:33 msgid "" -"PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases " -"MUST use one of the following naming patterns:" +"PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases" +" MUST use one of the following naming patterns:" msgstr "PyPI 支持预发布版本(alpha、beta、release candidate)。预发布版本必须使用以下命名模式之一:" #: ../../source/contributor-how-to-release-flower.rst:35 @@ -1193,8 +1197,8 @@ msgid "" "where to start to increase your chances of getting your PR accepted into " "the Flower codebase." msgstr "" -"我们欢迎为Flower做出代码贡献!然而,要知道从哪里开始并非易事。因此,我们提出" -"了一些建议,告诉您从哪里开始,以增加您的 PR 被 Flower 代码库接受的机会。" +"我们欢迎为Flower做出代码贡献!然而,要知道从哪里开始并非易事。因此,我们提出了一些建议,告诉您从哪里开始,以增加您的 PR 被 Flower" +" 代码库接受的机会。" #: ../../source/contributor-ref-good-first-contributions.rst:11 msgid "Where to start" @@ -1224,33 +1228,33 @@ msgid "Request for Flower Baselines" msgstr "Flower Baselines的申请" #: ../../source/contributor-ref-good-first-contributions.rst:25 +#, fuzzy msgid "" "If you are not familiar with Flower Baselines, you should probably check-" -"out our `contributing guide for baselines `_." +"out our `contributing guide for baselines " +"`_." msgstr "" "如果您对 Flower Baselines 还不熟悉,也许可以看看我们的 `Baselines贡献指南 " "`_。" #: ../../source/contributor-ref-good-first-contributions.rst:27 +#, fuzzy msgid "" "You should then check out the open `issues " "`_" " for baseline requests. If you find a baseline that you'd like to work on" -" and that has no assignes, feel free to assign it to yourself and start " +" and that has no assignees, feel free to assign it to yourself and start " "working on it!" msgstr "" -"然后查看开放的 `issues `_ baseline请求。如" -"果您发现了自己想做的baseline,而它还没有被分配,请随时把它分配给自己,然后开" -"始工作!" +"然后查看开放的 `issues " +"`_" +" baseline请求。如果您发现了自己想做的baseline,而它还没有被分配,请随时把它分配给自己,然后开始工作!" #: ../../source/contributor-ref-good-first-contributions.rst:31 msgid "" "Otherwise, if you don't find a baseline you'd like to work on, be sure to" " open a new issue with the baseline request template!" 
-msgstr "如果您没有找到想要做的baseline,请务必使用baseline请求模板打开一个新问题(" -"GitHub issue)!" +msgstr "如果您没有找到想要做的baseline,请务必使用baseline请求模板打开一个新问题(GitHub issue)!" #: ../../source/contributor-ref-good-first-contributions.rst:34 msgid "Request for examples" @@ -1261,8 +1265,7 @@ msgid "" "We wish we had more time to write usage examples because we believe they " "help users to get started with building what they want to build. Here are" " a few ideas where we'd be happy to accept a PR:" -msgstr "我们希望有更多的时间来撰写使用示例,因为我们相信这些示例可以帮助用户开始构建" -"他们想要的东西。以下是我们乐意接受 PR 的几个想法:" +msgstr "我们希望有更多的时间来撰写使用示例,因为我们相信这些示例可以帮助用户开始构建他们想要的东西。以下是我们乐意接受 PR 的几个想法:" #: ../../source/contributor-ref-good-first-contributions.rst:40 msgid "Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch" @@ -1330,50 +1333,50 @@ msgid "" msgstr "本指南适用于想参与 Flower,但不习惯为 GitHub 项目贡献的人。" #: ../../source/contributor-tutorial-contribute-on-github.rst:6 +#, fuzzy msgid "" "If you're familiar with how contributing on GitHub works, you can " -"directly checkout our `getting started guide for contributors " -"`_." +"directly checkout our :doc:`getting started guide for contributors " +"`." msgstr "" -"如果您熟悉如何在 GitHub 上贡献,可以直接查看我们的 \"贡献者入门指南\" " -"`_ 和 " -"\"优秀的首次贡献示例\" `_。" +"如果您熟悉如何在 GitHub 上贡献,可以直接查看我们的 \"贡献者入门指南\" `_ 和 \"优秀的首次贡献示例\" " +"`_。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:11 +#: ../../source/contributor-tutorial-contribute-on-github.rst:10 msgid "Setting up the repository" msgstr "建立资源库" -#: ../../source/contributor-tutorial-contribute-on-github.rst:22 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "**Create a GitHub account and setup Git**" msgstr "**创建 GitHub 账户并设置 Git**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:14 +#: ../../source/contributor-tutorial-contribute-on-github.rst:13 +#, fuzzy msgid "" "Git is a distributed version control tool. This allows for an entire " "codebase's history to be stored and every developer's machine. It is a " "software that will need to be installed on your local machine, you can " -"follow this `guide `_ to set it up." +"follow this `guide `_ to set it up." msgstr "" "Git 是一种分布式版本控制工具。它可以将整个代码库的历史记录保存在每个开发人员的机器上。您需要在本地计算机上安装该软件,可以按照本指南 " "`_ 进行设置。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:17 +#: ../../source/contributor-tutorial-contribute-on-github.rst:16 msgid "" "GitHub, itself, is a code hosting platform for version control and " "collaboration. It allows for everyone to collaborate and work from " "anywhere on remote repositories." msgstr "GitHub 本身是一个用于版本控制和协作的代码托管平台。它允许每个人在任何地方对远程仓库进行协作和工作。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:19 +#: ../../source/contributor-tutorial-contribute-on-github.rst:18 msgid "" "If you haven't already, you will need to create an account on `GitHub " "`_." 
msgstr "如果还没有,您需要在 `GitHub `_ 上创建一个账户。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:21 +#: ../../source/contributor-tutorial-contribute-on-github.rst:20 msgid "" "The idea behind the generic Git and GitHub workflow boils down to this: " "you download code from a remote repository on GitHub, make changes " @@ -1383,21 +1386,22 @@ msgstr "" "通用的 Git 和 GitHub 工作流程背后的理念可以归结为:从 GitHub 上的远程仓库下载代码,在本地进行修改并使用 Git " "进行跟踪,然后将新的历史记录上传回 GitHub。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:33 +#: ../../source/contributor-tutorial-contribute-on-github.rst:32 msgid "**Forking the Flower repository**" msgstr "**叉花仓库**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:25 +#: ../../source/contributor-tutorial-contribute-on-github.rst:24 +#, fuzzy msgid "" "A fork is a personal copy of a GitHub repository. To create one for " -"Flower, you must navigate to https://github.com/adap/flower (while " +"Flower, you must navigate to ``_ (while " "connected to your GitHub account) and click the ``Fork`` button situated " "on the top right of the page." msgstr "" "fork 是 GitHub 仓库的个人副本。要为 Flower 创建一个 fork,您必须导航到 " "https://github.com/adap/flower(同时连接到您的 GitHub 账户),然后点击页面右上方的 ``Fork`` 按钮。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:30 +#: ../../source/contributor-tutorial-contribute-on-github.rst:29 msgid "" "You can change the name if you want, but this is not necessary as this " "version of Flower will be yours and will sit inside your own account " @@ -1407,11 +1411,11 @@ msgstr "" "您可以更改名称,但没有必要,因为这个版本的 Flower " "将是您自己的,并位于您自己的账户中(即,在您自己的版本库列表中)。创建完成后,您会在左上角看到自己的 Flower 版本。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:48 +#: ../../source/contributor-tutorial-contribute-on-github.rst:47 msgid "**Cloning your forked repository**" msgstr "**克隆你的分叉仓库**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:36 +#: ../../source/contributor-tutorial-contribute-on-github.rst:35 msgid "" "The next step is to download the forked repository on your machine to be " "able to make changes to it. On your forked repository page, you should " @@ -1421,28 +1425,28 @@ msgstr "" "下一步是在你的机器上下载分叉版本库,以便对其进行修改。在分叉版本库页面上,首先点击右侧的 \"代码 \"按钮,这样就能复制版本库的 HTTPS " "链接。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:42 +#: ../../source/contributor-tutorial-contribute-on-github.rst:41 msgid "" "Once you copied the \\, you can open a terminal on your machine, " "navigate to the place you want to download the repository to and type:" msgstr "一旦复制了 (),你就可以在你的机器上打开一个终端,导航到你想下载软件源的地方,然后键入:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:48 +#: ../../source/contributor-tutorial-contribute-on-github.rst:47 #, fuzzy msgid "" "This will create a ``flower/`` (or the name of your fork if you renamed " "it) folder in the current working directory." msgstr "这将在当前工作目录下创建一个 `flower/`(如果重命名了,则使用 fork 的名称)文件夹。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:67 +#: ../../source/contributor-tutorial-contribute-on-github.rst:66 msgid "**Add origin**" msgstr "**添加原产地**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:51 +#: ../../source/contributor-tutorial-contribute-on-github.rst:50 msgid "You can then go into the repository folder:" msgstr "然后,您就可以进入存储库文件夹:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:57 +#: ../../source/contributor-tutorial-contribute-on-github.rst:56 msgid "" "And here we will need to add an origin to our repository. 
The origin is " "the \\ of the remote fork repository. To obtain it, we can do as " @@ -1452,27 +1456,28 @@ msgstr "" "在这里,我们需要为我们的版本库添加一个 origin。origin 是远程 fork 仓库的 " "\\。要获得它,我们可以像前面提到的那样,访问 GitHub 账户上的分叉仓库并复制链接。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:62 +#: ../../source/contributor-tutorial-contribute-on-github.rst:61 msgid "" "Once the \\ is copied, we can type the following command in our " "terminal:" msgstr "一旦复制了 \\ ,我们就可以在终端中键入以下命令:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:91 +#: ../../source/contributor-tutorial-contribute-on-github.rst:90 msgid "**Add upstream**" msgstr "**增加上游**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:70 +#: ../../source/contributor-tutorial-contribute-on-github.rst:69 +#, fuzzy msgid "" "Now we will add an upstream address to our repository. Still in the same " -"directroy, we must run the following command:" +"directory, we must run the following command:" msgstr "现在,我们要为版本库添加一个上游地址。还是在同一目录下,我们必须运行以下命令:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:77 +#: ../../source/contributor-tutorial-contribute-on-github.rst:76 msgid "The following diagram visually explains what we did in the previous steps:" msgstr "下图直观地解释了我们在前面步骤中的操作:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:81 +#: ../../source/contributor-tutorial-contribute-on-github.rst:80 msgid "" "The upstream is the GitHub remote address of the parent repository (in " "this case Flower), i.e. the one we eventually want to contribute to and " @@ -1483,110 +1488,111 @@ msgstr "" "上游是父版本库(这里是 Flower)的 GitHub 远程地址,即我们最终要贡献的版本库,因此需要最新的历史记录。origin " "只是我们创建的分叉仓库的 GitHub 远程地址,即我们自己账户中的副本(分叉)。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:85 +#: ../../source/contributor-tutorial-contribute-on-github.rst:84 msgid "" "To make sure our local version of the fork is up-to-date with the latest " "changes from the Flower repository, we can execute the following command:" msgstr "为了确保本地版本的分叉程序与 Flower 代码库的最新更改保持一致,我们可以执行以下命令:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:94 +#: ../../source/contributor-tutorial-contribute-on-github.rst:93 msgid "Setting up the coding environment" msgstr "设置编码环境" -#: ../../source/contributor-tutorial-contribute-on-github.rst:96 +#: ../../source/contributor-tutorial-contribute-on-github.rst:95 +#, fuzzy msgid "" -"This can be achieved by following this `getting started guide for " -"contributors`_ (note that you won't need to clone the repository). Once " -"you are able to write code and test it, you can finally start making " -"changes!" +"This can be achieved by following this :doc:`getting started guide for " +"contributors ` (note " +"that you won't need to clone the repository). Once you are able to write " +"code and test it, you can finally start making changes!" msgstr "您可以按照这份 \"贡献者入门指南\"__(注意,您不需要克隆版本库)来实现这一点。一旦您能够编写代码并进行测试,您就可以开始修改了!" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:101 +#: ../../source/contributor-tutorial-contribute-on-github.rst:100 msgid "Making changes" msgstr "做出改变" -#: ../../source/contributor-tutorial-contribute-on-github.rst:103 +#: ../../source/contributor-tutorial-contribute-on-github.rst:102 msgid "" "Before making any changes make sure you are up-to-date with your " "repository:" msgstr "在进行任何更改之前,请确保您的版本库是最新的:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:109 +#: ../../source/contributor-tutorial-contribute-on-github.rst:108 msgid "And with Flower's repository:" msgstr "还有Flower的存储库:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:123 +#: ../../source/contributor-tutorial-contribute-on-github.rst:122 msgid "**Create a new branch**" msgstr "**创建一个新分支**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:116 +#: ../../source/contributor-tutorial-contribute-on-github.rst:115 msgid "" "To make the history cleaner and easier to work with, it is good practice " "to create a new branch for each feature/project that needs to be " "implemented." msgstr "为了使历史记录更简洁、更易于操作,为每个需要实现的功能/项目创建一个新分支是个不错的做法。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:119 +#: ../../source/contributor-tutorial-contribute-on-github.rst:118 msgid "" "To do so, just run the following command inside the repository's " "directory:" msgstr "为此,只需在版本库目录下运行以下命令即可:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:126 +#: ../../source/contributor-tutorial-contribute-on-github.rst:125 msgid "**Make changes**" msgstr "**进行修改**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:126 +#: ../../source/contributor-tutorial-contribute-on-github.rst:125 msgid "Write great code and create wonderful changes using your favorite editor!" msgstr "使用您最喜欢的编辑器编写优秀的代码并创建精彩的更改!" -#: ../../source/contributor-tutorial-contribute-on-github.rst:139 +#: ../../source/contributor-tutorial-contribute-on-github.rst:138 msgid "**Test and format your code**" msgstr "**测试并格式化您的代码**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:129 +#: ../../source/contributor-tutorial-contribute-on-github.rst:128 msgid "" "Don't forget to test and format your code! Otherwise your code won't be " "able to be merged into the Flower repository. This is done so the " "codebase stays consistent and easy to understand." msgstr "不要忘记测试和格式化您的代码!否则您的代码将无法并入 Flower 代码库。这样做是为了使代码库保持一致并易于理解。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:132 +#: ../../source/contributor-tutorial-contribute-on-github.rst:131 msgid "To do so, we have written a few scripts that you can execute:" msgstr "为此,我们编写了一些脚本供您执行:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:151 +#: ../../source/contributor-tutorial-contribute-on-github.rst:150 msgid "**Stage changes**" msgstr "**舞台变化**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:142 +#: ../../source/contributor-tutorial-contribute-on-github.rst:141 msgid "" "Before creating a commit that will update your history, you must specify " "to Git which files it needs to take into account." 
msgstr "在创建更新历史记录的提交之前,必须向 Git 说明需要考虑哪些文件。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:144 +#: ../../source/contributor-tutorial-contribute-on-github.rst:143 msgid "This can be done with:" msgstr "这可以通过:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:150 +#: ../../source/contributor-tutorial-contribute-on-github.rst:149 msgid "" "To check which files have been modified compared to the last version " "(last commit) and to see which files are staged for commit, you can use " "the :code:`git status` command." msgstr "要查看与上一版本(上次提交)相比哪些文件已被修改,以及哪些文件处于提交阶段,可以使用 :code:`git status` 命令。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:161 +#: ../../source/contributor-tutorial-contribute-on-github.rst:160 msgid "**Commit changes**" msgstr "**提交更改**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:154 +#: ../../source/contributor-tutorial-contribute-on-github.rst:153 msgid "" "Once you have added all the files you wanted to commit using :code:`git " "add`, you can finally create your commit using this command:" msgstr "使用 :code:`git add` 添加完所有要提交的文件后,就可以使用此命令创建提交了:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:160 +#: ../../source/contributor-tutorial-contribute-on-github.rst:159 msgid "" "The \\ is there to explain to others what the commit " "does. It should be written in an imperative style and be concise. An " @@ -1595,61 +1601,61 @@ msgstr "" " 用于向他人解释提交的作用。它应该以命令式风格书写,并且简明扼要。例如 :code:`git commit " "-m \"Add images to README\"`。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:172 +#: ../../source/contributor-tutorial-contribute-on-github.rst:171 msgid "**Push the changes to the fork**" msgstr "**将更改推送到分叉**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:164 +#: ../../source/contributor-tutorial-contribute-on-github.rst:163 msgid "" "Once we have committed our changes, we have effectively updated our local" " history, but GitHub has no way of knowing this unless we push our " "changes to our origin's remote address:" msgstr "一旦提交了修改,我们就有效地更新了本地历史记录,但除非我们将修改推送到原点的远程地址,否则 GitHub 无法得知:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:171 +#: ../../source/contributor-tutorial-contribute-on-github.rst:170 msgid "" "Once this is done, you will see on the GitHub that your forked repo was " "updated with the changes you have made." msgstr "完成此操作后,您将在 GitHub 上看到您的分叉仓库已根据您所做的更改进行了更新。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:175 +#: ../../source/contributor-tutorial-contribute-on-github.rst:174 msgid "Creating and merging a pull request (PR)" msgstr "创建和合并拉取请求 (PR)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +#: ../../source/contributor-tutorial-contribute-on-github.rst:205 msgid "**Create the PR**" msgstr "**创建 PR**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:178 +#: ../../source/contributor-tutorial-contribute-on-github.rst:177 msgid "" "Once you have pushed changes, on the GitHub webpage of your repository " "you should see the following message:" msgstr "推送更改后,在仓库的 GitHub 网页上应该会看到以下信息:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:182 +#: ../../source/contributor-tutorial-contribute-on-github.rst:181 #, fuzzy msgid "Otherwise you can always find this option in the ``Branches`` page." 
msgstr "否则,您可以在 \"分支 \"页面找到该选项。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:184 +#: ../../source/contributor-tutorial-contribute-on-github.rst:183 #, fuzzy msgid "" "Once you click the ``Compare & pull request`` button, you should see " "something similar to this:" msgstr "点击 \"比较和拉取请求 \"按钮后,您应该会看到类似下面的内容:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:188 +#: ../../source/contributor-tutorial-contribute-on-github.rst:187 msgid "At the top you have an explanation of which branch will be merged where:" msgstr "在顶部,你可以看到关于哪个分支将被合并的说明:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:192 +#: ../../source/contributor-tutorial-contribute-on-github.rst:191 msgid "" "In this example you can see that the request is to merge the branch " "``doc-fixes`` from my forked repository to branch ``main`` from the " "Flower repository." msgstr "在这个例子中,你可以看到请求将我分叉的版本库中的分支 ``doc-fixes`` 合并到 Flower 版本库中的分支 ``main``。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:194 +#: ../../source/contributor-tutorial-contribute-on-github.rst:193 msgid "" "The input box in the middle is there for you to describe what your PR " "does and to link it to existing issues. We have placed comments (that " @@ -1657,7 +1663,7 @@ msgid "" "process." msgstr "中间的输入框供您描述 PR 的作用,并将其与现有问题联系起来。我们在此放置了注释(一旦 PR 打开,注释将不会显示),以指导您完成整个过程。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:197 +#: ../../source/contributor-tutorial-contribute-on-github.rst:196 msgid "" "It is important to follow the instructions described in comments. For " "instance, in order to not break how our changelog system works, you " @@ -1666,167 +1672,175 @@ msgid "" ":ref:`changelogentry` appendix." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:201 +#: ../../source/contributor-tutorial-contribute-on-github.rst:200 msgid "" "At the bottom you will find the button to open the PR. This will notify " "reviewers that a new PR has been opened and that they should look over it" " to merge or to request changes." msgstr "在底部,您可以找到打开 PR 的按钮。这将通知审核人员新的 PR 已经打开,他们应该查看该 PR 以进行合并或要求修改。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:204 +#: ../../source/contributor-tutorial-contribute-on-github.rst:203 msgid "" "If your PR is not yet ready for review, and you don't want to notify " "anyone, you have the option to create a draft pull request:" msgstr "如果您的 PR 尚未准备好接受审核,而且您不想通知任何人,您可以选择创建一个草案拉取请求:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:208 msgid "**Making new changes**" msgstr "**作出新的改变**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:208 msgid "" "Once the PR has been opened (as draft or not), you can still push new " "commits to it the same way we did before, by making changes to the branch" " associated with the PR." 
msgstr "一旦 PR 被打开(无论是否作为草案),你仍然可以像以前一样,通过修改与 PR 关联的分支来推送新的提交。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:231 +#: ../../source/contributor-tutorial-contribute-on-github.rst:230 msgid "**Review the PR**" msgstr "**审查 PR**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:212 +#: ../../source/contributor-tutorial-contribute-on-github.rst:211 msgid "" "Once the PR has been opened or once the draft PR has been marked as " "ready, a review from code owners will be automatically requested:" msgstr "一旦 PR 被打开或 PR 草案被标记为就绪,就会自动要求代码所有者进行审核:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:216 +#: ../../source/contributor-tutorial-contribute-on-github.rst:215 msgid "" "Code owners will then look into the code, ask questions, request changes " "or validate the PR." msgstr "然后,代码所有者会查看代码、提出问题、要求修改或验证 PR。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:218 +#: ../../source/contributor-tutorial-contribute-on-github.rst:217 msgid "Merging will be blocked if there are ongoing requested changes." msgstr "如果有正在进行的更改请求,合并将被阻止。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:222 +#: ../../source/contributor-tutorial-contribute-on-github.rst:221 msgid "" "To resolve them, just push the necessary changes to the branch associated" " with the PR:" msgstr "要解决这些问题,只需将必要的更改推送到与 PR 关联的分支即可:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:226 +#: ../../source/contributor-tutorial-contribute-on-github.rst:225 msgid "And resolve the conversation:" msgstr "并解决对话:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:230 +#: ../../source/contributor-tutorial-contribute-on-github.rst:229 msgid "" "Once all the conversations have been resolved, you can re-request a " "review." msgstr "一旦所有对话都得到解决,您就可以重新申请审核。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:251 +#: ../../source/contributor-tutorial-contribute-on-github.rst:250 msgid "**Once the PR is merged**" msgstr "**一旦 PR 被合并**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:234 +#: ../../source/contributor-tutorial-contribute-on-github.rst:233 msgid "" "If all the automatic tests have passed and reviewers have no more changes" " to request, they can approve the PR and merge it." msgstr "如果所有自动测试都已通过,且审核员不再需要修改,他们就可以批准 PR 并将其合并。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:238 +#: ../../source/contributor-tutorial-contribute-on-github.rst:237 msgid "" "Once it is merged, you can delete the branch on GitHub (a button should " "appear to do so) and also delete it locally by doing:" msgstr "合并后,您可以在 GitHub 上删除该分支(会出现一个删除按钮),也可以在本地删除该分支:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:245 +#: ../../source/contributor-tutorial-contribute-on-github.rst:244 msgid "Then you should update your forked repository by doing:" msgstr "然后,你应该更新你的分叉仓库:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:254 +#: ../../source/contributor-tutorial-contribute-on-github.rst:253 msgid "Example of first contribution" msgstr "首次捐款实例" -#: ../../source/contributor-tutorial-contribute-on-github.rst:257 +#: ../../source/contributor-tutorial-contribute-on-github.rst:256 msgid "Problem" msgstr "问题" -#: ../../source/contributor-tutorial-contribute-on-github.rst:259 +#: ../../source/contributor-tutorial-contribute-on-github.rst:258 +#, fuzzy msgid "" -"For our documentation, we’ve started to use the `Diàtaxis framework " +"For our documentation, we've started to use the `Diàtaxis framework " "`_." 
msgstr "对于我们的文档,我们已经开始使用 \"Diàtaxis 框架 `_\"。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:261 +#: ../../source/contributor-tutorial-contribute-on-github.rst:260 +#, fuzzy msgid "" -"Our “How to” guides should have titles that continue the sencence “How to" -" …”, for example, “How to upgrade to Flower 1.0”." +"Our \"How to\" guides should have titles that continue the sentence \"How" +" to …\", for example, \"How to upgrade to Flower 1.0\"." msgstr "我们的 \"如何 \"指南的标题应延续 \"如何...... \"的句式,例如 \"如何升级到 Flower 1.0\"。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:263 +#: ../../source/contributor-tutorial-contribute-on-github.rst:262 msgid "" "Most of our guides do not follow this new format yet, and changing their " "title is (unfortunately) more involved than one might think." msgstr "我们的大多数指南还没有采用这种新格式,而更改其标题(不幸的是)比人们想象的要复杂得多。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:265 +#: ../../source/contributor-tutorial-contribute-on-github.rst:264 +#, fuzzy msgid "" -"This issue is about changing the title of a doc from present continious " +"This issue is about changing the title of a doc from present continuous " "to present simple." msgstr "这个问题是关于将文档标题从现在进行时改为现在进行时。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:267 +#: ../../source/contributor-tutorial-contribute-on-github.rst:266 +#, fuzzy msgid "" -"Let's take the example of “Saving Progress” which we changed to “Save " -"Progress”. Does this pass our check?" +"Let's take the example of \"Saving Progress\" which we changed to \"Save " +"Progress\". Does this pass our check?" msgstr "以 \"保存进度 \"为例,我们将其改为 \"保存进度\"。这是否通过了我们的检查?" -#: ../../source/contributor-tutorial-contribute-on-github.rst:269 -msgid "Before: ”How to saving progress” ❌" +#: ../../source/contributor-tutorial-contribute-on-github.rst:268 +#, fuzzy +msgid "Before: \"How to saving progress\" ❌" msgstr "之前: \"如何保存进度\" ❌" -#: ../../source/contributor-tutorial-contribute-on-github.rst:271 -msgid "After: ”How to save progress” ✅" +#: ../../source/contributor-tutorial-contribute-on-github.rst:270 +#, fuzzy +msgid "After: \"How to save progress\" ✅" msgstr "之后: \"如何保存进度\"✅" -#: ../../source/contributor-tutorial-contribute-on-github.rst:274 +#: ../../source/contributor-tutorial-contribute-on-github.rst:273 msgid "Solution" msgstr "解决方案" -#: ../../source/contributor-tutorial-contribute-on-github.rst:276 +#: ../../source/contributor-tutorial-contribute-on-github.rst:275 +#, fuzzy msgid "" -"This is a tiny change, but it’ll allow us to test your end-to-end setup. " -"After cloning and setting up the Flower repo, here’s what you should do:" +"This is a tiny change, but it'll allow us to test your end-to-end setup. 
" +"After cloning and setting up the Flower repo, here's what you should do:" msgstr "这只是一个很小的改动,但可以让我们测试你的端到端设置。克隆并设置好 Flower repo 后,你应该这样做:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:278 +#: ../../source/contributor-tutorial-contribute-on-github.rst:277 #, fuzzy msgid "Find the source file in ``doc/source``" msgstr "在 `doc/source` 中查找源文件" -#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#: ../../source/contributor-tutorial-contribute-on-github.rst:278 #, fuzzy msgid "" "Make the change in the ``.rst`` file (beware, the dashes under the title " "should be the same length as the title itself)" msgstr "在 `.rst` 文件中进行修改(注意,标题下的破折号应与标题本身的长度相同)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:280 +#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#, fuzzy msgid "" -"Build the docs and check the result: ``_" msgstr "" "构建文档并检查结果: ``_" -#: ../../source/contributor-tutorial-contribute-on-github.rst:283 +#: ../../source/contributor-tutorial-contribute-on-github.rst:282 msgid "Rename file" msgstr "重命名文件" -#: ../../source/contributor-tutorial-contribute-on-github.rst:285 +#: ../../source/contributor-tutorial-contribute-on-github.rst:284 msgid "" "You might have noticed that the file name still reflects the old wording." " If we just change the file, then we break all existing links to it - it " @@ -1836,32 +1850,33 @@ msgstr "" "您可能已经注意到,文件名仍然反映了旧的措辞。如果我们只是更改文件,那么就会破坏与该文件的所有现有链接--" "避免这种情况是***重要的,破坏链接会损害我们的搜索引擎排名。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:288 -msgid "Here’s how to change the file name:" +#: ../../source/contributor-tutorial-contribute-on-github.rst:287 +#, fuzzy +msgid "Here's how to change the file name:" msgstr "下面是更改文件名的方法:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +#: ../../source/contributor-tutorial-contribute-on-github.rst:289 #, fuzzy msgid "Change the file name to ``save-progress.rst``" msgstr "将文件名改为`save-progress.rst`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:291 +#: ../../source/contributor-tutorial-contribute-on-github.rst:290 #, fuzzy msgid "Add a redirect rule to ``doc/source/conf.py``" msgstr "在 `doc/source/conf.py` 中添加重定向规则" -#: ../../source/contributor-tutorial-contribute-on-github.rst:293 +#: ../../source/contributor-tutorial-contribute-on-github.rst:292 #, fuzzy msgid "" "This will cause a redirect from ``saving-progress.html`` to ``save-" "progress.html``, old links will continue to work." msgstr "这将导致从 `saving-progress.html` 重定向到 `save-progress.html`,旧链接将继续工作。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:296 +#: ../../source/contributor-tutorial-contribute-on-github.rst:295 msgid "Apply changes in the index file" msgstr "应用索引文件中的更改" -#: ../../source/contributor-tutorial-contribute-on-github.rst:298 +#: ../../source/contributor-tutorial-contribute-on-github.rst:297 #, fuzzy msgid "" "For the lateral navigation bar to work properly, it is very important to " @@ -1869,49 +1884,50 @@ msgid "" "arborescence of the navbar." 
msgstr "要使横向导航栏正常工作,更新 `index.rst` 文件也非常重要。我们就是在这里定义整个导航栏的结构。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:301 +#: ../../source/contributor-tutorial-contribute-on-github.rst:300 #, fuzzy msgid "Find and modify the file name in ``index.rst``" msgstr "查找并修改 `index.rst` 中的文件名" -#: ../../source/contributor-tutorial-contribute-on-github.rst:304 +#: ../../source/contributor-tutorial-contribute-on-github.rst:303 msgid "Open PR" msgstr "开放式 PR" -#: ../../source/contributor-tutorial-contribute-on-github.rst:306 +#: ../../source/contributor-tutorial-contribute-on-github.rst:305 +#, fuzzy msgid "" -"Commit the changes (commit messages are always imperative: “Do " -"something”, in this case “Change …”)" +"Commit the changes (commit messages are always imperative: \"Do " +"something\", in this case \"Change …\")" msgstr "提交更改(提交信息总是命令式的:\"做某事\",这里是 \"更改......\")" -#: ../../source/contributor-tutorial-contribute-on-github.rst:307 +#: ../../source/contributor-tutorial-contribute-on-github.rst:306 msgid "Push the changes to your fork" msgstr "将更改推送到分叉" -#: ../../source/contributor-tutorial-contribute-on-github.rst:308 +#: ../../source/contributor-tutorial-contribute-on-github.rst:307 msgid "Open a PR (as shown above)" msgstr "打开 PR(如上图所示)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:309 +#: ../../source/contributor-tutorial-contribute-on-github.rst:308 msgid "Wait for it to be approved!" msgstr "等待审批!" -#: ../../source/contributor-tutorial-contribute-on-github.rst:310 +#: ../../source/contributor-tutorial-contribute-on-github.rst:309 msgid "Congrats! 🥳 You're now officially a Flower contributor!" msgstr "祝贺你 🥳 您现在正式成为 \"Flower \"贡献者!" -#: ../../source/contributor-tutorial-contribute-on-github.rst:314 +#: ../../source/contributor-tutorial-contribute-on-github.rst:313 msgid "How to write a good PR title" msgstr "如何撰写好的公关标题" -#: ../../source/contributor-tutorial-contribute-on-github.rst:316 +#: ../../source/contributor-tutorial-contribute-on-github.rst:315 msgid "" "A well-crafted PR title helps team members quickly understand the purpose" " and scope of the changes being proposed. Here's a guide to help you " "write a good GitHub PR title:" msgstr "一个精心撰写的公关标题能帮助团队成员迅速了解所提修改的目的和范围。以下指南可帮助您撰写一个好的 GitHub PR 标题:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:318 +#: ../../source/contributor-tutorial-contribute-on-github.rst:317 msgid "" "1. Be Clear and Concise: Provide a clear summary of the changes in a " "concise manner. 1. Use Actionable Verbs: Start with verbs like \"Add,\" " @@ -1924,63 +1940,63 @@ msgstr "" "\"等动词来表明目的。1. 包含相关信息: 提及受影响的功能或模块以了解上下文。1. 简短:避免冗长的标题,以方便阅读。1. 
" "使用正确的大小写和标点符号: 遵守语法规则,以确保清晰。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:324 +#: ../../source/contributor-tutorial-contribute-on-github.rst:323 msgid "" "Let's start with a few examples for titles that should be avoided because" " they do not provide meaningful information:" msgstr "让我们先举例说明几个应该避免使用的标题,因为它们不能提供有意义的信息:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:326 +#: ../../source/contributor-tutorial-contribute-on-github.rst:325 msgid "Implement Algorithm" msgstr "执行算法" -#: ../../source/contributor-tutorial-contribute-on-github.rst:327 +#: ../../source/contributor-tutorial-contribute-on-github.rst:326 msgid "Database" msgstr "数据库" -#: ../../source/contributor-tutorial-contribute-on-github.rst:328 +#: ../../source/contributor-tutorial-contribute-on-github.rst:327 msgid "Add my_new_file.py to codebase" msgstr "在代码库中添加 my_new_file.py" -#: ../../source/contributor-tutorial-contribute-on-github.rst:329 +#: ../../source/contributor-tutorial-contribute-on-github.rst:328 msgid "Improve code in module" msgstr "改进模块中的代码" -#: ../../source/contributor-tutorial-contribute-on-github.rst:330 +#: ../../source/contributor-tutorial-contribute-on-github.rst:329 msgid "Change SomeModule" msgstr "更改 SomeModule" -#: ../../source/contributor-tutorial-contribute-on-github.rst:332 +#: ../../source/contributor-tutorial-contribute-on-github.rst:331 msgid "" "Here are a few positive examples which provide helpful information " "without repeating how they do it, as that is already visible in the " "\"Files changed\" section of the PR:" msgstr "这里有几个正面的例子,提供了有用的信息,但没有重复他们是如何做的,因为在 PR 的 \"已更改文件 \"部分已经可以看到:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:334 +#: ../../source/contributor-tutorial-contribute-on-github.rst:333 msgid "Update docs banner to mention Flower Summit 2023" msgstr "更新文件横幅,提及 2023 年 Flower 峰会" -#: ../../source/contributor-tutorial-contribute-on-github.rst:335 +#: ../../source/contributor-tutorial-contribute-on-github.rst:334 msgid "Remove unnecessary XGBoost dependency" msgstr "移除不必要的 XGBoost 依赖性" -#: ../../source/contributor-tutorial-contribute-on-github.rst:336 +#: ../../source/contributor-tutorial-contribute-on-github.rst:335 msgid "Remove redundant attributes in strategies subclassing FedAvg" msgstr "删除 FedAvg 子类化策略中的多余属性" -#: ../../source/contributor-tutorial-contribute-on-github.rst:337 +#: ../../source/contributor-tutorial-contribute-on-github.rst:336 #, fuzzy msgid "Add CI job to deploy the staging system when the ``main`` branch changes" msgstr "添加 CI 作业,以便在 \"主 \"分支发生变化时部署暂存系统" -#: ../../source/contributor-tutorial-contribute-on-github.rst:338 +#: ../../source/contributor-tutorial-contribute-on-github.rst:337 msgid "" "Add new amazing library which will be used to improve the simulation " "engine" msgstr "添加新的惊人库,用于改进模拟引擎" -#: ../../source/contributor-tutorial-contribute-on-github.rst:342 +#: ../../source/contributor-tutorial-contribute-on-github.rst:341 #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:548 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:946 #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:727 @@ -1989,153 +2005,154 @@ msgstr "添加新的惊人库,用于改进模拟引擎" msgid "Next steps" msgstr "接下来的步骤" -#: ../../source/contributor-tutorial-contribute-on-github.rst:344 +#: ../../source/contributor-tutorial-contribute-on-github.rst:343 msgid "" "Once you have made your first PR, and want to contribute more, be sure to" " check out the following :" msgstr "一旦您完成了第一份 
PR,并希望做出更多贡献,请务必查看以下内容:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:346 +#: ../../source/contributor-tutorial-contribute-on-github.rst:345 +#, fuzzy msgid "" -"`Good first contributions `_, where you should particularly look " -"into the :code:`baselines` contributions." +":doc:`Good first contributions `, where you should particularly look into the " +":code:`baselines` contributions." msgstr "" "`优秀的首次贡献 `_,在这里你应该特别看看 :code:`baselines` 的贡献。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:350 +#: ../../source/contributor-tutorial-contribute-on-github.rst:349 #: ../../source/fed/0000-20200102-fed-template.md:60 msgid "Appendix" msgstr "附录" -#: ../../source/contributor-tutorial-contribute-on-github.rst:355 +#: ../../source/contributor-tutorial-contribute-on-github.rst:354 #, fuzzy msgid "Changelog entry" msgstr "更新日志" -#: ../../source/contributor-tutorial-contribute-on-github.rst:357 +#: ../../source/contributor-tutorial-contribute-on-github.rst:356 msgid "" "When opening a new PR, inside its description, there should be a " "``Changelog entry`` header." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:359 +#: ../../source/contributor-tutorial-contribute-on-github.rst:358 msgid "" "Above this header you should see the following comment that explains how " "to write your changelog entry:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:361 +#: ../../source/contributor-tutorial-contribute-on-github.rst:360 msgid "" "Inside the following 'Changelog entry' section, you should put the " "description of your changes that will be added to the changelog alongside" " your PR title." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:364 +#: ../../source/contributor-tutorial-contribute-on-github.rst:363 msgid "" -"If the section is completely empty (without any token) or non-existant, " +"If the section is completely empty (without any token) or non-existent, " "the changelog will just contain the title of the PR for the changelog " "entry, without any description." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:367 +#: ../../source/contributor-tutorial-contribute-on-github.rst:366 msgid "" "If the section contains some text other than tokens, it will use it to " "add a description to the change." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:369 +#: ../../source/contributor-tutorial-contribute-on-github.rst:368 msgid "" "If the section contains one of the following tokens it will ignore any " "other text and put the PR under the corresponding section of the " "changelog:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:371 +#: ../../source/contributor-tutorial-contribute-on-github.rst:370 msgid " is for classifying a PR as a general improvement." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:373 +#: ../../source/contributor-tutorial-contribute-on-github.rst:372 msgid " is to not add the PR to the changelog" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:375 +#: ../../source/contributor-tutorial-contribute-on-github.rst:374 msgid " is to add a general baselines change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:377 +#: ../../source/contributor-tutorial-contribute-on-github.rst:376 msgid " is to add a general examples change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:379 +#: ../../source/contributor-tutorial-contribute-on-github.rst:378 msgid " is to add a general sdk change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:381 +#: ../../source/contributor-tutorial-contribute-on-github.rst:380 msgid " is to add a general simulations change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:383 +#: ../../source/contributor-tutorial-contribute-on-github.rst:382 msgid "Note that only one token should be used." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:385 +#: ../../source/contributor-tutorial-contribute-on-github.rst:384 msgid "" "Its content must have a specific format. We will break down what each " "possibility does:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:387 +#: ../../source/contributor-tutorial-contribute-on-github.rst:386 msgid "" "If the ``### Changelog entry`` section contains nothing or doesn't exist," " the following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:391 +#: ../../source/contributor-tutorial-contribute-on-github.rst:390 msgid "" "If the ``### Changelog entry`` section contains a description (and no " "token), the following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:397 +#: ../../source/contributor-tutorial-contribute-on-github.rst:396 msgid "" "If the ``### Changelog entry`` section contains ````, nothing will " "change in the changelog." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:399 +#: ../../source/contributor-tutorial-contribute-on-github.rst:398 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:403 +#: ../../source/contributor-tutorial-contribute-on-github.rst:402 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:407 +#: ../../source/contributor-tutorial-contribute-on-github.rst:406 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:411 +#: ../../source/contributor-tutorial-contribute-on-github.rst:410 msgid "" "If the ``### Changelog entry`` section contains ````, the following " "text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:415 +#: ../../source/contributor-tutorial-contribute-on-github.rst:414 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:419 +#: ../../source/contributor-tutorial-contribute-on-github.rst:418 msgid "" "Note that only one token must be provided, otherwise, only the first " "action (in the order listed above), will be performed." @@ -2167,10 +2184,11 @@ msgid "(Optional) `pyenv-virtualenv ` msgstr "(可选) `pyenv-virtualenv `_" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:12 +#, fuzzy msgid "" "Flower uses :code:`pyproject.toml` to manage dependencies and configure " "development tools (the ones which support it). Poetry is a build tool " -"which supports `PEP 517 `_." +"which supports `PEP 517 `_." msgstr "" "Flower 使用 :code:`pyproject.toml` 来管理依赖关系和配置开发工具(支持它的)。Poetry 是一种支持 `PEP " "517 `_ 的构建工具。" @@ -2348,15 +2366,16 @@ msgid "Example: FedBN in PyTorch - From Centralized To Federated" msgstr "示例: PyTorch 中的 FedBN - 从集中式到联邦式" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 +#, fuzzy msgid "" "This tutorial will show you how to use Flower to build a federated " "version of an existing machine learning workload with `FedBN " "`_, a federated training strategy " "designed for non-iid data. We are using PyTorch to train a Convolutional " "Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " -"When applying FedBN, only few changes needed compared to `Example: " -"PyTorch - From Centralized To Federated `_." +"When applying FedBN, only few changes needed compared to :doc:`Example: " +"PyTorch - From Centralized To Federated `." msgstr "" "本教程将向您展示如何使用 Flower 为现有的机器学习框架构建一个联邦学习的版本,并使用 \"FedBN `_\"(一种针对非 iid 数据设计的联邦训练策略)。我们使用 PyTorch 在 CIFAR-10 " @@ -2370,11 +2389,12 @@ msgid "Centralized Training" msgstr "集中式训练" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 +#, fuzzy msgid "" -"All files are revised based on `Example: PyTorch - From Centralized To " -"Federated `_. The only thing to do is modifying the file called " -":code:`cifar.py`, revised part is shown below:" +"All files are revised based on :doc:`Example: PyTorch - From Centralized " +"To Federated `. 
The only " +"thing to do is modifying the file called :code:`cifar.py`, revised part " +"is shown below:" msgstr "" "所有文件均根据 `示例: PyTorch -从集中式到联邦式 `_。唯一要做的就是修改名为 :code:`cifar.py` " @@ -2392,11 +2412,12 @@ msgid "You can now run your machine learning workload:" msgstr "现在,您可以运行您的机器学习工作了:" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#, fuzzy msgid "" "So far this should all look fairly familiar if you've used PyTorch " "before. Let's take the next step and use what we've built to create a " -"federated learning system within FedBN, the sytstem consists of one " -"server and two clients." +"federated learning system within FedBN, the system consists of one server" +" and two clients." msgstr "" "到目前为止,如果您以前使用过 PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,使用我们所构建的内容在 FedBN " "中创建一个联邦学习系统,该系统由一个服务器和两个客户端组成。" @@ -2407,14 +2428,14 @@ msgid "Federated Training" msgstr "联邦培训" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +#, fuzzy msgid "" -"If you have read `Example: PyTorch - From Centralized To Federated " -"`_, the following parts are easy to follow, onyl " -":code:`get_parameters` and :code:`set_parameters` function in " -":code:`client.py` needed to revise. If not, please read the `Example: " -"PyTorch - From Centralized To Federated `_. first." +"If you have read :doc:`Example: PyTorch - From Centralized To Federated " +"`, the following parts are" +" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " +"function in :code:`client.py` needed to revise. If not, please read the " +":doc:`Example: PyTorch - From Centralized To Federated `. first." msgstr "" "如果你读过 `示例: PyTorch - 从集中式到联邦式 `_,下面的部分就很容易理解了,只需要修改 " @@ -3004,8 +3025,8 @@ msgid "" "Implementing a Flower *client* basically means implementing a subclass of" " either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " "Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`MNISTClient`. :code:`NumPyClient` is slightly easier " -"to implement than :code:`Client` if you use a framework with good NumPy " +"we'll call it :code:`MNISTClient`. :code:`NumPyClient` is slightly easier" +" to implement than :code:`Client` if you use a framework with good NumPy " "interoperability (like PyTorch or MXNet) because it avoids some of the " "boilerplate that would otherwise be necessary. :code:`MNISTClient` needs " "to implement four methods, two methods for getting/setting model " @@ -3223,8 +3244,8 @@ msgid "" "Implementing a Flower *client* basically means implementing a subclass of" " either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " "Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier " -"to implement than :code:`Client` if you use a framework with good NumPy " +"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" +" to implement than :code:`Client` if you use a framework with good NumPy " "interoperability (like PyTorch or TensorFlow/Keras) because it avoids " "some of the boilerplate that would otherwise be necessary. " ":code:`CifarClient` needs to implement four methods, two methods for " @@ -3289,653 +3310,301 @@ msgstr "" "federated>`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现在,您已经准备好进一步探讨这一主题了。比如在每个客户端使用不同的" " CIFAR-10 子集会如何?增加更多客户端会如何?" 
-#: ../../source/example-walkthrough-pytorch-mnist.rst:2 -msgid "Example: Walk-Through PyTorch & MNIST" -msgstr "实例: PyTorch 和 MNIST 的演练" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:4 -msgid "" -"In this tutorial we will learn, how to train a Convolutional Neural " -"Network on MNIST using Flower and PyTorch." -msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 MNIST 上训练卷积神经网络。" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:6 -#: ../../source/tutorial-quickstart-mxnet.rst:16 -#: ../../source/tutorial-quickstart-pytorch.rst:17 -#: ../../source/tutorial-quickstart-scikitlearn.rst:14 -msgid "" -"Our example consists of one *server* and two *clients* all having the " -"same model." -msgstr "我们的例子包括一个*服务器*和两个*客户端*,它们都有相同的模型。" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:8 -#: ../../source/tutorial-quickstart-pytorch.rst:19 -msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." -msgstr "*客户端*负责在其本地数据集上更新模型参数。然后,这些参数会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的模型参数更新周期称为一*轮*。" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:12 -#: ../../source/tutorial-quickstart-pytorch.rst:23 -msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running :" -msgstr "现在,我们已经有了一个大致的概念了,那就让我们开始吧。首先,我们需要安装 Flower。可以通过运行 :" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:18 -msgid "" -"Since we want to use PyTorch to solve a computer vision task, let's go " -"ahead an install PyTorch and the **torchvision** library:" -msgstr "我们想用 PyTorch 来做计算机视觉任务,需要先安装 PyTorch 和 **torchvision** 库:" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:26 -msgid "Ready... Set... Train!" -msgstr "准备...设置...训练!" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:28 -msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. Our training " -"procedure and network architecture are based on PyTorch's `Basic MNIST " -"Example `_. This " -"will allow you see how easy it is to wrap your code with Flower and begin" -" training in a federated way. We provide you with two helper scripts, " -"namely *run-server.sh*, and *run-clients.sh*. Don't be afraid to look " -"inside, they are simple enough =)." -msgstr "" -"现在我们已经安装了所有的依赖包,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 PyTorch 的 " -"`Basic MNIST Example " -"`_。您会发现用 Flower " -"来封装您的代码并进行联邦学习训练是多么容易。我们为您提供了两个辅助脚本,即 *run-server.sh* 和 *run-" -"clients.sh*。别害怕,它们很简单 =)。" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:31 -msgid "" -"Go ahead and launch on a terminal the *run-server.sh* script first as " -"follows:" -msgstr "首先在终端上启动 *run-server.sh* 脚本,如下所示:" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:38 -msgid "Now that the server is up and running, go ahead and launch the clients." -msgstr "现在服务器已经启动并运行,请继续启动客户端。" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:45 -msgid "" -"Et voilà! You should be seeing the training procedure and, after a few " -"iterations, the test accuracy for each client." 
-msgstr "然后就可以了!您应该能看到训练过程,以及经过几次反复后,每个客户端的测试准确率。" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:66 -msgid "Now, let's see what is really happening inside." -msgstr "现在,让我们看看里面到底发生了什么。" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:69 -#: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-mxnet.rst:226 -#: ../../source/tutorial-quickstart-pytorch.rst:203 -#: ../../source/tutorial-quickstart-scikitlearn.rst:157 -#: ../../source/tutorial-quickstart-tensorflow.rst:98 -#: ../../source/tutorial-quickstart-xgboost.rst:309 -msgid "Flower Server" -msgstr "Flower 服务器" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:71 -msgid "" -"Inside the server helper script *run-server.sh* you will find the " -"following code that basically runs the :code:`server.py`" -msgstr "在服务器辅助脚本 *run-server.sh* 中,你可以找到以下代码,这些代码基本上都是运行 :code:`server.py` 的代码" +#: ../../source/explanation-differential-privacy.rst:2 +#: ../../source/explanation-differential-privacy.rst:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 +msgid "Differential Privacy" +msgstr "差分隐私" -#: ../../source/example-walkthrough-pytorch-mnist.rst:78 +#: ../../source/explanation-differential-privacy.rst:3 msgid "" -"We can go a bit deeper and see that :code:`server.py` simply launches a " -"server that will coordinate three rounds of training. Flower Servers are " -"very customizable, but for simple workloads, we can start a server using " -"the :ref:`start_server ` function and " -"leave all the configuration possibilities at their default values, as " -"seen below." +"The information in datasets like healthcare, financial transactions, user" +" preferences, etc., is valuable and has the potential for scientific " +"breakthroughs and provides important business insights. However, such " +"data is also sensitive and there is a risk of compromising individual " +"privacy." msgstr "" -"我们可以再深入一点,:code:`server.py` 只是启动了一个服务器,该服务器将协调三轮训练。Flower " -"服务器是非常容易修改的,但对于简单的工作,我们可以使用 :ref:`start_server `函数启动服务器,并将所有可能的配置保留为默认值,如下所示。" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:89 -#: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-mxnet.rst:36 -#: ../../source/tutorial-quickstart-pytorch.rst:37 -#: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-tensorflow.rst:29 -#: ../../source/tutorial-quickstart-xgboost.rst:55 -msgid "Flower Client" -msgstr "Flower 客户端" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:91 -msgid "" -"Next, let's take a look at the *run-clients.sh* file. You will see that " -"it contains the main loop that starts a set of *clients*." -msgstr "接下来,让我们看看 *run-clients.sh* 文件。您会看到它包含了用来启动多个 *客户端* 的代码。" -#: ../../source/example-walkthrough-pytorch-mnist.rst:100 +#: ../../source/explanation-differential-privacy.rst:6 msgid "" -"**cid**: is the client ID. It is an integer that uniquely identifies " -"client identifier." -msgstr "**cid**:是客户 ID。它是一个整数,可唯一标识客户标识符。" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:101 -msgid "**sever_address**: String that identifies IP and port of the server." -msgstr "**sever_address**: 标识服务器 IP 和端口的字符串。" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:102 -msgid "" -"**nb_clients**: This defines the number of clients being created. 
This " -"piece of information is not required by the client, but it helps us " -"partition the original MNIST dataset to make sure that every client is " -"working on unique subsets of both *training* and *test* sets." +"Traditional methods like anonymization alone would not work because of " +"attacks like Re-identification and Data Linkage. That's where " +"differential privacy comes in. It provides the possibility of analyzing " +"data while ensuring the privacy of individuals." msgstr "" -"**nb_clients**: 这定义了正在创建的客户端数量。客户端并不需要这一信息,但它有助于我们对原始 MNIST " -"数据集进行划分,以确保每个客户端都在 *training* 和 *test* 数据集上有独立的数据。" -#: ../../source/example-walkthrough-pytorch-mnist.rst:104 +#: ../../source/explanation-differential-privacy.rst:12 msgid "" -"Again, we can go deeper and look inside :code:`flwr_example/quickstart-" -"pytorch/client.py`. After going through the argument parsing code at the " -"beginning of our :code:`main` function, you will find a call to " -":code:`mnist.load_data`. This function is responsible for partitioning " -"the original MNIST datasets (*training* and *test*) and returning a " -":code:`torch.utils.data.DataLoader` s for each of them. We then " -"instantiate a :code:`PytorchMNISTClient` object with our client ID, our " -"DataLoaders, the number of epochs in each round, and which device we want" -" to use for training (CPU or GPU)." +"Imagine two datasets that are identical except for a single record (for " +"instance, Alice's data). Differential Privacy (DP) guarantees that any " +"analysis (M), like calculating the average income, will produce nearly " +"identical results for both datasets (O and O' would be similar). This " +"preserves group patterns while obscuring individual details, ensuring the" +" individual's information remains hidden in the crowd." msgstr "" -"我们可以深入看一下 :code:`flwr_example/quickstart-pytorch/client.py`。查看 " -":code:`main` 函数开头的参数解析代码后,你会发现一个对 :code:`mnist.load_data` 的调用。该函数负责分割原始 " -"MNIST 数据集(*training* 和 *test*),并为每个数据集返回一个 " -":code:`torch.utils.data.DataLoader` 。然后,我们实例化一个 " -":code:`PytorchMNISTClient` 对象,其中包含我们的客户端 ID、 " -"DataLoader、每一轮中的遍历数,以及我们希望用于训练的设备(CPU 或 GPU)。" -#: ../../source/example-walkthrough-pytorch-mnist.rst:119 -msgid "" -"The :code:`PytorchMNISTClient` object when finally passed to " -":code:`fl.client.start_client` along with the server's address as the " -"training process begins." +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "DP Intro" msgstr "" -"当训练过程开始时,:code:`PytorchMNISTClient` 对象会连同服务器地址一起传递给 " -":code:`fl.client.start_client`。" -#: ../../source/example-walkthrough-pytorch-mnist.rst:123 -msgid "A Closer Look" -msgstr "仔细看一下" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:125 +#: ../../source/explanation-differential-privacy.rst:22 msgid "" -"Now, let's look closely into the :code:`PytorchMNISTClient` inside " -":code:`flwr_example.quickstart-pytorch.mnist` and see what it is doing:" +"One of the most commonly used mechanisms to achieve DP is adding enough " +"noise to the output of the analysis to mask the contribution of each " +"individual in the data while preserving the overall accuracy of the " +"analysis." 
msgstr "" -"现在,让我们仔细研究一下 :code:`flwr_example.quickstart-pytorch.mnist` 中的 " -":code:`PytorchMNISTClient`,看看它在做什么:" -#: ../../source/example-walkthrough-pytorch-mnist.rst:226 -msgid "" -"The first thing to notice is that :code:`PytorchMNISTClient` instantiates" -" a CNN model inside its constructor" -msgstr "首先要注意的是 :code:`PytorchMNISTClient` 在其构造函数中实例化了一个 CNN 模型" +#: ../../source/explanation-differential-privacy.rst:25 +#, fuzzy +msgid "Formal Definition" +msgstr "编译 ProtoBuf 定义" -#: ../../source/example-walkthrough-pytorch-mnist.rst:244 +#: ../../source/explanation-differential-privacy.rst:26 msgid "" -"The code for the CNN is available under :code:`quickstart-pytorch.mnist` " -"and it is reproduced below. It is the same network found in `Basic MNIST " -"Example `_." +"Differential Privacy (DP) provides statistical guarantees against the " +"information an adversary can infer through the output of a randomized " +"algorithm. It provides an unconditional upper bound on the influence of a" +" single individual on the output of the algorithm by adding noise [1]. A " +"randomized mechanism M provides (:math:`\\epsilon`, " +":math:`\\delta`)-differential privacy if for any two neighboring " +"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " +"record, and for all possible outputs S ⊆ Range(A):" msgstr "" -"CNN 的代码可在 :code:`quickstart-pytorch.mnist` 下找到,现复制如下。它与 `Basic MNIST " -"Example `_中的网络相同。" -#: ../../source/example-walkthrough-pytorch-mnist.rst:290 +#: ../../source/explanation-differential-privacy.rst:32 msgid "" -"The second thing to notice is that :code:`PytorchMNISTClient` class " -"inherits from the :code:`fl.client.Client`, and hence it must implement " -"the following methods:" +"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" msgstr "" -"第二件要注意的事是 :code:`PytorchMNISTClient` 类继承自 " -":code:`fl.client.Client`,因此它必须实现以下方法:" -#: ../../source/example-walkthrough-pytorch-mnist.rst:315 +#: ../../source/explanation-differential-privacy.rst:38 msgid "" -"When comparing the abstract class to its derived class " -":code:`PytorchMNISTClient` you will notice that :code:`fit` calls a " -":code:`train` function and that :code:`evaluate` calls a :code:`test`: " -"function." +"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " +"metric of privacy loss. It also controls the privacy-utility trade-off; " +"lower :math:`\\epsilon` values indicate higher levels of privacy but are " +"likely to reduce utility as well. The :math:`\\delta` parameter accounts " +"for a small probability on which the upper bound :math:`\\epsilon` does " +"not hold. The amount of noise needed to achieve differential privacy is " +"proportional to the sensitivity of the output, which measures the maximum" +" change in the output due to the inclusion or removal of a single record." 
msgstr "" -"将抽象类与其派生类 :code:`PytorchMNISTClient` 进行比较时,您会发现 :code:`fit` 调用了一个 " -":code:`train` 函数,而 :code:`evaluate` 则调用了一个 :code:`test`: 函数。" -#: ../../source/example-walkthrough-pytorch-mnist.rst:317 -msgid "" -"These functions can both be found inside the same :code:`quickstart-" -"pytorch.mnist` module:" -msgstr "这些函数都可以在同一个 :code:`quickstart-pytorch.mnist` 模块中找到:" +#: ../../source/explanation-differential-privacy.rst:45 +#, fuzzy +msgid "Differential Privacy in Machine Learning" +msgstr "差分隐私" -#: ../../source/example-walkthrough-pytorch-mnist.rst:437 +#: ../../source/explanation-differential-privacy.rst:46 msgid "" -"Observe that these functions encapsulate regular training and test loops " -"and provide :code:`fit` and :code:`evaluate` with final statistics for " -"each round. You could substitute them with your custom train and test " -"loops and change the network architecture, and the entire example would " -"still work flawlessly. As a matter of fact, why not try and modify the " -"code to an example of your liking?" +"DP can be utilized in machine learning to preserve the privacy of the " +"training data. Differentially private machine learning algorithms are " +"designed in a way to prevent the algorithm to learn any specific " +"information about any individual data points and subsequently prevent the" +" model from revealing sensitive information. Depending on the stage at " +"which noise is introduced, various methods exist for applying DP to " +"machine learning algorithms. One approach involves adding noise to the " +"training data (either to the features or labels), while another method " +"entails injecting noise into the gradients of the loss function during " +"model training. Additionally, such noise can be incorporated into the " +"model's output." msgstr "" -"请注意,这些函数封装了常规的训练和测试循环,并为 :code:`fit` 和 :code:`evaluate` " -"提供了每轮的最终统计数据。您可以用自定义的训练和测试循环来替代它们,并改变网络结构,整个示例仍然可以完美运行。事实上,为什么不按照自己的喜好修改代码呢?" -#: ../../source/example-walkthrough-pytorch-mnist.rst:444 -msgid "Give It a Try" -msgstr "试试看" +#: ../../source/explanation-differential-privacy.rst:53 +#, fuzzy +msgid "Differential Privacy in Federated Learning" +msgstr "扩大联邦学习的规模" -#: ../../source/example-walkthrough-pytorch-mnist.rst:445 +#: ../../source/explanation-differential-privacy.rst:54 msgid "" -"Looking through the quickstart code description above will have given a " -"good understanding of how *clients* and *servers* work in Flower, how to " -"run a simple experiment, and the internals of a client wrapper. Here are " -"a few things you could try on your own and get more experience with " -"Flower:" +"Federated learning is a data minimization approach that allows multiple " +"parties to collaboratively train a model without sharing their raw data. " +"However, federated learning also introduces new privacy challenges. The " +"model updates between parties and the central server can leak information" +" about the local data. These leaks can be exploited by attacks such as " +"membership inference and property inference attacks, or model inversion " +"attacks." msgstr "" -"通过上面的快速入门代码描述,你将对 Flower " -"中*客户端*和*服务器*的工作方式、如何运行一个简单的实验以及客户端封装器的内部结构有一个很好的了解。您可以自己尝试以下内容,以获得更多使用 " -"Flower 的经验:" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:448 -msgid "" -"Try and change :code:`PytorchMNISTClient` so it can accept different " -"architectures." 
-msgstr "尝试修改 :code:`PytorchMNISTClient`,使其可以接受不同的架构。" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:449 -msgid "Modify the :code:`train` function so that it accepts different optimizers" -msgstr "修改 :code:`train` 函数,使其接受不同的优化器" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:450 -msgid "" -"Modify the :code:`test` function so that it proves not only the top-1 " -"(regular accuracy) but also the top-5 accuracy?" -msgstr "修改 :code:`test` 函数,使其不仅能输出前 1 名(常规精确度),还能证明前 5 名的精确度?" -#: ../../source/example-walkthrough-pytorch-mnist.rst:451 -msgid "" -"Go larger! Try to adapt the code to larger images and datasets. Why not " -"try training on ImageNet with a ResNet-50?" -msgstr "让我们尝试让代码适应更大的图像和数据集。为什么不尝试使用 ResNet-50 在 ImageNet 上进行训练呢?" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:453 -msgid "You are ready now. Enjoy learning in a federated way!" -msgstr "您现在已经准备就绪。尽情享受联邦学习的乐趣吧!" - -#: ../../source/explanation-differential-privacy.rst:2 -msgid "Differential privacy" -msgstr "差别隐私" - -#: ../../source/explanation-differential-privacy.rst:4 +#: ../../source/explanation-differential-privacy.rst:58 msgid "" -"Flower provides differential privacy (DP) wrapper classes for the easy " -"integration of the central DP guarantees provided by DP-FedAvg into " -"training pipelines defined in any of the various ML frameworks that " -"Flower is compatible with." +"DP can play a crucial role in federated learning to provide privacy for " +"the clients' data." msgstr "" -"Flower 提供了差分隐私 (DP) 封装类,可将 DP-FedAvg 提供的核心 DP 轻松集成到 Flower 兼容的各种 ML " -"框架中定义的训练模式中。" - -#: ../../source/explanation-differential-privacy.rst:7 -#, fuzzy -msgid "" -"Please note that these components are still experimental; the correct " -"configuration of DP for a specific task is still an unsolved problem." -msgstr "请注意,这些组件仍处于试验阶段,如何为特定任务正确配置 DP 仍是一个尚未解决的问题。" - -#: ../../source/explanation-differential-privacy.rst:10 -msgid "" -"The name DP-FedAvg is misleading since it can be applied on top of any FL" -" algorithm that conforms to the general structure prescribed by the " -"FedOpt family of algorithms." -msgstr "DP-FedAvg 这个名称容易引起误解,因为它可以应用于任何符合 FedOpt 系列算法规定的一般结构的 FL 算法之上。" - -#: ../../source/explanation-differential-privacy.rst:13 -msgid "DP-FedAvg" -msgstr "DP-FedAvg" -#: ../../source/explanation-differential-privacy.rst:15 +#: ../../source/explanation-differential-privacy.rst:60 msgid "" -"DP-FedAvg, originally proposed by McMahan et al. [mcmahan]_ and extended " -"by Andrew et al. [andrew]_, is essentially FedAvg with the following " -"modifications." -msgstr "DP-FedAvg 最初由McMahan等人提出,并由Andrew等人加以扩展。" +"Depending on the granularity of privacy provision or the location of " +"noise addition, different forms of DP exist in federated learning. In " +"this explainer, we focus on two approaches of DP utilization in federated" +" learning based on where the noise is added: at the server (also known as" +" the center) or at the client (also known as the local)." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:17 +#: ../../source/explanation-differential-privacy.rst:63 msgid "" -"**Clipping** : The influence of each client's update is bounded by " -"clipping it. This is achieved by enforcing a cap on the L2 norm of the " -"update, scaling it down if needed." -msgstr "**裁剪** : 裁剪会影响到每个客户端的模型参数。具体做法是对参数的 L2 准则设置上限,必要时将其缩减。" +"**Central Differential Privacy**: DP is applied by the server and the " +"goal is to prevent the aggregated model from leaking information about " +"each client's data." 
+msgstr "" -#: ../../source/explanation-differential-privacy.rst:18 +#: ../../source/explanation-differential-privacy.rst:65 msgid "" -"**Noising** : Gaussian noise, calibrated to the clipping threshold, is " -"added to the average computed at the server." -msgstr "**噪声** : 在服务器计算出的平均值中加入高斯噪声,该噪声根据剪切阈值进行校准。" +"**Local Differential Privacy**: DP is applied on the client side before " +"sending any information to the server and the goal is to prevent the " +"updates that are sent to the server from leaking any information about " +"the client's data." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:20 +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:68 +#: ../../source/how-to-use-differential-privacy.rst:11 #, fuzzy -msgid "" -"The distribution of the update norm has been shown to vary from task-to-" -"task and to evolve as training progresses. This variability is crucial in" -" understanding its impact on differential privacy guarantees, emphasizing" -" the need for an adaptive approach [andrew]_ that continuously adjusts " -"the clipping threshold to track a prespecified quantile of the update " -"norm distribution." -msgstr "事实证明,参数更新准则的分布会随着任务的不同而变化,并随着训练的进展而演变。因此,我们采用了一种自适应方法,该方法会不断调整剪切阈值,以跟踪参数更新准则分布的预设量化值。" - -#: ../../source/explanation-differential-privacy.rst:23 -msgid "Simplifying Assumptions" -msgstr "简化假设" +msgid "Central Differential Privacy" +msgstr "差分隐私" -#: ../../source/explanation-differential-privacy.rst:25 -#, fuzzy +#: ../../source/explanation-differential-privacy.rst:69 msgid "" -"We make (and attempt to enforce) a number of assumptions that must be " -"satisfied to ensure that the training process actually realizes the " -":math:`(\\epsilon, \\delta)` guarantees the user has in mind when " -"configuring the setup." +"In this approach, which is also known as user-level DP, the central " +"server is responsible for adding noise to the globally aggregated " +"parameters. It should be noted that trust in the server is required." msgstr "" -"我们提出(并试图执行)了一系列必须满足的假设,以确保训练过程真正实现用户在配置设置时所定的 :math:`(\\epsilon,\\delta)`" -" 。" - -#: ../../source/explanation-differential-privacy.rst:27 -msgid "" -"**Fixed-size subsampling** :Fixed-size subsamples of the clients must be " -"taken at each round, as opposed to variable-sized Poisson subsamples." -msgstr "** 固定大小的子样本** :与可变大小的泊松分布子样本相比,每轮必须抽取固定大小的客户端子样本。" - -#: ../../source/explanation-differential-privacy.rst:28 -msgid "" -"**Unweighted averaging** : The contributions from all the clients must " -"weighted equally in the aggregate to eliminate the requirement for the " -"server to know in advance the sum of the weights of all clients available" -" for selection." -msgstr "**非加权平均**: 所有客户端的贡献必须加权相等,这样服务器就不需要事先知道所有客户的权重总和。" -#: ../../source/explanation-differential-privacy.rst:29 +#: ../../source/explanation-differential-privacy.rst:76 msgid "" -"**No client failures** : The set of available clients must stay constant " -"across all rounds of training. In other words, clients cannot drop out or" -" fail." -msgstr "**没有失败的客户端** : 在各轮训练中,可用客户端的数量必须保持不变。换句话说,客户端不能退出或失败。" +"While there are various ways to implement central DP in federated " +"learning, we concentrate on the algorithms proposed by [2] and [3]. The " +"overall approach is to clip the model updates sent by the clients and add" +" some amount of noise to the aggregated model. In each iteration, a " +"random set of clients is chosen with a specific probability for training." 
+" Each client performs local training on its own data. The update of each " +"client is then clipped by some value `S` (sensitivity `S`). This would " +"limit the impact of any individual client which is crucial for privacy " +"and often beneficial for robustness. A common approach to achieve this is" +" by restricting the `L2` norm of the clients' model updates, ensuring " +"that larger updates are scaled down to fit within the norm `S`." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:31 -#, fuzzy -msgid "" -"The first two are useful for eliminating a multitude of complications " -"associated with calibrating the noise to the clipping threshold, while " -"the third one is required to comply with the assumptions of the privacy " -"analysis." -msgstr "前两种方法有助于消除将噪声校准为削波阈值所带来的诸多复杂问题,而第三种方法则需要符合隐私分析的假设。" +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "clipping" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:34 +#: ../../source/explanation-differential-privacy.rst:89 msgid "" -"These restrictions are in line with constraints imposed by Andrew et al. " -"[andrew]_." -msgstr "这些限制与 Andrew 等人所施加的限制一致。" - -#: ../../source/explanation-differential-privacy.rst:37 -msgid "Customizable Responsibility for Noise injection" -msgstr "可定制的噪声注入" +"Afterwards, the Gaussian mechanism is used to add noise in order to " +"distort the sum of all clients' updates. The amount of noise is scaled to" +" the sensitivity value to obtain a privacy guarantee. The Gaussian " +"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " +"noise_scale * S ) / (number of sampled clients)`." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:38 -msgid "" -"In contrast to other implementations where the addition of noise is " -"performed at the server, you can configure the site of noise injection to" -" better match your threat model. We provide users with the flexibility to" -" set up the training such that each client independently adds a small " -"amount of noise to the clipped update, with the result that simply " -"aggregating the noisy updates is equivalent to the explicit addition of " -"noise to the non-noisy aggregate at the server." -msgstr "与其他在服务器上添加噪声的实现方法不同,您可以配置噪声注入的位置,以便更好地匹配您的威胁模型。我们为用户提供了设置训练的灵活性,使每个客户端都能独立地为剪切参数更新添加少量噪声,这样,只需聚合噪声更新,就相当于在服务器上为非噪声聚合添加噪声了。" +#: ../../source/explanation-differential-privacy.rst:94 +msgid "Clipping" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:41 +#: ../../source/explanation-differential-privacy.rst:96 msgid "" -"To be precise, if we let :math:`m` be the number of clients sampled each " -"round and :math:`\\sigma_\\Delta` be the scale of the total Gaussian " -"noise that needs to be added to the sum of the model updates, we can use " -"simple maths to show that this is equivalent to each client adding noise " -"with scale :math:`\\sigma_\\Delta/\\sqrt{m}`." +"There are two forms of clipping commonly used in Central DP: Fixed " +"Clipping and Adaptive Clipping." msgstr "" -"准确地说,我们假设每轮采样的客户端数量为:math:`m`,:math:`\\sigma_\\Delta` " -"为需要添加到模型更新总和中的总高斯噪声的规模,我们就可以用简单的数学方法证明了,这相当于每个客户端都添加了规模为 " -":math:`\\sigma_\\Delta/\\sqrt{m}` 的噪声。" - -#: ../../source/explanation-differential-privacy.rst:44 -msgid "Wrapper-based approach" -msgstr "基于封装的方法" -#: ../../source/explanation-differential-privacy.rst:46 +#: ../../source/explanation-differential-privacy.rst:98 msgid "" -"Introducing DP to an existing workload can be thought of as adding an " -"extra layer of security around it. 
This inspired us to provide the " -"additional server and client-side logic needed to make the training " -"process differentially private as wrappers for instances of the " -":code:`Strategy` and :code:`NumPyClient` abstract classes respectively. " -"This wrapper-based approach has the advantage of being easily composable " -"with other wrappers that someone might contribute to the Flower library " -"in the future, e.g., for secure aggregation. Using Inheritance instead " -"can be tedious because that would require the creation of new sub- " -"classes every time a new class implementing :code:`Strategy` or " -":code:`NumPyClient` is defined." -msgstr "" -"在现有工作负载中引入 DP 可以被认为是在其周围增加了一层额外的安全性。受此启发,我们提供了额外的服务器端和客户端逻辑,分别作为 " -":code:`Strategy` 和 :code:`NumPyClient` " -"抽象类实例的封装器,使训练过程具有不同的隐私性。这种基于封装器的方法的优点是可以很容易地与将来有人贡献给 Flower " -"的其他封装器(例如用于安全聚合的封装器)进行组合。使用继承可能会比较繁琐,因为每次定义实现 :code:`Strategy` 或 " -":code:`NumPyClient` 的新类时,都需要创建新的子类。" - -#: ../../source/explanation-differential-privacy.rst:49 -msgid "Server-side logic" -msgstr "服务器端逻辑" +"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " +"of clients' updates. Any update exceeding this threshold is clipped back " +"to the threshold value." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:51 -#, fuzzy +#: ../../source/explanation-differential-privacy.rst:100 msgid "" -"The first version of our solution was to define a decorator whose " -"constructor accepted, among other things, a boolean-valued variable " -"indicating whether adaptive clipping was to be enabled or not. We quickly" -" realized that this would clutter its :code:`__init__()` function with " -"variables corresponding to hyperparameters of adaptive clipping that " -"would remain unused when it was disabled. A cleaner implementation could " -"be achieved by splitting the functionality into two decorators, " -":code:`DPFedAvgFixed` and :code:`DPFedAvgAdaptive`, with the latter sub- " -"classing the former. The constructors for both classes accept a boolean " -"parameter :code:`server_side_noising`, which, as the name suggests, " -"determines where noising is to be performed." +"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " +"on the observed update distribution [4]. It means that the clipping value" +" is tuned during the rounds with respect to the quantile of the update " +"norm distribution." msgstr "" -"我们的第一版解决方案是定义一个装饰器,其构造函数接受一个布尔值变量,表示是否启用自适应剪裁。我们很快意识到,这样会使其 " -":code:`__init__()` " -"函数中与自适应裁剪超参数相对应的变量变得杂乱无章,而这些变量在自适应裁剪被禁用时将保持未使用状态。要实现更简洁的功能,可以将该功能拆分为两个装饰器,即" -" :code:`DPFedAvgFixed` 和 " -":code:`DPFedAvgAdaptive`,后者是前者的子类。这两个类的构造函数都接受一个布尔参数 " -":code:`server_side_noising`,顾名思义,它决定了在哪里加噪声。" - -#: ../../source/explanation-differential-privacy.rst:54 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 -msgid "DPFedAvgFixed" -msgstr "DPFedAvgFixed" -#: ../../source/explanation-differential-privacy.rst:56 +#: ../../source/explanation-differential-privacy.rst:102 msgid "" -"The server-side capabilities required for the original version of DP-" -"FedAvg, i.e., the one which performed fixed clipping, can be completely " -"captured with the help of wrapper logic for just the following two " -"methods of the :code:`Strategy` abstract class." +"The choice between fixed and adaptive clipping depends on various factors" +" such as privacy requirements, data distribution, model complexity, and " +"others." 
msgstr "" -"只需对 :code:`Strategy` 抽象类的以下两个方法进行封装,就能完全捕获 DP-FedAvg " -"原始版本(即执行固定剪裁的版本)所需的服务器端功能。" -#: ../../source/explanation-differential-privacy.rst:58 -msgid "" -":code:`configure_fit()` : The config dictionary being sent by the wrapped" -" :code:`Strategy` to each client needs to be augmented with an additional" -" value equal to the clipping threshold (keyed under " -":code:`dpfedavg_clip_norm`) and, if :code:`server_side_noising=true`, " -"another one equal to the scale of the Gaussian noise that needs to be " -"added at the client (keyed under :code:`dpfedavg_noise_stddev`). This " -"entails *post*-processing of the results returned by the wrappee's " -"implementation of :code:`configure_fit()`." -msgstr "" -":code:`configure_fit()` :由封装的 :code:`Strategy` " -"发送到每个客户端的配置字典需要使用等于裁剪阈值的附加值(在 :code:`dpfedavg_clip_norm` 下键入)进行扩充。并且,如果 " -"server_side_noising=true,则另一个值等于需要在客户端添加的高斯噪声的大小(在 dpfedavg_noise_stddev " -"下键入)。这需要对封装后的configure_fit() 所返回的结果进行后处理。" - -#: ../../source/explanation-differential-privacy.rst:59 -#, fuzzy -msgid "" -":code:`aggregate_fit()`: We check whether any of the sampled clients " -"dropped out or failed to upload an update before the round timed out. In " -"that case, we need to abort the current round, discarding any successful " -"updates that were received, and move on to the next one. On the other " -"hand, if all clients responded successfully, we must force the averaging " -"of the updates to happen in an unweighted manner by intercepting the " -":code:`parameters` field of :code:`FitRes` for each received update and " -"setting it to 1. Furthermore, if :code:`server_side_noising=true`, each " -"update is perturbed with an amount of noise equal to what it would have " -"been subjected to had client-side noising being enabled. This entails " -"*pre*-processing of the arguments to this method before passing them on " -"to the wrappee's implementation of :code:`aggregate_fit()`." -msgstr "" -":code:`aggregate_fit()`: " -"我们会检查是否有任何客户端在本轮超时前退出或未能上传参数更新。在这种情况下,我们需要中止当前一轮,丢弃已收到的所有参数更新,然后继续下一轮。另一方面,如果所有客户端都成功响应,我们就必须通过拦截" -" :code:`FitRes` 的 :code:`parameters` 字段并将其设置为 1,强制以不加权的方式平均更新。此外,如果 " -":code:`server_side_noising=true`,每次更新都会受到一定量的噪声扰动,其扰动量相当于启用客户端噪声时的扰动量。 " -"这就需要在将本方法的参数传递给封装的 :code:`aggregate_fit()` 之前,对参数进行*预*处理。" - -#: ../../source/explanation-differential-privacy.rst:62 -msgid "" -"We can't directly change the aggregation function of the wrapped strategy" -" to force it to add noise to the aggregate, hence we simulate client-side" -" noising to implement server-side noising." -msgstr "我们无法直接改变封装策略的聚合函数,迫使它在聚合中添加噪声,因此我们模拟客户端噪声来实现服务器端噪声。" - -#: ../../source/explanation-differential-privacy.rst:64 -msgid "" -"These changes have been put together into a class called " -":code:`DPFedAvgFixed`, whose constructor accepts the strategy being " -"decorated, the clipping threshold and the number of clients sampled every" -" round as compulsory arguments. The user is expected to specify the " -"clipping threshold since the order of magnitude of the update norms is " -"highly dependent on the model being trained and providing a default value" -" would be misleading. The number of clients sampled at every round is " -"required to calculate the amount of noise that must be added to each " -"individual update, either by the server or the clients." 
-msgstr "" -"这些变化被整合到一个名为 :code:`DPFedAvgFixed` " -"的类中,其构造函数接受被装饰的策略、剪切阈值和每轮采样的客户数作为必选参数。用户需要指定剪切阈值,因为参数更新规范的数量级在很大程度上取决于正在训练的模型,提供默认值会产生误导。每轮采样的客户端数量是计算服务器或客户在每次参数更新时添加的噪音量所必需的。" - -#: ../../source/explanation-differential-privacy.rst:67 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 -msgid "DPFedAvgAdaptive" -msgstr "DPFedAvgAdaptive" +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:105 +#: ../../source/how-to-use-differential-privacy.rst:96 +#, fuzzy +msgid "Local Differential Privacy" +msgstr "差分隐私" -#: ../../source/explanation-differential-privacy.rst:69 +#: ../../source/explanation-differential-privacy.rst:107 msgid "" -"The additional functionality required to facilitate adaptive clipping has" -" been provided in :code:`DPFedAvgAdaptive`, a subclass of " -":code:`DPFedAvgFixed`. It overrides the above-mentioned methods to do the" -" following." +"In this approach, each client is responsible for performing DP. Local DP " +"avoids the need for a fully trusted aggregator, but it should be noted " +"that local DP leads to a decrease in accuracy but better privacy in " +"comparison to central DP." msgstr "" -"自适应剪裁所需的附加功能在 :code:`DPFedAvgAdaptive` 中提供,其是 :code:`DPFedAvgFixed` " -"的子类。它重写了上述方法,以实现以下功能。" -#: ../../source/explanation-differential-privacy.rst:71 -msgid "" -":code:`configure_fit()` : It intercepts the config dict returned by " -":code:`super.configure_fit()` to add the key-value pair " -":code:`dpfedavg_adaptive_clip_enabled:True` to it, which the client " -"interprets as an instruction to include an indicator bit (1 if update " -"norm <= clipping threshold, 0 otherwise) in the results returned by it." +#: ../../source/explanation-differential-privacy.rst:116 +msgid "In this explainer, we focus on two forms of achieving Local DP:" msgstr "" -":code:`configure_fit()`:它截取由 :code:`super.configure_fit()` 返回的 config " -"字典,并在其中添加键-值对 " -":code:`dpfedavg_adaptive_clip_enabled:True\",客户端将其解释为在返回结果中包含一个指示位(如果参数更新范式" -" <= 剪裁阈值,则为 1,否则为 0)的指令。" -#: ../../source/explanation-differential-privacy.rst:73 +#: ../../source/explanation-differential-privacy.rst:118 msgid "" -":code:`aggregate_fit()` : It follows a call to " -":code:`super.aggregate_fit()` with one to :code:`__update_clip_norm__()`," -" a procedure which adjusts the clipping threshold on the basis of the " -"indicator bits received from the sampled clients." -msgstr ":code:`aggregate_fit()`:在调用:code:`super.aggregate_fit()`后,再调用:code:`__update_clip_norm__()`,该过程根据从采样客户端接收到的指示位调整裁剪阈值。" - -#: ../../source/explanation-differential-privacy.rst:77 -msgid "Client-side logic" -msgstr "客户端逻辑" +"Each client adds noise to the local updates before sending them to the " +"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " +"the sensitivity of the local model to be ∆, Gaussian noise is applied " +"with a noise scale of σ where:" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:79 +#: ../../source/explanation-differential-privacy.rst:120 msgid "" -"The client-side capabilities required can be completely captured through " -"wrapper logic for just the :code:`fit()` method of the " -":code:`NumPyClient` abstract class. To be precise, we need to *post-" -"process* the update computed by the wrapped client to clip it, if " -"necessary, to the threshold value supplied by the server as part of the " -"config dictionary. 
In addition to this, it may need to perform some extra" -" work if either (or both) of the following keys are also present in the " -"dict." +"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times " +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +"\n" msgstr "" -"客户端所需的功能完全可以通过 :code:`NumPyClient` 抽象类的 :code:`fit()` " -"方法的封装逻辑来实现。准确地说,我们需要对封装客户端计算的参数更新进行处理,以便在必要时将其剪切到服务器作为配置字典的一部分提供的阈值。除此之外,如果配置字典中还存在以下任一(或两个)键,客户端可能还需要执行一些额外的工作。" -#: ../../source/explanation-differential-privacy.rst:81 +#: ../../source/explanation-differential-privacy.rst:125 msgid "" -":code:`dpfedavg_noise_stddev` : Generate and add the specified amount of " -"noise to the clipped update." -msgstr "code:`dpfedavg_noise_stddev`:生成并在剪切参数更新中添加指定数量的噪声。" +"Each client adds noise to the gradients of the model during the local " +"training (DP-SGD). More specifically, in this approach, gradients are " +"clipped and an amount of calibrated noise is injected into the gradients." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:82 +#: ../../source/explanation-differential-privacy.rst:128 msgid "" -":code:`dpfedavg_adaptive_clip_enabled` : Augment the metrics dict in the " -":code:`FitRes` object being returned to the server with an indicator bit," -" calculated as described earlier." +"Please note that these two approaches are providing privacy at different " +"levels." msgstr "" -":code:`dpfedavg_adaptive_clip_enabled`:在返回给服务器的 :code:`FitRes` " -"对象中的度量值字典中增加一个指标位,计算方法如前所述。" -#: ../../source/explanation-differential-privacy.rst:86 -msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" -msgstr "进行 :math:`(epsilon, \\delta)` 分析" +#: ../../source/explanation-differential-privacy.rst:131 +#, fuzzy +msgid "**References:**" +msgstr "参考资料" -#: ../../source/explanation-differential-privacy.rst:88 -msgid "" -"Assume you have trained for :math:`n` rounds with sampling fraction " -":math:`q` and noise multiplier :math:`z`. In order to calculate the " -":math:`\\epsilon` value this would result in for a particular " -":math:`\\delta`, the following script may be used." +#: ../../source/explanation-differential-privacy.rst:133 +msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." msgstr "" -"假设您已经训练了 :math:`n` 轮,采样比例为 :math:`q`,噪声乘数为 :math:`z`。为了计算特定 " -":math:`\\delta` 的 :math:`epsilon` 值,可以使用下面的脚本。" -#: ../../source/explanation-differential-privacy.rst:98 +#: ../../source/explanation-differential-privacy.rst:135 #, fuzzy msgid "" -"McMahan et al. \"Learning Differentially Private Recurrent Language " -"Models.\" International Conference on Learning Representations (ICLR), " -"2017." +"[2] McMahan et al. Learning Differentially Private Recurrent Language " +"Models." msgstr "" "McMahan, H. Brendan等. \"Learning differentially private recurrent " "language models.\" arXiv preprint arXiv:1710.06963 (2017)." -#: ../../source/explanation-differential-privacy.rst:100 -#, fuzzy +#: ../../source/explanation-differential-privacy.rst:137 msgid "" -"Andrew, Galen, et al. \"Differentially Private Learning with Adaptive " -"Clipping.\" Advances in Neural Information Processing Systems (NeurIPS), " -"2021." +"[3] Geyer et al. Differentially Private Federated Learning: A Client " +"Level Perspective." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:139 +#, fuzzy +msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." msgstr "" "Andrew, Galen等. 
\"Differentially private learning with adaptive " "clipping.\" Advances in Neural Information Processing Systems 34 (2021): " @@ -4382,6 +4051,7 @@ msgid "As a reference, this document follows the above structure." msgstr "作为参考,本文件采用上述结构。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 +#: ../../source/ref-api/flwr.common.Metadata.rst:2 msgid "Metadata" msgstr "描述数据" @@ -4715,15 +4385,15 @@ msgid "" msgstr "在某些情况下,有必要向不同的客户端发送不同的配置值。" #: ../../source/how-to-configure-clients.rst:89 +#, fuzzy msgid "" "This can be achieved by customizing an existing strategy or by " -"`implementing a custom strategy from scratch " -"`_. " -"Here's a nonsensical example that customizes :code:`FedAvg` by adding a " -"custom ``\"hello\": \"world\"`` configuration key/value pair to the " -"config dict of a *single client* (only the first client in the list, the " -"other clients in this round to not receive this \"special\" config " -"value):" +":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes :code:`FedAvg`" +" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" +" to the config dict of a *single client* (only the first client in the " +"list, the other clients in this round to not receive this \"special\" " +"config value):" msgstr "" "这可以通过定制现有策略或 `从头开始实施一个定制策略 `_" +msgid ":doc:`How to run Flower using Docker `" msgstr "" "`TensorFlow快速入门 (教程) `_" @@ -5620,15 +5290,15 @@ msgid "Resources" msgstr "资源" #: ../../source/how-to-monitor-simulation.rst:234 +#, fuzzy msgid "" -"Ray Dashboard: ``_" +"Ray Dashboard: ``_" msgstr "Ray 仪表盘: ``_" #: ../../source/how-to-monitor-simulation.rst:236 -msgid "" -"Ray Metrics: ``_" +#, fuzzy +msgid "Ray Metrics: ``_" msgstr "" "Ray 指标: ``_" @@ -6650,7 +6320,8 @@ msgstr "除了上述必要的改动之外,还有一些潜在的改进措施: msgid "" "Remove \"placeholder\" methods from subclasses of ``Client`` or " "``NumPyClient``. If you, for example, use server-side evaluation, then " -"empty placeholder implementations of ``evaluate`` are no longer necessary." +"empty placeholder implementations of ``evaluate`` are no longer " +"necessary." msgstr "" "删除 ``Client`` 或 ``NumPyClient`` 子类中的 \"占位符 " "\"方法。例如,如果你使用服务器端评估,那么就不再需要``evaluate``的 \"空占位符 \"实现。" @@ -6798,25 +6469,175 @@ msgid "" msgstr "" #: ../../source/how-to-use-built-in-mods.rst:89 -msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" msgstr "" -#: ../../source/how-to-use-strategies.rst:2 -msgid "Use strategies" -msgstr "使用策略" +#: ../../source/how-to-use-differential-privacy.rst:2 +#, fuzzy +msgid "Use Differential Privacy" +msgstr "差分隐私" -#: ../../source/how-to-use-strategies.rst:4 +#: ../../source/how-to-use-differential-privacy.rst:3 msgid "" -"Flower allows full customization of the learning process through the " -":code:`Strategy` abstraction. A number of built-in strategies are " -"provided in the core framework." -msgstr "Flower 允许通过 :code:`Strategy` 抽象类对学习过程进行完全定制。核心框架中提供了许多内置策略。" +"This guide explains how you can utilize differential privacy in the " +"Flower framework. If you are not yet familiar with differential privacy, " +"you can refer to :doc:`explanation-differential-privacy`." 
+msgstr "" -#: ../../source/how-to-use-strategies.rst:6 +#: ../../source/how-to-use-differential-privacy.rst:7 msgid "" -"There are three ways to customize the way Flower orchestrates the " -"learning process on the server side:" -msgstr "有三种方法可以自定义 Flower 在服务器端协调学习过程的方式:" +"Differential Privacy in Flower is in a preview phase. If you plan to use " +"these features in a production environment with sensitive data, feel free" +" contact us to discuss your requirements and to receive guidance on how " +"to best use these features." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:12 +msgid "" +"This approach consists of two seprate phases: clipping of the updates and" +" adding noise to the aggregated model. For the clipping phase, Flower " +"framework has made it possible to decide whether to perform clipping on " +"the server side or the client side." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:15 +msgid "" +"**Server-side Clipping**: This approach has the advantage of the server " +"enforcing uniform clipping across all clients' updates and reducing the " +"communication overhead for clipping values. However, it also has the " +"disadvantage of increasing the computational load on the server due to " +"the need to perform the clipping operation for all clients." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:16 +msgid "" +"**Client-side Clipping**: This approach has the advantage of reducing the" +" computational overhead on the server. However, it also has the " +"disadvantage of lacking centralized control, as the server has less " +"control over the clipping process." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:21 +#, fuzzy +msgid "Server-side Clipping" +msgstr "服务器端逻辑" + +#: ../../source/how-to-use-differential-privacy.rst:22 +msgid "" +"For central DP with server-side clipping, there are two :code:`Strategy` " +"classes that act as wrappers around the actual :code:`Strategy` instance " +"(for example, :code:`FedAvg`). The two wrapper classes are " +":code:`DifferentialPrivacyServerSideFixedClipping` and " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " +"adaptive clipping." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:-1 +#, fuzzy +msgid "server side clipping" +msgstr "服务器端逻辑" + +#: ../../source/how-to-use-differential-privacy.rst:31 +msgid "" +"The code sample below enables the :code:`FedAvg` strategy to use server-" +"side fixed clipping using the " +":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " +"same approach can be used with " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " +"corresponding input parameters." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:52 +#, fuzzy +msgid "Client-side Clipping" +msgstr "客户端逻辑" + +#: ../../source/how-to-use-differential-privacy.rst:53 +msgid "" +"For central DP with client-side clipping, the server sends the clipping " +"value to selected clients on each round. Clients can use existing Flower " +":code:`Mods` to perform the clipping. Two mods are available for fixed " +"and adaptive client-side clipping: :code:`fixedclipping_mod` and " +":code:`adaptiveclipping_mod` with corresponding server-side wrappers " +":code:`DifferentialPrivacyClientSideFixedClipping` and " +":code:`DifferentialPrivacyClientSideAdaptiveClipping`." 
+msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:-1 +#, fuzzy +msgid "client side clipping" +msgstr "客户端逻辑" + +#: ../../source/how-to-use-differential-privacy.rst:63 +msgid "" +"The code sample below enables the :code:`FedAvg` strategy to use " +"differential privacy with client-side fixed clipping using both the " +":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " +"the client, :code:`fixedclipping_mod`:" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:80 +msgid "" +"In addition to the server-side strategy wrapper, the :code:`ClientApp` " +"needs to configure the matching :code:`fixedclipping_mod` to perform the " +"client-side clipping:" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:97 +msgid "" +"To utilize local differential privacy (DP) and add noise to the client " +"model parameters before transmitting them to the server in Flower, you " +"can use the `LocalDpMod`. The following hyperparameters need to be set: " +"clipping norm value, sensitivity, epsilon, and delta." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "local DP mod" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:104 +msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:122 +msgid "" +"Please note that the order of mods, especially those that modify " +"parameters, is important when using multiple modifiers. Typically, " +"differential privacy (DP) modifiers should be the last to operate on " +"parameters." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:125 +msgid "Local Training using Privacy Engines" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:126 +msgid "" +"For ensuring data instance-level privacy during local model training on " +"the client side, consider leveraging privacy engines such as Opacus and " +"TensorFlow Privacy. For examples of using Flower with these engines, " +"please refer to the Flower examples directory (`Opacus " +"`_, `Tensorflow" +" Privacy `_)." +msgstr "" + +#: ../../source/how-to-use-strategies.rst:2 +msgid "Use strategies" +msgstr "使用策略" + +#: ../../source/how-to-use-strategies.rst:4 +msgid "" +"Flower allows full customization of the learning process through the " +":code:`Strategy` abstraction. A number of built-in strategies are " +"provided in the core framework." 
+msgstr "Flower 允许通过 :code:`Strategy` 抽象类对学习过程进行完全定制。核心框架中提供了许多内置策略。" + +#: ../../source/how-to-use-strategies.rst:6 +msgid "" +"There are three ways to customize the way Flower orchestrates the " +"learning process on the server side:" +msgstr "有三种方法可以自定义 Flower 在服务器端协调学习过程的方式:" #: ../../source/how-to-use-strategies.rst:8 msgid "Use an existing strategy, for example, :code:`FedAvg`" @@ -6925,11 +6746,11 @@ msgstr "快速入门教程" msgid "How-to guides" msgstr "操作指南" -#: ../../source/index.rst:97 +#: ../../source/index.rst:98 msgid "Legacy example guides" msgstr "旧版指南范例" -#: ../../source/index.rst:108 ../../source/index.rst:112 +#: ../../source/index.rst:109 ../../source/index.rst:113 msgid "Explanations" msgstr "说明" @@ -6937,23 +6758,23 @@ msgstr "说明" msgid "API reference" msgstr "应用程序接口参考" -#: ../../source/index.rst:137 +#: ../../source/index.rst:138 msgid "Reference docs" msgstr "参考文档" -#: ../../source/index.rst:153 +#: ../../source/index.rst:154 msgid "Contributor tutorials" msgstr "贡献者教程" -#: ../../source/index.rst:160 +#: ../../source/index.rst:161 msgid "Contributor how-to guides" msgstr "投稿指南" -#: ../../source/index.rst:173 +#: ../../source/index.rst:174 msgid "Contributor explanations" msgstr "贡献者解释" -#: ../../source/index.rst:179 +#: ../../source/index.rst:180 msgid "Contributor references" msgstr "贡献者参考资料" @@ -7048,33 +6869,33 @@ msgid "" "specific goal." msgstr "以问题为导向的 \"如何做 \"指南逐步展示如何实现特定目标。" -#: ../../source/index.rst:110 +#: ../../source/index.rst:111 msgid "" "Understanding-oriented concept guides explain and discuss key topics and " "underlying ideas behind Flower and collaborative AI." msgstr "以理解为导向的概念指南解释并讨论了Flower和协作式人工智能背后的关键主题和基本思想。" -#: ../../source/index.rst:120 +#: ../../source/index.rst:121 msgid "References" msgstr "参考资料" -#: ../../source/index.rst:122 +#: ../../source/index.rst:123 msgid "Information-oriented API reference and other reference material." msgstr "以信息为导向的 API 参考资料和其他参考资料。" -#: ../../source/index.rst:131::1 +#: ../../source/index.rst:132::1 msgid ":py:obj:`flwr `\\" msgstr "" -#: ../../source/index.rst:131::1 flwr:1 of +#: ../../source/index.rst:132::1 flwr:1 of msgid "Flower main package." msgstr "" -#: ../../source/index.rst:148 +#: ../../source/index.rst:149 msgid "Contributor docs" msgstr "贡献者文档" -#: ../../source/index.rst:150 +#: ../../source/index.rst:151 msgid "" "The Flower community welcomes contributions. The following docs are " "intended to help along the way." @@ -7096,12 +6917,22 @@ msgstr "flower-driver-api" msgid "flower-fleet-api" msgstr "flower-fleet-api" +#: ../../source/ref-api-cli.rst:37 +#, fuzzy +msgid "flower-client-app" +msgstr "Flower 客户端。" + +#: ../../source/ref-api-cli.rst:47 +#, fuzzy +msgid "flower-server-app" +msgstr "flower-driver-api" + #: ../../source/ref-api/flwr.rst:2 #, fuzzy msgid "flwr" msgstr "Flower" -#: ../../source/ref-api/flwr.rst:25 ../../source/ref-api/flwr.server.rst:48 +#: ../../source/ref-api/flwr.rst:25 ../../source/ref-api/flwr.server.rst:52 msgid "Modules" msgstr "" @@ -7126,7 +6957,7 @@ msgid ":py:obj:`flwr.server `\\" msgstr "" #: ../../source/ref-api/flwr.rst:35::1 -#: ../../source/ref-api/flwr.server.rst:37::1 flwr.server:1 +#: ../../source/ref-api/flwr.server.rst:41::1 flwr.server:1 #: flwr.server.server.Server:1 of msgid "Flower server." 
msgstr "Flower 服务器。" @@ -7146,7 +6977,6 @@ msgstr "客户端" #: ../../source/ref-api/flwr.client.rst:13 #: ../../source/ref-api/flwr.common.rst:13 -#: ../../source/ref-api/flwr.server.driver.rst:13 #: ../../source/ref-api/flwr.server.rst:13 #: ../../source/ref-api/flwr.simulation.rst:13 #, fuzzy @@ -7186,10 +7016,10 @@ msgid "Start a Flower NumPyClient which connects to a gRPC server." msgstr "启动 Flower NumPyClient,连接到 gRPC 服务器。" #: ../../source/ref-api/flwr.client.rst:26 -#: ../../source/ref-api/flwr.common.rst:31 -#: ../../source/ref-api/flwr.server.driver.rst:24 -#: ../../source/ref-api/flwr.server.rst:28 +#: ../../source/ref-api/flwr.common.rst:32 +#: ../../source/ref-api/flwr.server.rst:29 #: ../../source/ref-api/flwr.server.strategy.rst:17 +#: ../../source/ref-api/flwr.server.workflow.rst:17 msgid "Classes" msgstr "" @@ -7204,7 +7034,7 @@ msgstr "Flower 客户端的抽象基类。" #: ../../source/ref-api/flwr.client.rst:33::1 msgid "" -":py:obj:`ClientApp `\\ \\(client\\_fn\\[\\, " +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " "mods\\]\\)" msgstr "" @@ -7232,8 +7062,12 @@ msgstr "" #: ../../source/ref-api/flwr.client.Client.rst:15 #: ../../source/ref-api/flwr.client.ClientApp.rst:15 #: ../../source/ref-api/flwr.client.NumPyClient.rst:15 +#: ../../source/ref-api/flwr.common.Array.rst:15 #: ../../source/ref-api/flwr.common.ClientMessage.rst:15 +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Context.rst:15 #: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 +#: ../../source/ref-api/flwr.common.Error.rst:15 #: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 #: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 #: ../../source/ref-api/flwr.common.FitIns.rst:15 @@ -7242,20 +7076,32 @@ msgstr "" #: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 #: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 #: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 +#: ../../source/ref-api/flwr.common.Message.rst:15 +#: ../../source/ref-api/flwr.common.MessageType.rst:15 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 +#: ../../source/ref-api/flwr.common.Metadata.rst:15 +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 #: ../../source/ref-api/flwr.common.Parameters.rst:15 +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 #: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 +#: ../../source/ref-api/flwr.common.RecordSet.rst:15 #: ../../source/ref-api/flwr.common.ServerMessage.rst:15 #: ../../source/ref-api/flwr.common.Status.rst:15 #: ../../source/ref-api/flwr.server.ClientManager.rst:15 +#: ../../source/ref-api/flwr.server.Driver.rst:15 #: ../../source/ref-api/flwr.server.History.rst:15 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 #: ../../source/ref-api/flwr.server.Server.rst:15 +#: ../../source/ref-api/flwr.server.ServerApp.rst:15 #: ../../source/ref-api/flwr.server.ServerConfig.rst:15 #: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 -#: ../../source/ref-api/flwr.server.driver.Driver.rst:15 -#: ../../source/ref-api/flwr.server.driver.GrpcDriver.rst:15 #: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 #: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 #: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 +#: 
../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 #: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 #: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 #: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 @@ -7273,6 +7119,9 @@ msgstr "" #: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 #: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 #: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 msgid "Methods" msgstr "" @@ -7350,9 +7199,12 @@ msgstr "返回客户端(本身)。" #: ../../source/ref-api/flwr.client.Client.rst:46 #: ../../source/ref-api/flwr.client.NumPyClient.rst:46 +#: ../../source/ref-api/flwr.common.Array.rst:28 #: ../../source/ref-api/flwr.common.ClientMessage.rst:25 #: ../../source/ref-api/flwr.common.Code.rst:19 +#: ../../source/ref-api/flwr.common.Context.rst:25 #: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 +#: ../../source/ref-api/flwr.common.Error.rst:25 #: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 #: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 #: ../../source/ref-api/flwr.common.EventType.rst:19 @@ -7362,10 +7214,16 @@ msgstr "返回客户端(本身)。" #: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 #: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 #: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 +#: ../../source/ref-api/flwr.common.Message.rst:37 +#: ../../source/ref-api/flwr.common.MessageType.rst:25 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 +#: ../../source/ref-api/flwr.common.Metadata.rst:25 #: ../../source/ref-api/flwr.common.Parameters.rst:25 #: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 +#: ../../source/ref-api/flwr.common.RecordSet.rst:25 #: ../../source/ref-api/flwr.common.ServerMessage.rst:25 #: ../../source/ref-api/flwr.common.Status.rst:25 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 #: ../../source/ref-api/flwr.server.ServerConfig.rst:25 msgid "Attributes" msgstr "" @@ -7383,14 +7241,25 @@ msgstr "" #: flwr.client.numpy_client.NumPyClient.fit #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.server.app.start_server +#: flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.parametersrecord.Array flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.register #: flwr.server.client_manager.ClientManager.unregister #: flwr.server.client_manager.SimpleClientManager.register #: flwr.server.client_manager.SimpleClientManager.unregister #: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.app.start_driver flwr.server.driver.driver.Driver +#: flwr.server.compat.app.start_driver flwr.server.driver.driver.Driver +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive #: flwr.server.strategy.bulyan.Bulyan +#: 
flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit #: flwr.server.strategy.fedadagrad.FedAdagrad @@ -7406,7 +7275,10 @@ msgstr "" #: flwr.server.strategy.strategy.Strategy.configure_fit #: flwr.server.strategy.strategy.Strategy.evaluate #: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow +#: flwr.simulation.app.start_simulation +#: flwr.simulation.run_simulation.run_simulation of msgid "Parameters" msgstr "参数" @@ -7424,13 +7296,17 @@ msgstr "评估指令包含从服务器接收的(全局)模型参数,以及 #: flwr.client.numpy_client.NumPyClient.fit #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.server.app.start_server +#: flwr.common.message.Message.create_reply flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.num_available #: flwr.server.client_manager.ClientManager.register #: flwr.server.client_manager.SimpleClientManager.num_available #: flwr.server.client_manager.SimpleClientManager.register #: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.app.start_driver +#: flwr.server.compat.app.start_driver +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate @@ -7454,13 +7330,17 @@ msgstr "评估结果包含本地数据集上的损失值和其他详细信息, #: flwr.client.client.Client.get_properties #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.server.app.start_server +#: flwr.common.message.Message.create_reply flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.num_available #: flwr.server.client_manager.ClientManager.register #: flwr.server.client_manager.SimpleClientManager.num_available #: flwr.server.client_manager.SimpleClientManager.register #: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.app.start_driver +#: flwr.server.compat.app.start_driver +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate @@ -7511,23 +7391,38 @@ msgstr "当前客户端属性。" msgid "ClientApp" msgstr "客户端" -#: flwr.client.client_app.ClientApp:1 flwr.common.typing.ClientMessage:1 +#: flwr.client.client_app.ClientApp:1 flwr.common.constant.MessageType:1 +#: flwr.common.constant.MessageTypeLegacy:1 
flwr.common.context.Context:1 +#: flwr.common.message.Error:1 flwr.common.message.Message:1 +#: flwr.common.message.Metadata:1 flwr.common.record.parametersrecord.Array:1 +#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 #: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 #: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 #: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 #: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 #: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 #: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 -#: flwr.common.typing.Status:1 flwr.server.app.ServerConfig:1 -#: flwr.server.driver.driver.Driver:1 -#: flwr.server.driver.grpc_driver.GrpcDriver:1 flwr.server.history.History:1 -#: flwr.server.server.Server:1 of +#: flwr.common.typing.Status:1 flwr.server.driver.driver.Driver:1 +#: flwr.server.history.History:1 flwr.server.server.Server:1 +#: flwr.server.server_app.ServerApp:1 flwr.server.server_config.ServerConfig:1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of msgid "Bases: :py:class:`object`" msgstr "" -#: flwr.client.app.start_client:33 flwr.client.app.start_numpy_client:36 -#: flwr.client.client_app.ClientApp:4 flwr.server.app.start_server:41 -#: flwr.server.driver.app.start_driver:30 of +#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 +#: flwr.client.client_app.ClientApp:4 +#: flwr.client.client_app.ClientApp.evaluate:4 +#: flwr.client.client_app.ClientApp.query:4 +#: flwr.client.client_app.ClientApp.train:4 flwr.server.app.start_server:41 +#: flwr.server.compat.app.start_driver:32 flwr.server.server_app.ServerApp:4 +#: flwr.server.server_app.ServerApp.main:4 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 +#: of msgid "Examples" msgstr "实例" @@ -7550,6 +7445,34 @@ msgid "" "global attribute `app` that points to an object of type `ClientApp`." msgstr "" +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`evaluate `\\ \\(\\)" +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid "Return a decorator that registers the evaluate fn with the client app." +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`query `\\ \\(\\)" +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.query:1 of +msgid "Return a decorator that registers the query fn with the client app." +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +#, fuzzy +msgid ":py:obj:`train `\\ \\(\\)" +msgstr "server.strategy.Strategy" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.train:1 of +msgid "Return a decorator that registers the train fn with the client app." 
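The :code:`ClientApp` entries above list an optional :code:`client_fn`/:code:`mods` constructor and :code:`train`/:code:`evaluate`/:code:`query` decorators. A rough sketch of the decorator path follows; the handler signature :code:`(Message, Context) -> Message` is an assumption based on the message types documented in :code:`flwr.common`, not something these strings spell out.

.. code-block:: python

    # Sketch only: the handler signature is an assumption.
    from flwr.client import ClientApp
    from flwr.common import Context, Message

    app = ClientApp()

    @app.train()
    def train(msg: Message, ctx: Context) -> Message:
        # Placeholder handler: echo the received content back with the same TTL.
        return msg.create_reply(content=msg.content, ttl=msg.metadata.ttl)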
+msgstr "" + #: ../../source/ref-api/flwr.client.NumPyClient.rst:2 msgid "NumPyClient" msgstr "NumPyClient" @@ -7764,7 +7687,7 @@ msgstr "" "服务器需要以相同的值启动(请参阅 `flwr.server.start_server`),否则它将不知道增加的限制并阻止更大的消息。" #: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 -#: flwr.server.driver.app.start_driver:21 of +#: flwr.server.compat.app.start_driver:21 of msgid "" "The PEM-encoded root certificates as a byte string or a path string. If " "provided, a secure connection using the certificates will be established " @@ -7786,16 +7709,30 @@ msgstr "" "配置传输层:允许的值包括 - 'grpc-bidi': gRPC,双向流 - 'grpc-rere': gRPC,请求-响应(实验性) - " "'rest': HTTP(实验性)" -#: flwr.client.app.start_client:34 flwr.client.app.start_numpy_client:37 of +#: flwr.client.app.start_client:31 of +msgid "" +"The maximum number of times the client will try to connect to the server " +"before giving up in case of a connection error. If set to None, there is " +"no limit to the number of tries." +msgstr "" + +#: flwr.client.app.start_client:35 of +msgid "" +"The maximum duration before the client stops trying to connect to the " +"server in case of connection error. If set to None, there is no limit to " +"the total time." +msgstr "" + +#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of msgid "Starting a gRPC client with an insecure server connection:" msgstr "使用不安全的服务器连接启动 gRPC 客户端:" -#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:44 of +#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of #, fuzzy msgid "Starting an SSL-enabled gRPC client using system certificates:" msgstr "启动支持 SSL 的 gRPC 客户端:" -#: flwr.client.app.start_client:52 flwr.client.app.start_numpy_client:52 of +#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of #, fuzzy msgid "Starting an SSL-enabled gRPC client using provided certificates:" msgstr "启动支持 SSL 的 gRPC 客户端:" @@ -7821,73 +7758,83 @@ msgstr "抽象基类 `flwr.client.NumPyClient` 的实现。" msgid "common" msgstr "常见" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.record.conversion_utils.array_from_numpy:1 of +#, fuzzy +msgid "Create Array from NumPy ndarray." +msgstr "将参数对象转换为 NumPy ndarrays。" + +#: ../../source/ref-api/flwr.common.rst:30::1 msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.bytes_to_ndarray:1 of msgid "Deserialize NumPy ndarray from bytes." msgstr "从字节反序列化 NumPy ndarray。" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`configure `\\ \\(identifier\\[\\, " "filename\\, host\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.logger.configure:1 of msgid "Configure logging to file and/or remote log server." msgstr "配置将日志记录到文件和/或远程日志服务器。" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`event `\\ \\(event\\_type\\[\\, " "event\\_details\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.telemetry.event:1 of msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " "\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 logging.Logger.log:1 +#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 #: of msgid "Log 'msg % args' with the integer severity 'level'." msgstr "以整数严重性 \"级别 \"记录 \"msg % args\"。" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.ndarray_to_bytes:1 of msgid "Serialize NumPy ndarray to bytes." msgstr "将 NumPy ndarray 序列化为字节。" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid ":py:obj:`now `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.date.now:1 of msgid "Construct a datetime from time.time() with time zone set to UTC." msgstr "从 time.time() 生成日期时间,时区设置为 UTC。" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`ndarrays_to_parameters `\\ " "\\(ndarrays\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.ndarrays_to_parameters:1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 @@ -7895,187 +7842,369 @@ msgstr "" msgid "Convert NumPy ndarrays to parameters object." msgstr "将 NumPy ndarrays 转换为参数对象。" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`parameters_to_ndarrays `\\ " "\\(parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.parameters_to_ndarrays:1 of msgid "Convert parameters object to NumPy ndarrays." msgstr "将参数对象转换为 NumPy ndarrays。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.parametersrecord.Array:1 of +#, fuzzy +msgid "Array type." +msgstr "返回类型" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`ClientMessage `\\ " "\\(\\[get\\_properties\\_res\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ClientMessage:1 of msgid "ClientMessage is a container used to hold one result message." msgstr "ClientMessage 是用于容纳一条结果信息的容器。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`Code `\\ \\(value\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.Code:1 of msgid "Client status codes." 
msgstr "客户端状态代码。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy +msgid "" +":py:obj:`ConfigsRecord `\\ " +"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" +msgstr "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +#, fuzzy +msgid "Configs record." +msgstr "配置日志记录" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`Context `\\ \\(state\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.context.Context:1 of +msgid "State of your run." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.DisconnectRes:1 of msgid "DisconnectRes message from client to server." msgstr "客户端向服务器发送 DisconnectRes 信息。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`EvaluateIns `\\ \\(parameters\\, " "config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.EvaluateIns:1 of msgid "Evaluate instructions for a client." msgstr "评估客户端的指示。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " "num\\_examples\\, metrics\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.EvaluateRes:1 of msgid "Evaluate response from a client." msgstr "评估客户端的反应。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`EventType `\\ \\(value\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.telemetry.EventType:1 of msgid "Types of telemetry events." msgstr "遥测事件类型。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.FitIns:1 of msgid "Fit instructions for a client." msgstr "为客户提供安装说明。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`FitRes `\\ \\(status\\, parameters\\, " "num\\_examples\\, metrics\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.FitRes:1 of msgid "Fit response from a client." msgstr "来自客户端的合适回复。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.message.Error:1 of +msgid "A dataclass that stores information about an error that occurred." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetParametersIns:1 of msgid "Parameters request for a client." 
msgstr "客户端的参数请求。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`GetParametersRes `\\ \\(status\\, " "parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetParametersRes:1 of msgid "Response when asked to return parameters." msgstr "要求返回参数时的响应。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetPropertiesIns:1 of msgid "Properties request for a client." msgstr "客户端的属性请求。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`GetPropertiesRes `\\ \\(status\\, " "properties\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetPropertiesRes:1 of msgid "Properties response from a client." msgstr "来自客户端的属性响应。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.message.Message:1 of +msgid "State of your application from the viewpoint of the entity using it." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`MessageType `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.constant.MessageType:1 of +msgid "Message type." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.constant.MessageTypeLegacy:1 of +msgid "Legacy message type." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`Metadata `\\ \\(run\\_id\\, " +"message\\_id\\, src\\_node\\_id\\, ...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.message.Metadata:1 of +msgid "A dataclass holding metadata associated with the current message." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +msgid "Metrics record." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`NDArray `\\" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" "alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " ":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`Parameters `\\ \\(tensors\\, " "tensor\\_type\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.Parameters:1 of msgid "Model parameters." 
msgstr "模型参数。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.parametersrecord.ParametersRecord:1 of +#, fuzzy +msgid "Parameters record." +msgstr "参数" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ReconnectIns:1 of msgid "ReconnectIns message from server to client." msgstr "服务器发送给客户端的重新连接信息。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`RecordSet `\\ " +"\\(\\[parameters\\_records\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.recordset.RecordSet:1 of +msgid "RecordSet stores groups of parameters, metrics and configs." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`ServerMessage `\\ " "\\(\\[get\\_properties\\_ins\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ServerMessage:1 of msgid "ServerMessage is a container used to hold one instruction message." msgstr "ServerMessage 是用于容纳一条指令信息的容器。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`Status `\\ \\(code\\, message\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.Status:1 of msgid "Client status." msgstr "客户端状态。" +#: ../../source/ref-api/flwr.common.Array.rst:2 +msgid "Array" +msgstr "" + +#: flwr.common.record.parametersrecord.Array:3 of +msgid "" +"A dataclass containing serialized data from an array-like or tensor-like " +"object along with some metadata about it." +msgstr "" + +#: flwr.common.record.parametersrecord.Array:6 of +msgid "" +"A string representing the data type of the serialised object (e.g. " +"`np.float32`)" +msgstr "" + +#: flwr.common.record.parametersrecord.Array:8 of +msgid "" +"A list representing the shape of the unserialized array-like object. This" +" is used to deserialize the data (depending on the serialization method) " +"or simply as a metadata field." +msgstr "" + +#: flwr.common.record.parametersrecord.Array:12 of +msgid "" +"A string indicating the type of serialisation mechanism used to generate " +"the bytes in `data` from an array-like or tensor-like object." +msgstr "" + +#: flwr.common.record.parametersrecord.Array:15 of +msgid "A buffer of bytes containing the data." +msgstr "" + +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#, fuzzy +msgid ":py:obj:`numpy `\\ \\(\\)" +msgstr "server.strategy.Strategy" + +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#: flwr.common.record.parametersrecord.Array.numpy:1 of +#, fuzzy +msgid "Return the array as a NumPy array." 
+msgstr "以 NumPy ndarrays 列表形式返回模型参数" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`dtype `\\" +msgstr "" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#, fuzzy +msgid ":py:obj:`shape `\\" +msgstr "server.strategy.Strategy" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#, fuzzy +msgid ":py:obj:`stype `\\" +msgstr "server.strategy.Strategy" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`data `\\" +msgstr "" + #: ../../source/ref-api/flwr.common.ClientMessage.rst:2 #, fuzzy msgid "ClientMessage" @@ -8135,6 +8264,106 @@ msgid "" "`\\" msgstr "" +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 +#, fuzzy +msgid "ConfigsRecord" +msgstr "配置日志记录" + +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " +":py:class:`float`, :py:class:`str`, :py:class:`bytes`, :py:class:`bool`, " +":py:class:`~typing.List`\\ [:py:class:`int`], :py:class:`~typing.List`\\ " +"[:py:class:`float`], :py:class:`~typing.List`\\ [:py:class:`str`], " +":py:class:`~typing.List`\\ [:py:class:`bytes`], " +":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1 +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "Remove all items from R." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 +#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "Return number of Bytes stored in this object." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 +#: flwr.common.record.typeddict.TypedDict.get:1 of +msgid "d defaults to None." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 +#: flwr.common.record.typeddict.TypedDict.pop:1 of +msgid "If key is not found, d is returned if given, otherwise KeyError is raised." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 +#: flwr.common.record.typeddict.TypedDict.update:1 of +msgid "Update R from dict/iterable E and F." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of +msgid "This function counts booleans as occupying 1 Byte." +msgstr "" + +#: ../../source/ref-api/flwr.common.Context.rst:2 +msgid "Context" +msgstr "" + +#: flwr.common.context.Context:3 of +msgid "" +"Holds records added by the entity in a given run and that will stay " +"local. 
This means that the data it holds will never leave the system it's" +" running from. This can be used as an intermediate storage or scratchpad " +"when executing mods. It can also be used as a memory to access at " +"different points during the lifecycle of this entity (e.g. across " +"multiple rounds)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Context.rst:28::1 +#, fuzzy +msgid ":py:obj:`state `\\" +msgstr "server.strategy.Strategy" + #: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 msgid "DisconnectRes" msgstr "" @@ -8143,6 +8372,34 @@ msgstr "" msgid ":py:obj:`reason `\\" msgstr "" +#: ../../source/ref-api/flwr.common.Error.rst:2 +msgid "Error" +msgstr "" + +#: flwr.common.message.Error:3 of +msgid "An identifier for the error." +msgstr "" + +#: flwr.common.message.Error:5 of +msgid "A reason for why the error arose (e.g. an exception stack-trace)" +msgstr "" + +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`code `\\" +msgstr "" + +#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of +msgid "Error code." +msgstr "" + +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`reason `\\" +msgstr "" + +#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of +msgid "Reason reported about the error." +msgstr "" + #: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 #, fuzzy msgid "EvaluateIns" @@ -8367,11 +8624,286 @@ msgstr "" msgid ":py:obj:`properties `\\" msgstr "" -#: ../../source/ref-api/flwr.common.NDArray.rst:2 -msgid "NDArray" +#: ../../source/ref-api/flwr.common.Message.rst:2 +#, fuzzy +msgid "Message" +msgstr "服务器端" + +#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 +#: flwr.common.message.Message:3 of +msgid "A dataclass including information about the message to be executed." msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +#: flwr.common.message.Message:5 of +msgid "" +"Holds records either sent by another entity (e.g. sent by the server-side" +" logic to a client, or vice-versa) or that will be sent to it." +msgstr "" + +#: flwr.common.message.Message:8 of +msgid "" +"A dataclass that captures information about an error that took place when" +" processing another message." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid "" +":py:obj:`create_error_reply `\\ " +"\\(error\\, ttl\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_error_reply:1 of +msgid "Construct a reply message indicating an error happened." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid "" +":py:obj:`create_reply `\\ \\(content\\," +" ttl\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_reply:1 of +msgid "Create a reply to this message with specified content and TTL." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_content `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_content:1 of +msgid "Return True if message has content, else False." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_error `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_error:1 of +msgid "Return True if message has an error, else False." 
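The :code:`Message` entries above list content/error accessors plus :code:`create_reply` and :code:`create_error_reply`; a hedged sketch of how a receiving handler might use them, relying only on the attributes and parameter names shown in these entries:

.. code-block:: python

    from flwr.common import Error, Message

    def handle(msg: Message) -> Message:
        # Metadata is set by the sending node (run id, message type, TTL, ...).
        print("run:", msg.metadata.run_id, "type:", msg.metadata.message_type)

        if msg.has_error():
            # Report a failure back to the sender.
            return msg.create_error_reply(
                error=Error(code=1, reason="upstream message carried an error"),
                ttl=msg.metadata.ttl,
            )

        # Normal path: reply with (here, unchanged) content and the same time-to-live.
        return msg.create_reply(content=msg.content, ttl=msg.metadata.ttl)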
+msgstr "" + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`content `\\" +msgstr "" + +#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 +#: of +#, fuzzy +msgid "The content of this message." +msgstr "评估客户端的反应。" + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`error `\\" +msgstr "" + +#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of +msgid "Error captured by this message." +msgstr "" + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`metadata `\\" +msgstr "" + +#: flwr.common.message.Message.create_error_reply:3 of +msgid "The error that was encountered." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.ttl:1 flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 flwr.common.message.Metadata:16 +#: of +msgid "Time-to-live for this message." +msgstr "" + +#: flwr.common.message.Message.create_reply:3 of +msgid "" +"The method generates a new `Message` as a reply to this message. It " +"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " +"this message and sets 'reply_to_message' to the ID of this message." +msgstr "" + +#: flwr.common.message.Message.create_reply:7 of +msgid "The content for the reply message." +msgstr "" + +#: flwr.common.message.Message.create_reply:12 of +msgid "A new `Message` instance representing the reply." +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:2 +#, fuzzy +msgid "MessageType" +msgstr "返回类型" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`EVALUATE `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`QUERY `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`TRAIN `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 +msgid "MessageTypeLegacy" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PARAMETERS `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PROPERTIES `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of +msgid "An identifier for the current run." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of +msgid "An identifier for the current message." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of +msgid "An identifier for the node sending this message." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1 +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.message.Metadata:9 of +msgid "An identifier for the node receiving this message." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of +msgid "An identifier for the message this message replies to." +msgstr "" + +#: flwr.common.message.Metadata:13 of +msgid "" +"An identifier for grouping messages. In some settings, this is used as " +"the FL round." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of +msgid "A string that encodes the action to be executed on the receiving end." 
+msgstr "" + +#: flwr.common.message.Metadata:21 of +msgid "" +"An identifier that can be used when loading a particular data partition " +"for a ClientApp. Making use of this identifier is more relevant when " +"conducting simulations." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`dst_node_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`group_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.group_id:1 of +msgid "An identifier for grouping messages." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`message_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`message_type `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`partition_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.partition_id:1 of +msgid "An identifier telling which data partition a ClientApp should use." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`reply_to_message `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`run_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`src_node_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`ttl `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 +msgid "MetricsRecord" +msgstr "" + +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " +":py:class:`float`, :py:class:`~typing.List`\\ [:py:class:`int`], " +":py:class:`~typing.List`\\ [:py:class:`float`]]]" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.NDArray.rst:2 +msgid "NDArray" +msgstr "" + +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 msgid ":py:obj:`tensors `\\" msgstr "" @@ -8379,6 +8911,66 @@ msgstr "" msgid ":py:obj:`tensor_type `\\" msgstr "" +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 +#, fuzzy +msgid "ParametersRecord" +msgstr "参数" + +#: flwr.common.record.parametersrecord.ParametersRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord:3 of +msgid "" +"A dataclass storing named Arrays in order. This means that it holds " +"entries as an OrderedDict[str, Array]. 
ParametersRecord objects can be " +"viewed as an equivalent to PyTorch's state_dict, but holding serialised " +"tensors instead." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of +msgid "" +"Note that a small amount of Bytes might also be included in this counting" +" that correspond to metadata of the serialized object (e.g. of NumPy " +"array) needed for deseralization." +msgstr "" + #: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 #, fuzzy msgid "ReconnectIns" @@ -8388,6 +8980,37 @@ msgstr "启用 SSL 连接" msgid ":py:obj:`seconds `\\" msgstr "" +#: ../../source/ref-api/flwr.common.RecordSet.rst:2 +msgid "RecordSet" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`configs_records `\\" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1 +#: flwr.common.RecordSet.configs_records:1::1 of +msgid "Dictionary holding ConfigsRecord instances." +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`metrics_records `\\" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.metrics_records:1 of +msgid "Dictionary holding MetricsRecord instances." +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`parameters_records `\\" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.parameters_records:1 of +msgid "Dictionary holding ParametersRecord instances." +msgstr "" + #: ../../source/ref-api/flwr.common.ServerMessage.rst:2 #, fuzzy msgid "ServerMessage" @@ -8426,6 +9049,10 @@ msgstr "" msgid ":py:obj:`message `\\" msgstr "" +#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 +msgid "array\\_from\\_numpy" +msgstr "" + #: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 msgid "bytes\\_to\\_ndarray" msgstr "" @@ -8474,83 +9101,134 @@ msgstr "" msgid "server" msgstr "服务器" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_driver_api `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.run_driver_api:1 of #, fuzzy msgid "Run Flower server (Driver API)." msgstr "flower-driver-api" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_fleet_api `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.run_fleet_api:1 of #, fuzzy msgid "Run Flower server (Fleet API)." 
msgstr "Flower 服务器。" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_server_app `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.app.run_server_app:1 of +#: ../../source/ref-api/flwr.server.rst:27::1 +#: flwr.server.run_serverapp.run_server_app:1 of #, fuzzy msgid "Run Flower server app." msgstr "Flower 服务器。" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_superlink `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.run_superlink:1 of msgid "Run Flower server (Driver API and Fleet API)." msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 +msgid "" +":py:obj:`start_driver `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:27::1 +#: flwr.server.compat.app.start_driver:1 of +#, fuzzy +msgid "Start a Flower Driver API server." +msgstr "启动基于 Ray 的Flower模拟服务器。" + +#: ../../source/ref-api/flwr.server.rst:27::1 msgid "" ":py:obj:`start_server `\\ \\(\\*\\[\\, " "server\\_address\\, server\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.start_server:1 of msgid "Start a Flower server using the gRPC transport layer." msgstr "使用 gRPC 传输层启动 Flower 服务器。" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 msgid ":py:obj:`ClientManager `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.client_manager.ClientManager:1 of #, fuzzy msgid "Abstract base class for managing Flower clients." msgstr "Flower 客户端的抽象基类。" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 +#, fuzzy +msgid "" +":py:obj:`Driver `\\ " +"\\(\\[driver\\_service\\_address\\, ...\\]\\)" +msgstr "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" + +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.driver.driver.Driver:1 of +msgid "`Driver` class provides an interface to the Driver API." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 msgid ":py:obj:`History `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.history.History:1 of #, fuzzy msgid "History class for training and/or evaluation metrics collection." msgstr "**hist** -- 包含训练和评估指标的对象。" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 +msgid "" +":py:obj:`LegacyContext `\\ \\(state\\[\\, " +"config\\, strategy\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Legacy Context." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 msgid "" ":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " "strategy\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 +#, fuzzy +msgid "" +":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " +"strategy\\, ...\\]\\)" +msgstr "server.strategy.Strategy" + +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.server_app.ServerApp:1 of +#, fuzzy +msgid "Flower ServerApp." +msgstr "Flower 服务器。" + +#: ../../source/ref-api/flwr.server.rst:41::1 #, fuzzy msgid "" ":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," @@ -8560,42 +9238,43 @@ msgstr "" "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" -#: ../../source/ref-api/flwr.server.rst:37::1 -#: flwr.server.app.ServerConfig:1 of +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.server_config.ServerConfig:1 of #, fuzzy msgid "Flower server config." msgstr "Flower 服务器。" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.client_manager.SimpleClientManager:1 of #, fuzzy msgid "Provides a pool of available clients." msgstr "使用部分可用客户进行评估。" -#: ../../source/ref-api/flwr.server.rst:56::1 -msgid ":py:obj:`flwr.server.driver `\\" -msgstr "" - -#: ../../source/ref-api/flwr.server.rst:56::1 flwr.server.driver:1 -#: of -#, fuzzy -msgid "Flower driver SDK." -msgstr "Flower 服务器。" - -#: ../../source/ref-api/flwr.server.rst:56::1 +#: ../../source/ref-api/flwr.server.rst:60::1 #, fuzzy msgid ":py:obj:`flwr.server.strategy `\\" msgstr "server.strategy.Strategy" -#: ../../source/ref-api/flwr.server.rst:56::1 +#: ../../source/ref-api/flwr.server.rst:60::1 #: flwr.server.strategy:1 of msgid "Contains the strategy abstraction and different implementations." msgstr "包含策略抽象和不同的实现方法。" +#: ../../source/ref-api/flwr.server.rst:60::1 +#, fuzzy +msgid ":py:obj:`flwr.server.workflow `\\" +msgstr "server.strategy.Strategy" + +#: ../../source/ref-api/flwr.server.rst:60::1 +#: flwr.server.workflow:1 of +#, fuzzy +msgid "Workflows." +msgstr "工作流程" + #: ../../source/ref-api/flwr.server.ClientManager.rst:2 #, fuzzy msgid "ClientManager" @@ -8690,34 +9369,248 @@ msgstr "" msgid "This method is idempotent." msgstr "" -#: ../../source/ref-api/flwr.server.History.rst:2 -msgid "History" -msgstr "" - -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_loss_centralized " -"`\\ \\(server\\_round\\, " -"loss\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.Driver.rst:2 +#, fuzzy +msgid "Driver" +msgstr "服务器" -#: flwr.server.history.History.add_loss_centralized:1 -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.driver.driver.Driver:3 of #, fuzzy -msgid "Add one loss entry (from centralized evaluation)." -msgstr "集中评估" +msgid "" +"The IPv4 or IPv6 address of the Driver API server. Defaults to " +"`\"[::]:9091\"`." +msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of msgid "" -":py:obj:`add_loss_distributed " -"`\\ \\(server\\_round\\, " -"loss\\)" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. 
The tuple is expected to have three " +"bytes elements in the following order: * CA certificate. * " +"server certificate. * server private key." msgstr "" +"包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素: * CA 证书,* 服务器证书, * " +"服务器私钥。" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_loss_distributed:1 of -msgid "Add one loss entry (from distributed evaluation)." -msgstr "" +#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +msgid "" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order:" +msgstr "包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素:" + +#: flwr.server.app.start_server:32 flwr.server.driver.driver.Driver:10 of +msgid "CA certificate." +msgstr "CA 证书。" + +#: flwr.server.app.start_server:33 flwr.server.driver.driver.Driver:11 of +msgid "server certificate." +msgstr "服务器证书。" + +#: flwr.server.app.start_server:34 flwr.server.driver.driver.Driver:12 of +msgid "server private key." +msgstr "服务器私人密钥。" + +#: flwr.server.driver.driver.Driver.close:1::1 of +#, fuzzy +msgid ":py:obj:`close `\\ \\(\\)" +msgstr "server.strategy.Strategy" + +#: flwr.server.driver.driver.Driver.close:1 +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "Disconnect from the SuperLink if connected." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "" +":py:obj:`create_message `\\ " +"\\(content\\, message\\_type\\, ...\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.create_message:1 of +msgid "Create a new message with specified parameters." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid ":py:obj:`get_node_ids `\\ \\(\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.get_node_ids:1 of +msgid "Get node IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.pull_messages:1 of +msgid "Pull messages based on message IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "" +":py:obj:`push_messages `\\ " +"\\(messages\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.push_messages:1 of +msgid "Push messages to specified node IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +#, fuzzy +msgid "" +":py:obj:`send_and_receive `\\ " +"\\(messages\\, \\*\\[\\, timeout\\]\\)" +msgstr "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.send_and_receive:1 of +msgid "Push messages to specified node IDs and pull the reply messages." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:3 of +msgid "" +"This method constructs a new `Message` with given content and metadata. " +"The `run_id` and `src_node_id` will be set automatically." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:6 of +msgid "" +"The content for the new message. This holds records that are to be sent " +"to the destination node." 
+msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:9 of +msgid "" +"The type of the message, defining the action to be executed on the " +"receiving end." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:12 of +msgid "The ID of the destination node to which the message is being sent." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:14 of +msgid "" +"The ID of the group to which this message is associated. In some " +"settings, this is used as the FL round." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:17 of +msgid "" +"Time-to-live for the round trip of this message, i.e., the time from " +"sending this message to receiving a reply. It specifies the duration for " +"which the message and its potential reply are considered valid." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:22 of +msgid "" +"**message** -- A new `Message` instance with the specified content and " +"metadata." +msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:3 of +msgid "" +"This method is used to collect messages from the SuperLink that " +"correspond to a set of given message IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:6 of +msgid "An iterable of message IDs for which reply messages are to be retrieved." +msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:9 of +msgid "**messages** -- An iterable of messages received." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:3 of +msgid "" +"This method takes an iterable of messages and sends each message to the " +"node specified in `dst_node_id`." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:6 +#: flwr.server.driver.driver.Driver.send_and_receive:7 of +msgid "An iterable of messages to be sent." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:9 of +msgid "" +"**message_ids** -- An iterable of IDs for the messages that were sent, " +"which can be used to pull replies." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:3 of +msgid "" +"This method sends a list of messages to their destination node IDs and " +"then waits for the replies. It continues to pull replies until either all" +" replies are received or the specified timeout duration is exceeded." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:9 of +msgid "" +"The timeout duration in seconds. If specified, the method will wait for " +"replies for this duration. If `None`, there is no time limit and the " +"method will wait until replies for all messages are received." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:14 of +msgid "**replies** -- An iterable of reply messages received from the SuperLink." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:18 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:53 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:60 +#: of +#, fuzzy +msgid "Notes" +msgstr "无" + +#: flwr.server.driver.driver.Driver.send_and_receive:19 of +msgid "" +"This method uses `push_messages` to send the messages and `pull_messages`" +" to collect the replies. If `timeout` is set, the method may not return " +"replies for all sent messages. A message remains valid until its TTL, " +"which is not affected by `timeout`." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.History.rst:2 +msgid "History" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1 +#: flwr.server.history.History.add_loss_centralized:1::1 of +#, fuzzy +msgid "Add one loss entry (from centralized evaluation)." +msgstr "集中评估" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_loss_distributed:1 of +msgid "Add one loss entry (from distributed evaluation)." +msgstr "" #: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" @@ -8757,6 +9650,38 @@ msgstr "" msgid "Add metrics entries (from distributed fit)." msgstr "" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 +msgid "LegacyContext" +msgstr "" + +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Bases: :py:class:`~flwr.common.context.Context`" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#, fuzzy +msgid ":py:obj:`config `\\" +msgstr "server.strategy.Strategy" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#, fuzzy +msgid ":py:obj:`strategy `\\" +msgstr "server.strategy.Strategy" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +msgid ":py:obj:`client_manager `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#, fuzzy +msgid ":py:obj:`history `\\" +msgstr "server.strategy.Strategy" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#, fuzzy +msgid ":py:obj:`state `\\" +msgstr "server.strategy.Strategy" + #: flwr.server.server.Server.client_manager:1::1 of msgid ":py:obj:`client_manager `\\ \\(\\)" msgstr "" @@ -8833,12 +9758,36 @@ msgstr "" msgid "Replace server strategy." msgstr "server.strategy" +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 +#, fuzzy +msgid "ServerApp" +msgstr "服务器" + +#: flwr.server.server_app.ServerApp:5 of +#, fuzzy +msgid "Use the `ServerApp` with an existing `Strategy`:" +msgstr "使用现有策略" + +#: flwr.server.server_app.ServerApp:15 of +msgid "Use the `ServerApp` with a custom main function:" +msgstr "" + +#: flwr.server.server_app.ServerApp.main:1::1 of +#, fuzzy +msgid ":py:obj:`main `\\ \\(\\)" +msgstr "server.strategy.Strategy" + +#: flwr.server.server_app.ServerApp.main:1 +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid "Return a decorator that registers the main fn with the server app." +msgstr "" + #: ../../source/ref-api/flwr.server.ServerConfig.rst:2 #, fuzzy msgid "ServerConfig" msgstr "服务器" -#: flwr.server.app.ServerConfig:3 of +#: flwr.server.server_config.ServerConfig:3 of msgid "" "All attributes have default values which allows users to configure just " "the ones they care about." @@ -8912,255 +9861,87 @@ msgstr "" msgid "**success**" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:2 -#, fuzzy -msgid "driver" -msgstr "服务器" - -#: ../../source/ref-api/flwr.server.driver.rst:22::1 -msgid "" -":py:obj:`start_driver `\\ \\(\\*\\[\\, " -"server\\_address\\, server\\, ...\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.driver.rst:22::1 -#: flwr.server.driver.app.start_driver:1 of +#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 #, fuzzy -msgid "Start a Flower Driver API server." 
-msgstr "启动基于 Ray 的Flower模拟服务器。" - -#: ../../source/ref-api/flwr.server.driver.rst:30::1 -msgid "" -":py:obj:`Driver `\\ " -"\\(\\[driver\\_service\\_address\\, ...\\]\\)" -msgstr "" +msgid "run\\_driver\\_api" +msgstr "flower-driver-api" -#: ../../source/ref-api/flwr.server.driver.rst:30::1 -#: flwr.server.driver.driver.Driver:1 of -msgid "`Driver` class provides an interface to the Driver API." +#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 +msgid "run\\_fleet\\_api" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:30::1 -msgid "" -":py:obj:`GrpcDriver `\\ " -"\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#: ../../source/ref-api/flwr.server.run_server_app.rst:2 +msgid "run\\_server\\_app" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:30::1 -#: flwr.server.driver.grpc_driver.GrpcDriver:1 of -msgid "`GrpcDriver` provides access to the gRPC Driver API/service." -msgstr "" +#: ../../source/ref-api/flwr.server.run_superlink.rst:2 +#, fuzzy +msgid "run\\_superlink" +msgstr "flower-superlink" -#: ../../source/ref-api/flwr.server.driver.Driver.rst:2 +#: ../../source/ref-api/flwr.server.start_driver.rst:2 #, fuzzy -msgid "Driver" -msgstr "服务器" +msgid "start\\_driver" +msgstr "启动客户端" -#: flwr.server.driver.driver.Driver:3 of +#: flwr.server.compat.app.start_driver:3 of #, fuzzy msgid "" "The IPv4 or IPv6 address of the Driver API server. Defaults to " -"`\"[::]:9091\"`." +"`\"[::]:8080\"`." msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" -#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +#: flwr.server.compat.app.start_driver:6 of +#, fuzzy msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order: * CA certificate. * " -"server certificate. * server private key." -msgstr "" -"包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素: * CA 证书,* 服务器证书, * " -"服务器私钥。" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_driver` will create one." +msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" -#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +#: flwr.server.app.start_server:9 flwr.server.compat.app.start_driver:10 +#: flwr.simulation.app.start_simulation:28 of msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order:" -msgstr "包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素:" - -#: flwr.server.app.start_server:32 flwr.server.driver.driver.Driver:10 of -msgid "CA certificate." -msgstr "CA 证书。" - -#: flwr.server.app.start_server:33 flwr.server.driver.driver.Driver:11 of -msgid "server certificate." -msgstr "服务器证书。" - -#: flwr.server.app.start_server:34 flwr.server.driver.driver.Driver:12 of -msgid "server private key." -msgstr "服务器私人密钥。" - -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of -msgid ":py:obj:`get_nodes `\\ \\(\\)" -msgstr "" - -#: flwr.server.driver.driver.Driver.get_nodes:1 -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of -msgid "Get node IDs." -msgstr "" +"Currently supported values are `num_rounds` (int, default: 1) and " +"`round_timeout` in seconds (float, default: None)." 
+msgstr "目前支持的值有:`num_rounds`(int,默认值:1)和以秒为单位的`round_timeout`(float,默认值:无)。" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of +#: flwr.server.app.start_server:12 flwr.server.compat.app.start_driver:13 of msgid "" -":py:obj:`pull_task_res `\\ " -"\\(task\\_ids\\)" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`start_server` will use `flwr.server.strategy.FedAvg`." msgstr "" +"抽象基类 `flwr.server.strategy.Strategy` 的实现。如果没有提供策略,`start_server` 将使用 " +"`flwr.server.strategy.FedAvg`。" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 -#: flwr.server.driver.driver.Driver.pull_task_res:1 -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.pull_task_res:1 of +#: flwr.server.compat.app.start_driver:17 of #, fuzzy -msgid "Get task results." -msgstr "汇总训练结果。" - -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of msgid "" -":py:obj:`push_task_ins `\\ " -"\\(task\\_ins\\_list\\)" +"An implementation of the class `flwr.server.ClientManager`. If no " +"implementation is provided, then `start_driver` will use " +"`flwr.server.SimpleClientManager`." msgstr "" +"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_server` 将使用 " +"`flwr.server.client_manager.SimpleClientManager`。" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 -#: flwr.server.driver.driver.Driver.push_task_ins:1 -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.push_task_ins:1 of -msgid "Schedule tasks." +#: flwr.server.compat.app.start_driver:25 of +msgid "The Driver object to use." msgstr "" -#: ../../source/ref-api/flwr.server.driver.GrpcDriver.rst:2 -msgid "GrpcDriver" -msgstr "" +#: flwr.server.app.start_server:37 flwr.server.compat.app.start_driver:28 of +msgid "**hist** -- Object containing training and evaluation metrics." +msgstr "**hist** -- 包含训练和评估指标的对象。" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid ":py:obj:`connect `\\ \\(\\)" -msgstr "" +#: flwr.server.compat.app.start_driver:33 of +#, fuzzy +msgid "Starting a driver that connects to an insecure server:" +msgstr "启动不安全的服务器:" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1 -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid "Connect to the Driver API." -msgstr "" - -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid "" -":py:obj:`create_run `\\ " -"\\(req\\)" -msgstr "" - -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.create_run:1 of -#, fuzzy -msgid "Request for run ID." -msgstr "Flower 基线申请" - -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid ":py:obj:`disconnect `\\ \\(\\)" -msgstr "" - -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.disconnect:1 of -msgid "Disconnect from the Driver API." -msgstr "" - -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid ":py:obj:`get_nodes `\\ \\(req\\)" -msgstr "" - -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.get_nodes:1 of -#, fuzzy -msgid "Get client IDs." 
-msgstr "返回客户端(本身)。" - -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid "" -":py:obj:`pull_task_res `\\ " -"\\(req\\)" -msgstr "" - -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid "" -":py:obj:`push_task_ins `\\ " -"\\(req\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.driver.start_driver.rst:2 -#, fuzzy -msgid "start\\_driver" -msgstr "启动客户端" - -#: flwr.server.driver.app.start_driver:3 of -#, fuzzy -msgid "" -"The IPv4 or IPv6 address of the Driver API server. Defaults to " -"`\"[::]:8080\"`." -msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" - -#: flwr.server.driver.app.start_driver:6 of -#, fuzzy -msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. If no instance is provided, then `start_driver` will create one." -msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" - -#: flwr.server.app.start_server:9 flwr.server.driver.app.start_driver:10 -#: flwr.simulation.app.start_simulation:28 of -msgid "" -"Currently supported values are `num_rounds` (int, default: 1) and " -"`round_timeout` in seconds (float, default: None)." -msgstr "目前支持的值有:`num_rounds`(int,默认值:1)和以秒为单位的`round_timeout`(float,默认值:无)。" - -#: flwr.server.app.start_server:12 flwr.server.driver.app.start_driver:13 of -msgid "" -"An implementation of the abstract base class " -"`flwr.server.strategy.Strategy`. If no strategy is provided, then " -"`start_server` will use `flwr.server.strategy.FedAvg`." -msgstr "" -"抽象基类 `flwr.server.strategy.Strategy` 的实现。如果没有提供策略,`start_server` 将使用 " -"`flwr.server.strategy.FedAvg`。" - -#: flwr.server.driver.app.start_driver:17 of -#, fuzzy -msgid "" -"An implementation of the class `flwr.server.ClientManager`. If no " -"implementation is provided, then `start_driver` will use " -"`flwr.server.SimpleClientManager`." -msgstr "" -"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_server` 将使用 " -"`flwr.server.client_manager.SimpleClientManager`。" - -#: flwr.server.app.start_server:37 flwr.server.driver.app.start_driver:26 of -msgid "**hist** -- Object containing training and evaluation metrics." 
-msgstr "**hist** -- 包含训练和评估指标的对象。" - -#: flwr.server.driver.app.start_driver:31 of -#, fuzzy -msgid "Starting a driver that connects to an insecure server:" -msgstr "启动不安全的服务器:" - -#: flwr.server.driver.app.start_driver:35 of +#: flwr.server.compat.app.start_driver:37 of #, fuzzy msgid "Starting a driver that connects to an SSL-enabled server:" msgstr "启动支持 SSL 的服务器:" -#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 -#, fuzzy -msgid "run\\_driver\\_api" -msgstr "flower-driver-api" - -#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 -msgid "run\\_fleet\\_api" -msgstr "" - -#: ../../source/ref-api/flwr.server.run_server_app.rst:2 -msgid "run\\_server\\_app" -msgstr "" - -#: ../../source/ref-api/flwr.server.run_superlink.rst:2 -#, fuzzy -msgid "run\\_superlink" -msgstr "flower-superlink" - #: ../../source/ref-api/flwr.server.start_server.rst:2 #, fuzzy msgid "start\\_server" @@ -9210,223 +9991,279 @@ msgstr "启动支持 SSL 的服务器:" msgid "strategy" msgstr "Krum 策略。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FaultTolerantFedAvg " -"`\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of -msgid "Configurable fault-tolerant FedAvg strategy implementation." -msgstr "可配置的容错 FedAvg 策略实施。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.bulyan.Bulyan:1 of +msgid "Bulyan strategy." +msgstr "Bulyan 策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " +"`\\ " +"\\(...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: of +#, fuzzy +msgid "Strategy wrapper for central DP with client-side adaptive clipping." +msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " +"`\\ " +"\\(...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: of +#, fuzzy +msgid "Strategy wrapper for central DP with server-side adaptive clipping." 
+msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DifferentialPrivacyClientSideFixedClipping " +"`\\ " +"\\(...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: of +#, fuzzy +msgid "Strategy wrapper for central DP with client-side fixed clipping." +msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DifferentialPrivacyServerSideFixedClipping " +"`\\ " +"\\(...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: of +#, fuzzy +msgid "Strategy wrapper for central DP with server-side fixed clipping." +msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedadagrad.FedAdagrad:1 of msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." msgstr "FedAdagrad 策略 - 使用 Adagrad 进行自适应联合优化。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAdam `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedadam.FedAdam:1 of msgid "FedAdam - Adaptive Federated Optimization using Adam." msgstr "FedAdam - 使用 Adam 进行自适应联合优化。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAvg `\\ \\(\\*\\[\\, " "fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedavg.FedAvg:1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of msgid "Federated Averaging strategy." msgstr "联邦平均策略。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -msgid "" -":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of -msgid "Configurable FedXgbNnAvg strategy implementation." -msgstr "可配置的 FedXgbNAvg 策略实施。" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -msgid "" -":py:obj:`FedXgbBagging `\\ " -"\\(\\[evaluate\\_function\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of -#, fuzzy -msgid "Configurable FedXgbBagging strategy implementation." -msgstr "可配置的 FedXgbNAvg 策略实施。" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -msgid "" -":py:obj:`FedXgbCyclic `\\ " -"\\(\\*\\*kwargs\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of -#, fuzzy -msgid "Configurable FedXgbCyclic strategy implementation." 
-msgstr "可配置的 FedAvg 策略实施。" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAvgAndroid `\\ " "\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedavgm.FedAvgM:1 of msgid "Federated Averaging with Momentum strategy." msgstr "联邦平均动量策略。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedmedian.FedMedian:1 of +#, fuzzy +msgid "Configurable FedMedian strategy implementation." +msgstr "可配置的 FedAvg 策略实施。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedOpt `\\ \\(\\*\\[\\, " "fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedopt.FedOpt:1 of #, fuzzy msgid "Federated Optim strategy." msgstr "联邦优化策略。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedProx `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedprox.FedProx:1 of msgid "Federated Optimization strategy." msgstr "联邦优化策略。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FedYogi `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "FedYogi [Reddi et al., 2020] strategy." -msgstr "FedYogi [Reddi 等人,2020] 策略。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of +msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." +msgstr "带修剪均值的联邦平均法[Dong Yin 等,2021]。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " -"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Configurable QFedAvg strategy implementation." -msgstr "可配置的 QFedAvg 策略实施。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +#, fuzzy +msgid "Configurable FedXgbBagging strategy implementation." 
+msgstr "可配置的 FedXgbNAvg 策略实施。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FedMedian `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedmedian.FedMedian:1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of #, fuzzy -msgid "Configurable FedMedian strategy implementation." +msgid "Configurable FedXgbCyclic strategy implementation." msgstr "可配置的 FedAvg 策略实施。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FedTrimmedAvg `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of -msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." -msgstr "带修剪均值的联邦平均法[Dong Yin 等,2021]。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." +msgstr "可配置的 FedXgbNAvg 策略实施。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`Krum `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.krum.Krum:1 of -#, fuzzy -msgid "Krum [Blanchard et al., 2017] strategy." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." msgstr "FedYogi [Reddi 等人,2020] 策略。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`Bulyan `\\ \\(\\*\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.bulyan.Bulyan:1 of -msgid "Bulyan strategy." -msgstr "Bulyan 策略。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of +msgid "Configurable fault-tolerant FedAvg strategy implementation." +msgstr "可配置的容错 FedAvg 策略实施。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`DPFedAvgAdaptive `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\)" +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." -msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.krum.Krum:1 of +#, fuzzy +msgid "Krum [Blanchard et al., 2017] strategy." 
+msgstr "FedYogi [Reddi 等人,2020] 策略。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`DPFedAvgFixed `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of -msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." -msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Configurable QFedAvg strategy implementation." +msgstr "可配置的 QFedAvg 策略实施。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid ":py:obj:`Strategy `\\ \\(\\)" msgstr "server.strategy.Strategy" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.strategy.Strategy:1 of msgid "Abstract base class for server strategy implementations." msgstr "服务器策略实现的抽象基类。" @@ -9627,6 +10464,14 @@ msgid "" "parameters\\, ...\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 @@ -9648,6 +10493,14 @@ msgid "" "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 #: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: 
flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 @@ -9741,6 +10594,10 @@ msgstr "" msgid "Return the sample size and the required number of available clients." msgstr "返回样本大小和所需的可用客户数量。" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 +msgid "DPFedAvgAdaptive" +msgstr "DPFedAvgAdaptive" + #: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" msgstr "" @@ -9758,6 +10615,14 @@ msgid "" "\\(server\\_round\\, results\\, ...\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of @@ -9806,6 +10671,14 @@ msgid "" "\\(server\\_round\\, parameters\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of msgid "Evaluate model parameters using an evaluation function from the strategy." 
@@ -9819,6 +10692,14 @@ msgid "" "\\(client\\_manager\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of msgid "Initialize global model parameters using given strategy." @@ -9855,6 +10736,14 @@ msgstr "" "一个元组列表。列表中的每个元组都标识了一个`ClientProxy`和该特定`ClientProxy`的`EvaluateIns`。如果某个特定的" " `ClientProxy` 未包含在此列表中,则表示该 `ClientProxy` 将不参与下一轮联合评估。" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 +msgid "DPFedAvgFixed" +msgstr "DPFedAvgFixed" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 #: flwr.server.strategy.fedavg.FedAvg:1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of @@ -9938,6 +10827,392 @@ msgstr "" "**fit_configuration** -- " "一个元组列表。列表中的每个元组都标识了一个`ClientProxy`和该特定`ClientProxy`的`FitIns'。如果某个特定的`ClientProxy`不在此列表中,则表示该`ClientProxy`将不参加下一轮联合学习。" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyClientSideAdaptiveClipping" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 +#: of +msgid "Use `adaptiveclipping_mod` modifier at the client side." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 +#: of +msgid "" +"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " +"happen on the client-side, usually by using the built-in " +"`adaptiveclipping_mod`." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 +#: of +msgid "The strategy to which DP functionalities will be added by this wrapper." 
+msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 +#: of +msgid "The noise multiplier for the Gaussian mechanism for model updates." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 +#: of +msgid "The number of clients that are sampled on each round." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 +#: of +msgid "" +"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " +"recommends to set to 0.1." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 +#: of +msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 +#: of +msgid "" +"The learning rate for the clipping norm adaptation. Defaults to 0.2. " +"Andrew et al. recommends to set to 0.2." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 +#: of +msgid "" +"The stddev of the noise added to the count of updates currently below the" +" estimate. Andrew et al. 
recommends to set to `expected_num_records/20`" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 +#: of +#, fuzzy +msgid "Create a strategy:" +msgstr "server.strategy" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 +#: of +msgid "" +"Wrap the strategy with the " +"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 +#: of +msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 +#: of +#, fuzzy +msgid "Aggregate training results and update clip norms." +msgstr "汇总 DPFedAvgFixed 中的训练结果并更新片段标准。" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 +#, fuzzy +msgid "DifferentialPrivacyClientSideFixedClipping" +msgstr "差分隐私" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 +#: of +msgid "Use `fixedclipping_mod` modifier at the client side." +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 +#: of +msgid "" +"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " +"on the client-side, usually by using the built-in `fixedclipping_mod`." 
+msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 +#: of +msgid "" +"The noise multiplier for the Gaussian mechanism for model updates. A " +"value of 1.0 or higher is recommended for strong privacy." +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 +#: of +msgid "The value of the clipping norm." +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 +#: of +msgid "" +"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " +"wrapper:" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 +#: of +msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 +#: of +#, fuzzy +msgid "Add noise to the aggregated parameters." +msgstr "然后将汇总结果序列化:" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyServerSideAdaptiveClipping" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 +#: of +msgid "" +"The standard deviation of the noise added to the count of updates below " +"the estimate. Andrew et al. 
recommends to set to " +"`expected_num_records/20`" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 +#: of +msgid "" +"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " +"wrapper" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 +#, fuzzy +msgid "DifferentialPrivacyServerSideFixedClipping" +msgstr "差分隐私" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 +#: of +msgid "" +"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " +"wrapper" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 +#: of +msgid "Compute the updates, clip, and pass them for aggregation." 
+msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 +#: of +msgid "Afterward, add noise to the aggregated parameters." +msgstr "" + #: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 #, fuzzy msgid "FaultTolerantFedAvg" @@ -10222,6 +11497,10 @@ msgstr "" "验证过程中使用的客户端的比例。如果 `min_evaluate_clients` 大于 `fraction_evaluate * " "available_clients`,则仍会对 `min_evaluate_clients` 进行采样。默认为 1.0。" +#: flwr.server.strategy.fedavg.FedAvg:33 of +msgid "Enable (True) or disable (False) in-place aggregation of model updates." +msgstr "" + #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`aggregate_evaluate " @@ -11333,26 +12612,472 @@ msgid "" "these as the initial global model parameters." msgstr "**parameters** -- 如果返回参数,服务器将把这些参数视为初始全局模型参数。" -#: ../../source/ref-api/flwr.simulation.rst:2 +#: ../../source/ref-api/flwr.server.workflow.rst:2 #, fuzzy -msgid "simulation" -msgstr "运行模拟" +msgid "workflow" +msgstr "工作流程" -#: ../../source/ref-api/flwr.simulation.rst:17::1 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\[\\, ...\\]\\)" +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:17::1 -#: flwr.simulation.app.start_simulation:1 of -msgid "Start a Ray-based Flower simulation server." -msgstr "启动基于 Ray 的Flower模拟服务器。" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of +msgid "Default workflow in Flower." +msgstr "" -#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 -#, fuzzy -msgid "start\\_simulation" -msgstr "start_simulation" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +msgid "" +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "The workflow for the SecAgg+ protocol." +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +msgid "" +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "The workflow for the SecAgg protocol." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 +#, fuzzy +msgid "DefaultWorkflow" +msgstr "工作流程" + +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 +#, fuzzy +msgid "SecAggPlusWorkflow" +msgstr "工作流程" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 +#: of +msgid "" +"The SecAgg+ protocol ensures the secure summation of integer vectors " +"owned by multiple parties, without accessing any individual integer " +"vector. This workflow allows the server to compute the weighted average " +"of model parameters across all clients, ensuring individual contributions" +" remain private. This is achieved by clients sending both, a weighting " +"factor and a weighted version of the locally updated parameters, both of " +"which are masked for privacy. Specifically, each client uploads \"[w, w *" +" params]\" with masks, where weighting factor 'w' is the number of " +"examples ('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 +#: of +msgid "" +"The protocol involves four main stages: - 'setup': Send SecAgg+ " +"configuration to clients and collect their public keys. - 'share keys': " +"Broadcast public keys among clients and collect encrypted secret" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 +#: of +msgid "key shares." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:18 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:18 +#: of +msgid "" +"'collect masked vectors': Forward encrypted secret key shares to target " +"clients and collect masked model parameters." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:20 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:20 +#: of +msgid "" +"'unmask': Collect secret key shares to decrypt and aggregate the model " +"parameters." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:22 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:22 +#: of +msgid "" +"Only the aggregated model parameters are exposed and passed to " +"`Strategy.aggregate_fit`, ensuring individual data privacy." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:25 +#: of +msgid "" +"The number of shares into which each client's private key is split under " +"the SecAgg+ protocol. If specified as a float, it represents the " +"proportion of all selected clients, and the number of shares will be set " +"dynamically in the run time. A private key can be reconstructed from " +"these shares, allowing for the secure aggregation of model updates. Each " +"client sends one share to each of its neighbors while retaining one." 
+msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:25 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:32 +#: of +msgid "" +"The minimum number of shares required to reconstruct a client's private " +"key, or, if specified as a float, it represents the proportion of the " +"total number of shares needed for reconstruction. This threshold ensures " +"privacy by allowing for the recovery of contributions from dropped " +"clients during aggregation, without compromising individual client data." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:31 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:38 +#: of +msgid "" +"The maximum value of the weight that can be assigned to any single " +"client's update during the weighted average calculation on the server " +"side, e.g., in the FedAvg algorithm." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:35 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:42 +#: of +msgid "" +"The range within which model parameters are clipped before quantization. " +"This parameter ensures each model parameter is bounded within " +"[-clipping_range, clipping_range], facilitating quantization." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:39 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:46 +#: of +msgid "" +"The size of the range into which floating-point model parameters are " +"quantized, mapping each parameter to an integer in [0, " +"quantization_range-1]. This facilitates cryptographic operations on the " +"model updates." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:43 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:50 +#: of +msgid "" +"The range of values from which random mask entries are uniformly sampled " +"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " +"Please use 2**n values for `modulus_range` to prevent overflow issues." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:47 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:54 +#: of +msgid "" +"The timeout duration in seconds. If specified, the workflow will wait for" +" replies for this duration each time. If `None`, there is no time limit " +"and the workflow will wait until replies for all messages are received." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 +#: of +msgid "" +"Generally, higher `num_shares` means more robust to dropouts while " +"increasing the computational costs; higher `reconstruction_threshold` " +"means better privacy guarantees but less tolerance to dropouts." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:58 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:64 +#: of +msgid "Too large `max_weight` may compromise the precision of the quantization." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 +#: of +msgid "`modulus_range` must be 2**n and larger than `quantization_range`." 
+msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 +#: of +msgid "" +"When `num_shares` is a float, it is interpreted as the proportion of all " +"selected clients, and hence the number of shares will be determined in " +"the runtime. This allows for dynamic adjustment based on the total number" +" of participating clients." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:69 +#: of +msgid "" +"Similarly, when `reconstruction_threshold` is a float, it is interpreted " +"as the proportion of the number of shares needed for the reconstruction " +"of a private key. This feature enables flexibility in setting the " +"security threshold relative to the number of distributed shares." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:73 +#: of +msgid "" +"`num_shares`, `reconstruction_threshold`, and the quantization parameters" +" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg+" +" protocol." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`collect_masked_vectors_stage " +"`\\" +" \\(driver\\, ...\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "Execute the 'collect masked vectors' stage." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`setup_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 +#: of +msgid "Execute the 'setup' stage." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`share_keys_stage " +"`\\ " +"\\(driver\\, context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 +#: of +msgid "Execute the 'share keys' stage." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: of +msgid "Execute the 'unmask' stage." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 +#, fuzzy +msgid "SecAggWorkflow" +msgstr "工作流程" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "" +"Bases: " +":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of +msgid "" +"The SecAgg protocol ensures the secure summation of integer vectors owned" +" by multiple parties, without accessing any individual integer vector. " +"This workflow allows the server to compute the weighted average of model " +"parameters across all clients, ensuring individual contributions remain " +"private. This is achieved by clients sending both, a weighting factor and" +" a weighted version of the locally updated parameters, both of which are " +"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" +" with masks, where weighting factor 'w' is the number of examples " +"('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 of +msgid "" +"The protocol involves four main stages: - 'setup': Send SecAgg " +"configuration to clients and collect their public keys. - 'share keys': " +"Broadcast public keys among clients and collect encrypted secret" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 of +msgid "" +"Each client's private key is split into N shares under the SecAgg " +"protocol, where N is the number of selected clients." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:56 of +msgid "" +"Generally, higher `reconstruction_threshold` means better privacy " +"guarantees but less tolerance to dropouts." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 of +msgid "" +"When `reconstruction_threshold` is a float, it is interpreted as the " +"proportion of the number of all selected clients needed for the " +"reconstruction of a private key. This feature enables flexibility in " +"setting the security threshold relative to the number of selected " +"clients." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:64 of +msgid "" +"`reconstruction_threshold`, and the quantization parameters " +"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg " +"protocol." 
+msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`collect_masked_vectors_stage " +"`\\ " +"\\(driver\\, ...\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`setup_stage `\\" +" \\(driver\\, context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`share_keys_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:2 +#, fuzzy +msgid "simulation" +msgstr "运行模拟" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +msgid "" +":py:obj:`start_simulation `\\ \\(\\*\\," +" client\\_fn\\[\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +#: flwr.simulation.app.start_simulation:1 of +msgid "Start a Ray-based Flower simulation server." +msgstr "启动基于 Ray 的Flower模拟服务器。" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +msgid "" +":py:obj:`run_simulation_from_cli " +"`\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +#: flwr.simulation.run_simulation.run_simulation_from_cli:1 of +msgid "Run Simulation Engine from the CLI." +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +msgid "" +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +#: flwr.simulation.run_simulation.run_simulation:1 of +msgid "Run a Flower App using the Simulation Engine." +msgstr "" + +#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 +#, fuzzy +msgid "run\\_simulation" +msgstr "运行模拟" + +#: flwr.simulation.run_simulation.run_simulation:3 of +msgid "" +"The `ServerApp` to be executed. It will send messages to different " +"`ClientApp` instances running on different (virtual) SuperNodes." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:6 of +msgid "" +"The `ClientApp` to be executed by each of the SuperNodes. It will receive" +" messages sent by the `ServerApp`." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:9 of +msgid "" +"Number of nodes that run a ClientApp. They can be sampled by a Driver in " +"the ServerApp and receive a Message describing what the ClientApp should " +"perform." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:13 of +msgid "A simulation backend that runs `ClientApp`s." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:15 of +msgid "" +"'A dictionary, e.g {\"\": , \"\": } to " +"configure a backend. Values supported in are those included by " +"`flwr.common.typing.ConfigsRecordValues`." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:19 of +msgid "" +"A boolean to indicate whether to enable GPU growth on the main thread. " +"This is desirable if you make use of a TensorFlow model on your " +"`ServerApp` while having your `ClientApp` running on the same GPU. " +"Without enabling this, you might encounter an out-of-memory error because" +" TensorFlow, by default, allocates all GPU memory. 
Read more about how " +"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " +"documentation: https://www.tensorflow.org/api/stable." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:26 of +msgid "" +"When diabled, only INFO, WARNING and ERROR log messages will be shown. If" +" enabled, DEBUG-level logs will be displayed." +msgstr "" + +#: ../../source/ref-api/flwr.simulation.run_simulation_from_cli.rst:2 +#, fuzzy +msgid "run\\_simulation\\_from\\_cli" +msgstr "运行模拟" + +#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +#, fuzzy +msgid "start\\_simulation" +msgstr "start_simulation" #: flwr.simulation.app.start_simulation:3 of msgid "" @@ -11456,10 +13181,11 @@ msgid "" msgstr "设为 True 可在 `ray.is_initialized()=True` 情况下阻止 `ray.shutdown()` 。" #: flwr.simulation.app.start_simulation:50 of +#, fuzzy msgid "" "Optionally specify the type of actor to use. The actor object, which " "persists throughout the simulation, will be the process in charge of " -"running the clients' jobs (i.e. their `fit()` method)." +"executing a ClientApp wrapping input argument `client_fn`." msgstr "可选择指定要使用的actor类型。actor对象将在整个模拟过程中持续存在,它将是负责运行客户端作业(即其 `fit()`方法)的进程。" #: flwr.simulation.app.start_simulation:54 of @@ -12484,8 +14210,8 @@ msgid "" "tensorflow.html) notebooks, and a new [YouTube tutorial " "series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." msgstr "" -"综合文档包括新的[how-to run simulations](https://flower.ai/docs/framework/how-" -"to-run-simulations.html) guide, new [simulation-" +"综合文档包括新的[how-to run simulations](https://flower.ai/docs/framework/how-to-" +"run-simulations.html) guide, new [simulation-" "pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " "[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" "tensorflow.html) notebooks, and a new [YouTube tutorial " @@ -12551,15 +14277,15 @@ msgstr "" "[#2227](https://github.com/adap/flower/pull/2227))" #: ../../source/ref-changelog.md:220 +#, fuzzy msgid "" "Much effort went into a completely restructured Flower docs experience. " -"The documentation on [flower.ai/docs](flower.ai/docs) is now divided " -"into Flower Framework, Flower Baselines, Flower Android SDK, Flower iOS " -"SDK, and code example projects." +"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " +"divided into Flower Framework, Flower Baselines, Flower Android SDK, " +"Flower iOS SDK, and code example projects." msgstr "" -"Flower 文档体验的全面重构耗费了大量精力。现在,[flower.ai/docs](flower.ai/docs)上的文档分为 " -"Flower Framework、Flower Baselines、Flower Android SDK、Flower iOS SDK " -"和代码示例项目。" +"Flower 文档体验的全面重构耗费了大量精力。现在,[flower.ai/docs](flower.ai/docs)上的文档分为 Flower " +"Framework、Flower Baselines、Flower Android SDK、Flower iOS SDK 和代码示例项目。" #: ../../source/ref-changelog.md:222 msgid "" @@ -12904,13 +14630,13 @@ msgid "" "gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" " " "[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" and a [code " -"example](https://github.com/adap/flower/tree/main/examples/xgboost-quickstart)" -" that demonstrates the usage of this new strategy in an XGBoost project." +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." 
msgstr "" "XGBoost 是一种基于树的集合机器学习算法,它使用梯度提升来提高模型的准确性。我们添加了一个新的 " -"\"FedXgbNnAvg\"[策略](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)和一个[代码示例](https://github.com/adap/flower/tree/main/examples/xgboost-quickstart),演示如何在" -" XGBoost 项目中使用这个新策略。" +"\"FedXgbNnAvg\"[策略](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)和一个[代码示例](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart),演示如何在 XGBoost 项目中使用这个新策略。" #: ../../source/ref-changelog.md:300 msgid "" @@ -13094,10 +14820,12 @@ msgstr "" msgid "" "TabNet is a powerful and flexible framework for training machine learning" " models on tabular data. We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-tabnet)." +"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" +"/quickstart-tabnet)." msgstr "" -"TabNet 是一个强大而灵活的框架,用于在表格数据上训练机器学习模型。我们现在有一个使用 Flower " -"的联邦示例:[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-tabnet)。" +"TabNet 是一个强大而灵活的框架,用于在表格数据上训练机器学习模型。我们现在有一个使用 Flower 的联邦示例:[quickstart-" +"tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-" +"tabnet)。" #: ../../source/ref-changelog.md:334 msgid "" @@ -13305,11 +15033,13 @@ msgstr "" msgid "" "A new code example (`quickstart-fastai`) demonstrates federated learning " "with [fastai](https://www.fast.ai/) and Flower. You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples/quickstart-fastai)." +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." msgstr "" "一个新的代码示例(`quickstart-fastai`)演示了使用 [fastai](https://www.fast.ai/) 和 " -"Flower 的联邦学习。您可以在这里找到它: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples/quickstart-fastai)。" +"Flower 的联邦学习。您可以在这里找到它: [quickstart-" +"fastai](https://github.com/adap/flower/tree/main/examples/quickstart-" +"fastai)。" #: ../../source/ref-changelog.md:376 msgid "" @@ -13527,8 +15257,8 @@ msgid "" msgstr "" "在未来几周内,我们将发布一些新的参考,特别是对 FL " "新手有用的方法。它们通常会重温文献中的知名论文,适合集成到您自己的应用程序中或用于实验,以加深您对 FL " -"的总体了解。今天发布的是该系列中的第一篇。[阅读全文](https://flower.ai/blog/2023-01-12-fl-" -"starter-pack-fedavg-mnist-cnn/)" +"的总体了解。今天发布的是该系列中的第一篇。[阅读全文](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" #: ../../source/ref-changelog.md:422 msgid "" @@ -13697,11 +15427,13 @@ msgstr "" #: ../../source/ref-changelog.md:453 msgid "" "A new code example (`quickstart-pandas`) demonstrates federated analytics" -" with Pandas and Flower. You can find it here: " -"[quickstart-pandas](https://github.com/adap/flower/tree/main/examples/quickstart-pandas)." +" with Pandas and Flower. You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." msgstr "" "新代码示例(`quickstart-pandas`)演示了使用 Pandas 和 Flower 进行联邦分析。您可以在此处找到它: " -"[quickstart-pandas](https://github.com/adap/flower/tree/main/examples/quickstart-pandas)。" +"[quickstart-pandas](https://github.com/adap/flower/tree/main/examples" +"/quickstart-pandas)。" #: ../../source/ref-changelog.md:455 msgid "" @@ -14590,14 +16322,15 @@ msgstr "" "[#914](https://github.com/adap/flower/pull/914))" #: ../../source/ref-changelog.md:660 +#, fuzzy msgid "" "The first preview release of Flower Baselines has arrived! We're " "kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " "FedAdam, FedAdagrad), FedBN, and FedAvgM. 
Check the documentation on how " "to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " "With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.ai/docs/contributing-" -"baselines.html)." +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." msgstr "" "Flower Baselines 的第一个预览版已经发布!我们通过实现 " "FedOpt(FedYogi、FedAdam、FedAdagrad)、FedBN 和 FedAvgM 来启动 Flower " @@ -14787,8 +16520,8 @@ msgid "" "strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " "[#1175](https://github.com/adap/flower/pull/1175))" msgstr "" -"[实施战略](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) 的新文件([#1097](https://github.com/adap/flower/pull/1097), " +"[实施战略](https://flower.ai/docs/framework/how-to-implement-strategies.html)" +" 的新文件([#1097](https://github.com/adap/flower/pull/1097), " "[#1175](https://github.com/adap/flower/pull/1175)" #: ../../source/ref-changelog.md:707 @@ -15775,32 +17508,20 @@ msgstr "" "`PyTorch `_ 或 `TensorFlow " "`_。" -#: ../../source/ref-example-projects.rst:11 +#: ../../source/ref-example-projects.rst:10 +#, fuzzy msgid "" -"Flower usage examples used to be bundled with Flower in a package called " -"``flwr_example``. We are migrating those examples to standalone projects " -"to make them easier to use. All new examples are based in the directory " -"`examples `_." -msgstr "" -"Flower 的使用示例曾与 Flower 捆绑在一个名为 ``flwr_example`` " -"的软件包中。我们正在将这些示例迁移到独立项目中,以使它们更易于使用。所有新示例都位于目录 `examples " -"`_。" - -#: ../../source/ref-example-projects.rst:16 -msgid "The following examples are available as standalone projects." +"The following examples are available as standalone projects. Quickstart " +"TensorFlow/Keras ---------------------------" msgstr "以下示例可作为独立项目使用。" -#: ../../source/ref-example-projects.rst:20 -msgid "Quickstart TensorFlow/Keras" -msgstr "快速入门 TensorFlow/Keras" - -#: ../../source/ref-example-projects.rst:22 +#: ../../source/ref-example-projects.rst:14 msgid "" "The TensorFlow/Keras quickstart example shows CIFAR-10 image " "classification with MobileNetV2:" msgstr "TensorFlow/Keras 快速入门示例展示了使用 MobileNetV2 进行的 CIFAR-10 图像分类:" -#: ../../source/ref-example-projects.rst:25 +#: ../../source/ref-example-projects.rst:17 msgid "" "`Quickstart TensorFlow (Code) " "`_" -#: ../../source/ref-example-projects.rst:26 -msgid "" -"`Quickstart TensorFlow (Tutorial) `_" +#: ../../source/ref-example-projects.rst:18 +#, fuzzy +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" msgstr "" "`TensorFlow快速入门 (教程) `_" -#: ../../source/ref-example-projects.rst:27 +#: ../../source/ref-example-projects.rst:19 msgid "" "`Quickstart TensorFlow (Blog Post) `_" @@ -15825,18 +17545,18 @@ msgstr "" "`TensorFlow快速入门 (博客) `_" -#: ../../source/ref-example-projects.rst:31 +#: ../../source/ref-example-projects.rst:23 #: ../../source/tutorial-quickstart-pytorch.rst:5 msgid "Quickstart PyTorch" msgstr "PyTorch快速入门" -#: ../../source/ref-example-projects.rst:33 +#: ../../source/ref-example-projects.rst:25 msgid "" "The PyTorch quickstart example shows CIFAR-10 image classification with a" " simple Convolutional Neural Network:" msgstr "PyTorch 快速入门范例展示了使用简单卷积神经网络进行 CIFAR-10 图像分类的情况:" -#: ../../source/ref-example-projects.rst:36 +#: ../../source/ref-example-projects.rst:28 msgid "" "`Quickstart PyTorch (Code) " "`_" @@ -15844,25 +17564,24 @@ msgstr "" "`PyTorch快速入门 (代码) `_" -#: ../../source/ref-example-projects.rst:37 -msgid "" -"`Quickstart PyTorch (Tutorial) `_" +#: 
../../source/ref-example-projects.rst:29 +#, fuzzy +msgid ":doc:`Quickstart PyTorch (Tutorial) `" msgstr "" "`PyTorch快速入门 (教程) `_" -#: ../../source/ref-example-projects.rst:41 +#: ../../source/ref-example-projects.rst:33 msgid "PyTorch: From Centralized To Federated" msgstr "PyTorch: 从集中式到联邦式" -#: ../../source/ref-example-projects.rst:43 +#: ../../source/ref-example-projects.rst:35 msgid "" "This example shows how a regular PyTorch project can be federated using " "Flower:" msgstr "本例展示了如何使用 Flower 联邦化一个普通的 PyTorch 项目:" -#: ../../source/ref-example-projects.rst:45 +#: ../../source/ref-example-projects.rst:37 msgid "" "`PyTorch: From Centralized To Federated (Code) " "`_" -#: ../../source/ref-example-projects.rst:46 +#: ../../source/ref-example-projects.rst:38 +#, fuzzy msgid "" -"`PyTorch: From Centralized To Federated (Tutorial) " -"`_" +":doc:`PyTorch: From Centralized To Federated (Tutorial) `" msgstr "" "PyTorch: 从集中式到联邦式(教程) `_" -#: ../../source/ref-example-projects.rst:50 +#: ../../source/ref-example-projects.rst:42 msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" msgstr "树莓派和 Nvidia Jetson 上的联邦学习" -#: ../../source/ref-example-projects.rst:52 +#: ../../source/ref-example-projects.rst:44 msgid "" "This example shows how Flower can be used to build a federated learning " "system that run across Raspberry Pi and Nvidia Jetson:" msgstr "本示例展示了如何利用 Flower 建立一个跨 Raspberry Pi 和 Nvidia Jetson 运行的联邦学习系统:" -#: ../../source/ref-example-projects.rst:54 +#: ../../source/ref-example-projects.rst:46 msgid "" "`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " "`_" @@ -15898,7 +17617,7 @@ msgstr "" "Raspberry Pi 和 Nvidia Jetson 上的联邦学习(代码) " "`_" -#: ../../source/ref-example-projects.rst:55 +#: ../../source/ref-example-projects.rst:47 msgid "" "`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " "`_" @@ -15906,186 +17625,28 @@ msgstr "" "Raspberry Pi和 Nvidia Jetson 上的联邦学习(博客) " "`_" -#: ../../source/ref-example-projects.rst:60 -msgid "Legacy Examples (`flwr_example`)" -msgstr "传统示例 (`flwr_example`)" - -#: ../../source/ref-example-projects.rst:63 +#: ../../source/ref-faq.rst:4 msgid "" -"The useage examples in `flwr_example` are deprecated and will be removed " -"in the future. New examples are provided as standalone projects in " -"`examples `_." -msgstr "" -"在 `flwr_example` 中的使用示例已被弃用,今后将被移除。新示例将作为独立项目在 `examples " -"`_ 中提供。" +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." +msgstr "本页收集了有关 \"Flower 联邦学习 \"常见问题的答案。" -#: ../../source/ref-example-projects.rst:69 -msgid "Extra Dependencies" -msgstr "额外依赖" +#: ../../source/ref-faq.rst +#, fuzzy +msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +msgstr ":fa:`eye,mr-1` Flower 可以在 Juptyter Notebooks / Google Colab 上运行吗?" -#: ../../source/ref-example-projects.rst:71 +#: ../../source/ref-faq.rst:8 msgid "" -"The core Flower framework keeps a minimal set of dependencies. The " -"examples demonstrate Flower in the context of different machine learning " -"frameworks, so additional dependencies need to be installed before an " -"example can be run." 
-msgstr "Flower 核心框架只保留了最低限度的依赖项。这些示例在不同机器学习框架的背景下演示了 Flower,因此在运行示例之前需要安装额外的依赖项。" - -#: ../../source/ref-example-projects.rst:75 -msgid "For PyTorch examples::" -msgstr "PyTorch 示例::" - -#: ../../source/ref-example-projects.rst:79 -msgid "For TensorFlow examples::" -msgstr "TensorFlow 示例::" - -#: ../../source/ref-example-projects.rst:83 -msgid "For both PyTorch and TensorFlow examples::" -msgstr "PyTorch 和 TensorFlow 示例::" +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. Here's a quickstart example:" +msgstr "是的,它可以!Flower 甚至还进行了一些底层优化,使其在 Colab 上运行得更好。下面是一个快速启动示例:" -#: ../../source/ref-example-projects.rst:87 +#: ../../source/ref-faq.rst:10 msgid "" -"Please consult :code:`pyproject.toml` for a full list of possible extras " -"(section :code:`[tool.poetry.extras]`)." -msgstr "" -"请参阅 :code:`pyproject.toml`,了解可能的 extras 的完整列表(章节 " -":code:`[tool.poems.extras]`)。" - -#: ../../source/ref-example-projects.rst:92 -msgid "PyTorch Examples" -msgstr "PyTorch 示例" - -#: ../../source/ref-example-projects.rst:94 -msgid "" -"Our PyTorch examples are based on PyTorch 1.7. They should work with " -"other releases as well. So far, we provide the following examples." -msgstr "我们的 PyTorch 示例基于 PyTorch 1.7。它们应该也能在其他版本中使用。到目前为止,我们提供了以下示例。" - -#: ../../source/ref-example-projects.rst:98 -msgid "CIFAR-10 Image Classification" -msgstr "CIFAR-10 图像分类" - -#: ../../source/ref-example-projects.rst:100 -msgid "" -"`CIFAR-10 and CIFAR-100 `_ " -"are popular RGB image datasets. The Flower CIFAR-10 example uses PyTorch " -"to train a simple CNN classifier in a federated learning setup with two " -"clients." -msgstr "" -"CIFAR-10 和 CIFAR-100 ``_ " -"是流行的 RGB 图像数据集。Flower CIFAR-10 示例使用 PyTorch 在有两个客户端的联邦学习设置中训练一个简单的 CNN " -"分类器。" - -#: ../../source/ref-example-projects.rst:104 -#: ../../source/ref-example-projects.rst:121 -#: ../../source/ref-example-projects.rst:146 -msgid "First, start a Flower server:" -msgstr "首先,启动 Flower 服务器:" - -#: ../../source/ref-example-projects.rst:106 -msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" -msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" - -#: ../../source/ref-example-projects.rst:108 -#: ../../source/ref-example-projects.rst:125 -#: ../../source/ref-example-projects.rst:150 -msgid "Then, start the two clients in a new terminal window:" -msgstr "然后,在新的终端窗口中启动两个客户端:" - -#: ../../source/ref-example-projects.rst:110 -msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" -msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" - -#: ../../source/ref-example-projects.rst:112 -msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." -msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_cifar`。" - -#: ../../source/ref-example-projects.rst:115 -msgid "ImageNet-2012 Image Classification" -msgstr "ImageNet-2012 图像分类" - -#: ../../source/ref-example-projects.rst:117 -msgid "" -"`ImageNet-2012 `_ is one of the major computer" -" vision datasets. The Flower ImageNet example uses PyTorch to train a " -"ResNet-18 classifier in a federated learning setup with ten clients." 
-msgstr "" -"ImageNet-2012 `_ 是主要的计算机视觉数据集之一。Flower " -"ImageNet 示例使用 PyTorch 在有十个客户端的联邦学习设置中训练 ResNet-18 分类器。" - -#: ../../source/ref-example-projects.rst:123 -msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" -msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" - -#: ../../source/ref-example-projects.rst:127 -msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" -msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" - -#: ../../source/ref-example-projects.rst:129 -msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." -msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_imagenet`。" - -#: ../../source/ref-example-projects.rst:133 -msgid "TensorFlow Examples" -msgstr "TensorFlow 示例" - -#: ../../source/ref-example-projects.rst:135 -msgid "" -"Our TensorFlow examples are based on TensorFlow 2.0 or newer. So far, we " -"provide the following examples." -msgstr "我们的 TensorFlow 示例基于 TensorFlow 2.0 或更新版本。到目前为止,我们提供了以下示例。" - -#: ../../source/ref-example-projects.rst:139 -msgid "Fashion-MNIST Image Classification" -msgstr "Fashion-MNIST 图像分类" - -#: ../../source/ref-example-projects.rst:141 -msgid "" -"`Fashion-MNIST `_ is " -"often used as the \"Hello, world!\" of machine learning. We follow this " -"tradition and provide an example which samples random local datasets from" -" Fashion-MNIST and trains a simple image classification model over those " -"partitions." -msgstr "" -"`Fashion-MNIST `_ " -"经常被用作机器学习的 \"你好,世界!\"。我们遵循这一传统,提供了一个从Fashion-MNIST " -"中随机抽样本地数据集的示例,并在这些分区上训练一个简单的图像分类模型。" - -#: ../../source/ref-example-projects.rst:148 -msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" -msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" - -#: ../../source/ref-example-projects.rst:152 -msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" -msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" - -#: ../../source/ref-example-projects.rst:154 -msgid "" -"For more details, see " -":code:`src/py/flwr_example/tensorflow_fashion_mnist`." -msgstr "更多详情,请参阅 :code:`src/py/flwr_example/tensorflow_fashion_mnist`。" - -#: ../../source/ref-faq.rst:4 -msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." -msgstr "本页收集了有关 \"Flower 联邦学习 \"常见问题的答案。" - -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can Flower run on Juptyter Notebooks / Google Colab?" -msgstr ":fa:`eye,mr-1` Flower 可以在 Juptyter Notebooks / Google Colab 上运行吗?" - -#: ../../source/ref-faq.rst:8 -msgid "" -"Yes, it can! Flower even comes with a few under-the-hood optimizations to" -" make it work even better on Colab. Here's a quickstart example:" -msgstr "是的,它可以!Flower 甚至还进行了一些底层优化,使其在 Colab 上运行得更好。下面是一个快速启动示例:" - -#: ../../source/ref-faq.rst:10 -msgid "" -"`Flower simulation PyTorch " -"`_" +"`Flower simulation PyTorch " +"`_" msgstr "" "`Flower 模拟 PyTorch " "`_ or check out the code examples:" +"`_ or check out the code examples:" msgstr "" "是的,确实如此。请查看我们的 \"博客文章 `_\" 或查看代码示例:" @@ -16134,9 +17695,7 @@ msgstr "" msgid "" "`Android Kotlin example `_" -msgstr "" -"`Android Kotlin 示例 `_" +msgstr "`Android Kotlin 示例 `_" #: ../../source/ref-faq.rst:22 msgid "`Android Java example `_" @@ -16169,8 +17728,9 @@ msgstr "" "`_." #: ../../source/ref-faq.rst:30 +#, fuzzy msgid "" -"`Flower meets KOSMoS `_." msgstr "" "`Flower meets KOSMoS `_ ." 
msgstr "" "如果您想查看所有内容,请查看完整的代码示例: [https://github.com/adap/flower/tree/main/examples" "/quickstart-" "huggingface](https://github.com/adap/flower/tree/main/examples" "/quickstart-huggingface)." -#: ../../source/tutorial-quickstart-huggingface.rst:227 +#: ../../source/tutorial-quickstart-huggingface.rst:226 msgid "" "Of course, this is a very basic example, and a lot can be added or " "modified, it was just to showcase how simply we could federate a Hugging " "Face workflow using Flower." msgstr "当然,这只是一个非常基本的示例,还可以添加或修改很多内容,只是为了展示我们可以如何简单地使用 Flower 联合Hugging Face的工作流程。" -#: ../../source/tutorial-quickstart-huggingface.rst:230 +#: ../../source/tutorial-quickstart-huggingface.rst:229 msgid "" "Note that in this example we used :code:`PyTorch`, but we could have very" " well used :code:`TensorFlow`." @@ -16655,11 +18214,12 @@ msgid "" msgstr "在本教程中,我们将学习如何在 iOS 设备上使用 Flower 和 CoreML 在 MNIST 上训练神经网络。" #: ../../source/tutorial-quickstart-ios.rst:12 +#, fuzzy msgid "" "First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a `virtualenv " -"`_. For the Flower " -"client implementation in iOS, it is recommended to use Xcode as our IDE." +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." msgstr "" "首先,为了运行 Flower Python 服务器,建议创建一个虚拟环境,并在 `virtualenv " "`_ 中运行一切。对于在 iOS 中实现 " @@ -16691,6 +18251,15 @@ msgstr "现在我们已经有了一个大致的概念,让我们开始设置 Fl msgid "Or Poetry:" msgstr "或者Poetry:" +#: ../../source/tutorial-quickstart-ios.rst:34 +#: ../../source/tutorial-quickstart-mxnet.rst:36 +#: ../../source/tutorial-quickstart-pytorch.rst:37 +#: ../../source/tutorial-quickstart-scikitlearn.rst:40 +#: ../../source/tutorial-quickstart-tensorflow.rst:29 +#: ../../source/tutorial-quickstart-xgboost.rst:55 +msgid "Flower Client" +msgstr "Flower 客户端" + #: ../../source/tutorial-quickstart-ios.rst:36 msgid "" "Now that we have all our dependencies installed, let's run a simple " @@ -16738,20 +18307,22 @@ msgstr "" "中完成。" #: ../../source/tutorial-quickstart-ios.rst:99 +#, fuzzy msgid "" "Since CoreML does not allow the model parameters to be seen before " "training, and accessing the model parameters during or after the training" " can only be done by specifying the layer name, we need to know this " -"informations beforehand, through looking at the model specification, " -"which are written as proto files. The implementation can be seen in " +"information beforehand, through looking at the model specification, which" +" are written as proto files. The implementation can be seen in " ":code:`MLModelInspect`." msgstr "" "由于 CoreML 不允许在训练前查看模型参数,而在训练过程中或训练后访问模型参数只能通过指定层名来完成,因此我们需要事先通过查看模型规范(写成 " "proto 文件)来了解这些信息。具体实现可参见 :code:`MLModelInspect`。" #: ../../source/tutorial-quickstart-ios.rst:102 +#, fuzzy msgid "" -"After we have all of the necessary informations, let's create our Flower " +"After we have all of the necessary information, let's create our Flower " "client." 
msgstr "获得所有必要信息后,让我们创建 Flower 客户端。" @@ -16774,6 +18345,15 @@ msgstr "" ":code:`startFlwrGRPC()`。属性 :code:`hostname` 和 :code:`port` " "会告诉客户端要连接到哪个服务器。这可以通过在应用程序中输入主机名和端口来实现,然后再点击开始按钮启动联邦学习进程。" +#: ../../source/tutorial-quickstart-ios.rst:129 +#: ../../source/tutorial-quickstart-mxnet.rst:226 +#: ../../source/tutorial-quickstart-pytorch.rst:203 +#: ../../source/tutorial-quickstart-scikitlearn.rst:157 +#: ../../source/tutorial-quickstart-tensorflow.rst:98 +#: ../../source/tutorial-quickstart-xgboost.rst:309 +msgid "Flower Server" +msgstr "Flower 服务器" + #: ../../source/tutorial-quickstart-ios.rst:131 #: ../../source/tutorial-quickstart-mxnet.rst:228 #: ../../source/tutorial-quickstart-pytorch.rst:205 @@ -16853,7 +18433,7 @@ msgstr "快速入门 MXNet" msgid "" "MXNet is no longer maintained and has been moved into `Attic " "`_. As a result, we would " -"encourage you to use other ML frameworks alongise Flower, for example, " +"encourage you to use other ML frameworks alongside Flower, for example, " "PyTorch. This tutorial might be removed in future versions of Flower." msgstr "" @@ -16865,14 +18445,22 @@ msgstr "在本教程中,我们将学习如何使用 Flower 和 MXNet 在 MNIST #: ../../source/tutorial-quickstart-mxnet.rst:14 #: ../../source/tutorial-quickstart-scikitlearn.rst:12 +#, fuzzy msgid "" "It is recommended to create a virtual environment and run everything " -"within this `virtualenv `_." +"within this :doc:`virtualenv `." msgstr "" "建议创建一个虚拟环境,并在此 `virtualenv `_ 中运行所有内容。" +#: ../../source/tutorial-quickstart-mxnet.rst:16 +#: ../../source/tutorial-quickstart-pytorch.rst:17 +#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +msgid "" +"Our example consists of one *server* and two *clients* all having the " +"same model." +msgstr "我们的例子包括一个*服务器*和两个*客户端*,它们都有相同的模型。" + #: ../../source/tutorial-quickstart-mxnet.rst:18 #: ../../source/tutorial-quickstart-scikitlearn.rst:16 msgid "" @@ -17139,14 +18727,30 @@ msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 CIF #: ../../source/tutorial-quickstart-pytorch.rst:15 #: ../../source/tutorial-quickstart-xgboost.rst:39 +#, fuzzy msgid "" "First of all, it is recommended to create a virtual environment and run " -"everything within a `virtualenv `_." +"everything within a :doc:`virtualenv `." msgstr "" "首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" +#: ../../source/tutorial-quickstart-pytorch.rst:19 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." +msgstr "*客户端*负责在其本地数据集上更新模型参数。然后,这些参数会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的模型参数更新周期称为一*轮*。" + +#: ../../source/tutorial-quickstart-pytorch.rst:23 +msgid "" +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. 
You can do this by running :" +msgstr "现在,我们已经有了一个大致的概念了,那就让我们开始吧。首先,我们需要安装 Flower。可以通过运行 :" + #: ../../source/tutorial-quickstart-pytorch.rst:29 msgid "" "Since we want to use PyTorch to solve a computer vision task, let's go " @@ -17315,7 +18919,8 @@ msgstr "" "Regression` 模型。" #: ../../source/tutorial-quickstart-scikitlearn.rst:26 -msgid "Since we want to use scikt-learn, let's go ahead and install it:" +#, fuzzy +msgid "Since we want to use scikit-learn, let's go ahead and install it:" msgstr "既然我们要使用 scikt-learn,那就继续安装吧:" #: ../../source/tutorial-quickstart-scikitlearn.rst:32 @@ -17396,12 +19001,14 @@ msgstr "" "还需要导入几个软件包,如 Flower 和 scikit-learn:" #: ../../source/tutorial-quickstart-scikitlearn.rst:73 +#, fuzzy msgid "" -"We load the MNIST dataset from `OpenML `_, " -"a popular image classification dataset of handwritten digits for machine " -"learning. The utility :code:`utils.load_mnist()` downloads the training " -"and test data. The training set is split afterwards into 10 partitions " -"with :code:`utils.partition()`." +"We load the MNIST dataset from `OpenML " +"`_, a popular " +"image classification dataset of handwritten digits for machine learning. " +"The utility :code:`utils.load_mnist()` downloads the training and test " +"data. The training set is split afterwards into 10 partitions with " +":code:`utils.partition()`." msgstr "" "我们从 `OpenML `_ 中加载 MNIST " "数据集,这是一个用于机器学习的流行手写数字图像分类数据集。实用程序 :code:`utils.load_mnist()` " @@ -17969,10 +19576,9 @@ msgid "" "`_), we provide more options to define various experimental" " setups, including aggregation strategies, data partitioning and " -"centralised/distributed evaluation. We also support `Flower simulation " -"`_ making " -"it easy to simulate large client cohorts in a resource-aware manner. " -"Let's take a look!" +"centralised/distributed evaluation. We also support :doc:`Flower " +"simulation ` making it easy to simulate large " +"client cohorts in a resource-aware manner. Let's take a look!" msgstr "" "既然您已经知道联合 XGBoost 如何与 Flower 协同工作,那么现在就该通过自定义实验设置来运行一些更综合的实验了。在 xgboost-" "comprehensive 示例 (`完整代码 " @@ -18413,9 +20019,8 @@ msgid "" "pytorch.html>`__ introduces ``Client``, the flexible API underlying " "``NumPyClient``." msgstr "" -"Flower联邦学习教程 - 第4部分 `__ " -"介绍了``Client``,它是``NumPyClient``底层的灵活应用程序接口。" +"Flower联邦学习教程 - 第4部分 `__ 介绍了``Client``,它是``NumPyClient``底层的灵活应用程序接口。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 msgid "Customize the client" @@ -18438,8 +20043,8 @@ msgstr "" "pytorch.html>`__),了解了如何使用策略来定制服务器和客户端的执行(`part 2 " "`__),并从头开始构建了我们自己的定制策略(`part 3 " -"`__)。" +"`__)。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 msgid "" @@ -18678,8 +20283,8 @@ msgstr "客户端" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 msgid "" -"To be able to serialize our ``ndarray``\\ s into sparse " -"parameters, we will just have to call our custom functions in our " +"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " +"will just have to call our custom functions in our " "``flwr.client.Client``." msgstr "为了能够将我们的 ``ndarray`` 序列化为稀疏参数,我们只需在 ``flwr.client.Client`` 中调用我们的自定义函数。" @@ -19569,9 +21174,9 @@ msgstr "" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 msgid "" "The `Flower Federated Learning Tutorial - Part 3 " -"`__ shows how to build a fully custom ``Strategy`` " -"from scratch." +"`__ shows how to build a fully custom ``Strategy`` from " +"scratch." 
msgstr "" "`Flower 联邦学习教程 - 第 3 部分 `__ 展示了如何从头开始构建完全自定义的 \"策略\"。" @@ -19606,8 +21211,8 @@ msgid "" "unclear, head over to the ``#questions`` channel." msgstr "" "`Star Flower on GitHub `__ ⭐️ 并加入 Slack " -"上的开源 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ " -"🌼 我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" +"上的开源 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼" +" 我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 msgid "Let's get started!" @@ -19631,7 +21236,7 @@ msgid "" msgstr "在机器学习中,我们有一个模型和数据。模型可以是一个神经网络(如图所示),也可以是其他东西,比如经典的线性回归。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|31e4b1afa87c4b968327bbeafbf184d4|" +msgid "|2b5c62c529f6416f840c594cce062fbb|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 @@ -19646,7 +21251,7 @@ msgid "" msgstr "我们使用数据来训练模型,以完成一项有用的任务。任务可以是检测图像中的物体、转录音频或玩围棋等游戏。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|c9d935b4284e4c389a33d86b33e07c0a|" +msgid "|90b334680cb7467d9a04d39b8e8dca9f|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 @@ -19667,7 +21272,7 @@ msgid "" msgstr "它源于智能手机上用户与应用程序的交互、汽车上传感器数据的收集、笔记本电脑上键盘输入的接收,或者智能扬声器上某人试着唱的歌。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|00727b5faffb468f84dd1b03ded88638|" +msgid "|65764ceee89f4335bfd93fd0b115e831|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 @@ -19685,7 +21290,7 @@ msgstr "" "\"通常不只是一个地方,而是很多地方。它可能是多个运行同一应用程序的设备。但也可能是多个组织,都在为同一任务生成数据。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|daf0cf0ff4c24fd29439af78416cf47b|" +msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 @@ -19701,7 +21306,7 @@ msgid "" msgstr "因此,要使用机器学习或任何类型的数据分析,过去使用的方法是在中央服务器上收集所有数据。这个服务器可以在数据中心的某个地方,也可以在云端的某个地方。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|9f093007080d471d94ca90d3e9fde9b6|" +msgid "|11e95ac83a8548d8b3505b4663187d07|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 @@ -19716,7 +21321,7 @@ msgid "" msgstr "一旦所有数据都收集到一处,我们最终就可以使用机器学习算法在数据上训练我们的模型。这就是我们基本上一直依赖的机器学习方法。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|46a26e6150e0479fbd3dfd655f36eb13|" +msgid "|1dab2f3a23674abc8a6731f20fa10730|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 @@ -19736,7 +21341,7 @@ msgid "" msgstr "我们刚刚看到的经典机器学习方法可以在某些情况下使用。很好的例子包括对假日照片进行分类或分析网络流量。在这些案例中,所有数据自然都可以在中央服务器上获得。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|3daba297595c4c7fb845d90404a6179a|" +msgid "|7f0ee162da38450788493a21627306f7|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 @@ -19751,7 +21356,7 @@ msgid "" msgstr "但这种方法并不适用于许多其他情况。例如,集中服务器上没有数据,或者一台服务器上的数据不足以训练出一个好的模型。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|5769874fa9c4455b80b2efda850d39d7|" +msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 @@ -19835,10 +21440,10 @@ msgid "" "The popularity of privacy-enhancing systems like the `Brave " "`__ browser or the `Signal `__ " "messenger shows that users care about privacy. In fact, they choose the " -"privacy-enhancing version over other alternatives, if such an alternative " -"exists. 
But what can we do to apply machine learning and data science to " -"these cases to utilize private data? After all, these are all areas that " -"would benefit significantly from recent advances in AI." +"privacy-enhancing version over other alternatives, if such an alternative" +" exists. But what can we do to apply machine learning and data science to" +" these cases to utilize private data? After all, these are all areas that" +" would benefit significantly from recent advances in AI." msgstr "" "像 `Brave `__浏览器或 `Signal " "`__信息管理器这样的隐私增强系统的流行表明,用户关心隐私。事实上,他们会选择隐私性更好的产品。但是,我们能做些什么来将机器学习和数据科学应用到这些情况中,以利用隐私数据呢?毕竟,这些领域都将从人工智能的最新进展中受益匪浅。" @@ -19899,7 +21504,7 @@ msgid "" msgstr "我们首先在服务器上初始化模型。这与经典的集中式学习完全相同:我们随机或从先前保存的检查点初始化模型参数。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|ba47ffb421814b0f8f9fa5719093d839|" +msgid "|5b1408eec0d746cdb91162a9107b6089|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 @@ -19923,7 +21528,7 @@ msgid "" msgstr "接下来,我们会将全局模型的参数发送到连接的客户端节点(如智能手机等边缘设备或企业的服务器)。这是为了确保每个参与节点都使用相同的模型参数开始本地训练。我们通常只使用几个连接节点,而不是所有节点。这样做的原因是,选择越来越多的客户端节点会导致收益递减。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|aeac5bf79cbf497082e979834717e01b|" +msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 @@ -19949,7 +21554,7 @@ msgstr "" "(mini-batches)。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|ce27ed4bbe95459dba016afc42486ba2|" +msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 @@ -19972,7 +21577,7 @@ msgid "" msgstr "经过本地训练后,每个客户节点最初收到的模型参数都会略有不同。参数之所以不同,是因为每个客户端节点的本地数据集中都有不同的数据。然后,客户端节点将这些模型更新发回服务器。它们发送的模型更新既可以是完整的模型参数,也可以只是本地训练过程中积累的梯度。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|ae94a7f71dda443cbec2385751427d41|" +msgid "|ec1fe880237247e0975f52766775ab84|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 @@ -20018,7 +21623,7 @@ msgstr "" " 100 个示例的 10 倍。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" +msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 @@ -20093,10 +21698,6 @@ msgid "" "individual client nodes." msgstr "在很多情况下,机器学习并不是从数据中获取价值的必要条件。数据分析可以产生有价值的见解,但同样,往往没有足够的数据来获得明确的答案。人们患某种健康疾病的平均年龄是多少?联邦分析可以通过多个客户端节点进行此类查询。它通常与安全聚合等其他隐私增强技术结合使用,以防止服务器看到单个客户端节点提交的结果。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 -msgid "Differential Privacy" -msgstr "差分隐私" - #: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 msgid "" "Differential privacy (DP) is often mentioned in the context of Federated " @@ -20129,7 +21730,7 @@ msgstr "" "为联邦学习、分析和评估提供了一种统一的方法。它允许用户联邦化任何工作负载、任何 ML 框架和任何编程语言。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|08cb60859b07461588fe44e55810b050|" +msgid "|ff726bc5505e432388ee2fdd6ef420b9|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 @@ -20493,3 +22094,1021 @@ msgstr "" #~ "`_\" " #~ "的类来配置,因此行为方式也完全相同。除此之外,由 :code:`VirtualClientEngine` " #~ "管理的客户端还包括:" + +#~ msgid "" +#~ "Please follow the first section on " +#~ "`Run Flower using Docker " +#~ "`_ which covers this" +#~ " step in more detail." 
+#~ msgstr "" + +#~ msgid "" +#~ "If the section is completely empty " +#~ "(without any token) or non-existant, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." +#~ msgstr "" + +#~ msgid "Example: Walk-Through PyTorch & MNIST" +#~ msgstr "实例: PyTorch 和 MNIST 的演练" + +#~ msgid "" +#~ "In this tutorial we will learn, " +#~ "how to train a Convolutional Neural " +#~ "Network on MNIST using Flower and " +#~ "PyTorch." +#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 MNIST 上训练卷积神经网络。" + +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead an install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "我们想用 PyTorch 来做计算机视觉任务,需要先安装 PyTorch 和 **torchvision** 库:" + +#~ msgid "Ready... Set... Train!" +#~ msgstr "准备...设置...训练!" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Basic MNIST Example" +#~ " `_. " +#~ "This will allow you see how easy" +#~ " it is to wrap your code with" +#~ " Flower and begin training in a " +#~ "federated way. We provide you with " +#~ "two helper scripts, namely *run-" +#~ "server.sh*, and *run-clients.sh*. Don't " +#~ "be afraid to look inside, they are" +#~ " simple enough =)." +#~ msgstr "" +#~ "现在我们已经安装了所有的依赖包,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 " +#~ "PyTorch 的 `Basic MNIST Example " +#~ "`_。您会发现用 " +#~ "Flower 来封装您的代码并进行联邦学习训练是多么容易。我们为您提供了两个辅助脚本,即 *run-" +#~ "server.sh* 和 *run-clients.sh*。别害怕,它们很简单 =)。" + +#~ msgid "" +#~ "Go ahead and launch on a terminal" +#~ " the *run-server.sh* script first as" +#~ " follows:" +#~ msgstr "首先在终端上启动 *run-server.sh* 脚本,如下所示:" + +#~ msgid "Now that the server is up and running, go ahead and launch the clients." +#~ msgstr "现在服务器已经启动并运行,请继续启动客户端。" + +#~ msgid "" +#~ "Et voilà! You should be seeing the" +#~ " training procedure and, after a few" +#~ " iterations, the test accuracy for " +#~ "each client." +#~ msgstr "然后就可以了!您应该能看到训练过程,以及经过几次反复后,每个客户端的测试准确率。" + +#~ msgid "Now, let's see what is really happening inside." +#~ msgstr "现在,让我们看看里面到底发生了什么。" + +#~ msgid "" +#~ "Inside the server helper script *run-" +#~ "server.sh* you will find the following" +#~ " code that basically runs the " +#~ ":code:`server.py`" +#~ msgstr "在服务器辅助脚本 *run-server.sh* 中,你可以找到以下代码,这些代码基本上都是运行 :code:`server.py` 的代码" + +#~ msgid "" +#~ "We can go a bit deeper and " +#~ "see that :code:`server.py` simply launches " +#~ "a server that will coordinate three " +#~ "rounds of training. Flower Servers are" +#~ " very customizable, but for simple " +#~ "workloads, we can start a server " +#~ "using the :ref:`start_server ` function and leave " +#~ "all the configuration possibilities at " +#~ "their default values, as seen below." +#~ msgstr "" +#~ "我们可以再深入一点,:code:`server.py` 只是启动了一个服务器,该服务器将协调三轮训练。Flower " +#~ "服务器是非常容易修改的,但对于简单的工作,我们可以使用 :ref:`start_server `函数启动服务器,并将所有可能的配置保留为默认值,如下所示。" + +#~ msgid "" +#~ "Next, let's take a look at the " +#~ "*run-clients.sh* file. You will see " +#~ "that it contains the main loop " +#~ "that starts a set of *clients*." +#~ msgstr "接下来,让我们看看 *run-clients.sh* 文件。您会看到它包含了用来启动多个 *客户端* 的代码。" + +#~ msgid "" +#~ "**cid**: is the client ID. It is" +#~ " an integer that uniquely identifies " +#~ "client identifier." 
+#~ msgstr "**cid**:是客户 ID。它是一个整数,可唯一标识客户标识符。" + +#~ msgid "**sever_address**: String that identifies IP and port of the server." +#~ msgstr "**sever_address**: 标识服务器 IP 和端口的字符串。" + +#~ msgid "" +#~ "**nb_clients**: This defines the number " +#~ "of clients being created. This piece " +#~ "of information is not required by " +#~ "the client, but it helps us " +#~ "partition the original MNIST dataset to" +#~ " make sure that every client is " +#~ "working on unique subsets of both " +#~ "*training* and *test* sets." +#~ msgstr "" +#~ "**nb_clients**: 这定义了正在创建的客户端数量。客户端并不需要这一信息,但它有助于我们对原始 " +#~ "MNIST 数据集进行划分,以确保每个客户端都在 *training* 和 *test*" +#~ " 数据集上有独立的数据。" + +#~ msgid "" +#~ "Again, we can go deeper and look" +#~ " inside :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`. After going through the" +#~ " argument parsing code at the " +#~ "beginning of our :code:`main` function, " +#~ "you will find a call to " +#~ ":code:`mnist.load_data`. This function is " +#~ "responsible for partitioning the original " +#~ "MNIST datasets (*training* and *test*) " +#~ "and returning a :code:`torch.utils.data.DataLoader`" +#~ " s for each of them. We then" +#~ " instantiate a :code:`PytorchMNISTClient` object" +#~ " with our client ID, our DataLoaders," +#~ " the number of epochs in each " +#~ "round, and which device we want to" +#~ " use for training (CPU or GPU)." +#~ msgstr "" +#~ "我们可以深入看一下 :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`。查看 :code:`main` 函数开头的参数解析代码后,你会发现一个对" +#~ " :code:`mnist.load_data` 的调用。该函数负责分割原始 MNIST " +#~ "数据集(*training* 和 *test*),并为每个数据集返回一个 " +#~ ":code:`torch.utils.data.DataLoader` 。然后,我们实例化一个 " +#~ ":code:`PytorchMNISTClient` 对象,其中包含我们的客户端 ID、 " +#~ "DataLoader、每一轮中的遍历数,以及我们希望用于训练的设备(CPU 或 GPU)。" + +#~ msgid "" +#~ "The :code:`PytorchMNISTClient` object when " +#~ "finally passed to :code:`fl.client.start_client` " +#~ "along with the server's address as " +#~ "the training process begins." +#~ msgstr "" +#~ "当训练过程开始时,:code:`PytorchMNISTClient` 对象会连同服务器地址一起传递给 " +#~ ":code:`fl.client.start_client`。" + +#~ msgid "A Closer Look" +#~ msgstr "仔细看一下" + +#~ msgid "" +#~ "Now, let's look closely into the " +#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" +#~ ".quickstart-pytorch.mnist` and see what it" +#~ " is doing:" +#~ msgstr "" +#~ "现在,让我们仔细研究一下 :code:`flwr_example.quickstart-pytorch.mnist`" +#~ " 中的 :code:`PytorchMNISTClient`,看看它在做什么:" + +#~ msgid "" +#~ "The first thing to notice is that" +#~ " :code:`PytorchMNISTClient` instantiates a CNN" +#~ " model inside its constructor" +#~ msgstr "首先要注意的是 :code:`PytorchMNISTClient` 在其构造函数中实例化了一个 CNN 模型" + +#~ msgid "" +#~ "The code for the CNN is available" +#~ " under :code:`quickstart-pytorch.mnist` and " +#~ "it is reproduced below. It is the" +#~ " same network found in `Basic MNIST" +#~ " Example " +#~ "`_." +#~ msgstr "" +#~ "CNN 的代码可在 :code:`quickstart-pytorch.mnist` " +#~ "下找到,现复制如下。它与 `Basic MNIST Example " +#~ "`_中的网络相同。" + +#~ msgid "" +#~ "The second thing to notice is that" +#~ " :code:`PytorchMNISTClient` class inherits from" +#~ " the :code:`fl.client.Client`, and hence it" +#~ " must implement the following methods:" +#~ msgstr "" +#~ "第二件要注意的事是 :code:`PytorchMNISTClient` 类继承自 " +#~ ":code:`fl.client.Client`,因此它必须实现以下方法:" + +#~ msgid "" +#~ "When comparing the abstract class to " +#~ "its derived class :code:`PytorchMNISTClient` " +#~ "you will notice that :code:`fit` calls" +#~ " a :code:`train` function and that " +#~ ":code:`evaluate` calls a :code:`test`: " +#~ "function." 
+#~ msgstr "" +#~ "将抽象类与其派生类 :code:`PytorchMNISTClient` 进行比较时,您会发现 " +#~ ":code:`fit` 调用了一个 :code:`train` 函数,而 " +#~ ":code:`evaluate` 则调用了一个 :code:`test`: 函数。" + +#~ msgid "" +#~ "These functions can both be found " +#~ "inside the same :code:`quickstart-" +#~ "pytorch.mnist` module:" +#~ msgstr "这些函数都可以在同一个 :code:`quickstart-pytorch.mnist` 模块中找到:" + +#~ msgid "" +#~ "Observe that these functions encapsulate " +#~ "regular training and test loops and " +#~ "provide :code:`fit` and :code:`evaluate` with" +#~ " final statistics for each round. You" +#~ " could substitute them with your " +#~ "custom train and test loops and " +#~ "change the network architecture, and the" +#~ " entire example would still work " +#~ "flawlessly. As a matter of fact, " +#~ "why not try and modify the code" +#~ " to an example of your liking?" +#~ msgstr "" +#~ "请注意,这些函数封装了常规的训练和测试循环,并为 :code:`fit` 和 " +#~ ":code:`evaluate` " +#~ "提供了每轮的最终统计数据。您可以用自定义的训练和测试循环来替代它们,并改变网络结构,整个示例仍然可以完美运行。事实上,为什么不按照自己的喜好修改代码呢?" + +#~ msgid "Give It a Try" +#~ msgstr "试试看" + +#~ msgid "" +#~ "Looking through the quickstart code " +#~ "description above will have given a " +#~ "good understanding of how *clients* and" +#~ " *servers* work in Flower, how to " +#~ "run a simple experiment, and the " +#~ "internals of a client wrapper. Here " +#~ "are a few things you could try " +#~ "on your own and get more " +#~ "experience with Flower:" +#~ msgstr "" +#~ "通过上面的快速入门代码描述,你将对 Flower " +#~ "中*客户端*和*服务器*的工作方式、如何运行一个简单的实验以及客户端封装器的内部结构有一个很好的了解。您可以自己尝试以下内容,以获得更多使用" +#~ " Flower 的经验:" + +#~ msgid "" +#~ "Try and change :code:`PytorchMNISTClient` so" +#~ " it can accept different architectures." +#~ msgstr "尝试修改 :code:`PytorchMNISTClient`,使其可以接受不同的架构。" + +#~ msgid "" +#~ "Modify the :code:`train` function so " +#~ "that it accepts different optimizers" +#~ msgstr "修改 :code:`train` 函数,使其接受不同的优化器" + +#~ msgid "" +#~ "Modify the :code:`test` function so that" +#~ " it proves not only the top-1 " +#~ "(regular accuracy) but also the top-5" +#~ " accuracy?" +#~ msgstr "修改 :code:`test` 函数,使其不仅能输出前 1 名(常规精确度),还能证明前 5 名的精确度?" + +#~ msgid "" +#~ "Go larger! Try to adapt the code" +#~ " to larger images and datasets. Why" +#~ " not try training on ImageNet with" +#~ " a ResNet-50?" +#~ msgstr "让我们尝试让代码适应更大的图像和数据集。为什么不尝试使用 ResNet-50 在 ImageNet 上进行训练呢?" + +#~ msgid "You are ready now. Enjoy learning in a federated way!" +#~ msgstr "您现在已经准备就绪。尽情享受联邦学习的乐趣吧!" + +#~ msgid "Differential privacy" +#~ msgstr "差别隐私" + +#~ msgid "" +#~ "Flower provides differential privacy (DP) " +#~ "wrapper classes for the easy integration" +#~ " of the central DP guarantees " +#~ "provided by DP-FedAvg into training " +#~ "pipelines defined in any of the " +#~ "various ML frameworks that Flower is " +#~ "compatible with." +#~ msgstr "" +#~ "Flower 提供了差分隐私 (DP) 封装类,可将 DP-FedAvg " +#~ "提供的核心 DP 轻松集成到 Flower 兼容的各种 ML " +#~ "框架中定义的训练模式中。" + +#~ msgid "" +#~ "Please note that these components are" +#~ " still experimental; the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." +#~ msgstr "请注意,这些组件仍处于试验阶段,如何为特定任务正确配置 DP 仍是一个尚未解决的问题。" + +#~ msgid "" +#~ "The name DP-FedAvg is misleading " +#~ "since it can be applied on top " +#~ "of any FL algorithm that conforms " +#~ "to the general structure prescribed by" +#~ " the FedOpt family of algorithms." 
+#~ msgstr "DP-FedAvg 这个名称容易引起误解,因为它可以应用于任何符合 FedOpt 系列算法规定的一般结构的 FL 算法之上。" + +#~ msgid "DP-FedAvg" +#~ msgstr "DP-FedAvg" + +#~ msgid "" +#~ "DP-FedAvg, originally proposed by " +#~ "McMahan et al. [mcmahan]_ and extended" +#~ " by Andrew et al. [andrew]_, is " +#~ "essentially FedAvg with the following " +#~ "modifications." +#~ msgstr "DP-FedAvg 最初由McMahan等人提出,并由Andrew等人加以扩展。" + +#~ msgid "" +#~ "**Clipping** : The influence of each " +#~ "client's update is bounded by clipping" +#~ " it. This is achieved by enforcing" +#~ " a cap on the L2 norm of " +#~ "the update, scaling it down if " +#~ "needed." +#~ msgstr "**裁剪** : 裁剪会影响到每个客户端的模型参数。具体做法是对参数的 L2 准则设置上限,必要时将其缩减。" + +#~ msgid "" +#~ "**Noising** : Gaussian noise, calibrated " +#~ "to the clipping threshold, is added " +#~ "to the average computed at the " +#~ "server." +#~ msgstr "**噪声** : 在服务器计算出的平均值中加入高斯噪声,该噪声根据剪切阈值进行校准。" + +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. This variability is " +#~ "crucial in understanding its impact on" +#~ " differential privacy guarantees, emphasizing " +#~ "the need for an adaptive approach " +#~ "[andrew]_ that continuously adjusts the " +#~ "clipping threshold to track a " +#~ "prespecified quantile of the update norm" +#~ " distribution." +#~ msgstr "事实证明,参数更新准则的分布会随着任务的不同而变化,并随着训练的进展而演变。因此,我们采用了一种自适应方法,该方法会不断调整剪切阈值,以跟踪参数更新准则分布的预设量化值。" + +#~ msgid "Simplifying Assumptions" +#~ msgstr "简化假设" + +#~ msgid "" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realizes the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." +#~ msgstr "" +#~ "我们提出(并试图执行)了一系列必须满足的假设,以确保训练过程真正实现用户在配置设置时所定的 " +#~ ":math:`(\\epsilon,\\delta)` 。" + +#~ msgid "" +#~ "**Fixed-size subsampling** :Fixed-size " +#~ "subsamples of the clients must be " +#~ "taken at each round, as opposed to" +#~ " variable-sized Poisson subsamples." +#~ msgstr "** 固定大小的子样本** :与可变大小的泊松分布子样本相比,每轮必须抽取固定大小的客户端子样本。" + +#~ msgid "" +#~ "**Unweighted averaging** : The contributions" +#~ " from all the clients must weighted" +#~ " equally in the aggregate to " +#~ "eliminate the requirement for the server" +#~ " to know in advance the sum of" +#~ " the weights of all clients available" +#~ " for selection." +#~ msgstr "**非加权平均**: 所有客户端的贡献必须加权相等,这样服务器就不需要事先知道所有客户的权重总和。" + +#~ msgid "" +#~ "**No client failures** : The set " +#~ "of available clients must stay constant" +#~ " across all rounds of training. In" +#~ " other words, clients cannot drop out" +#~ " or fail." +#~ msgstr "**没有失败的客户端** : 在各轮训练中,可用客户端的数量必须保持不变。换句话说,客户端不能退出或失败。" + +#~ msgid "" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold, while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." +#~ msgstr "前两种方法有助于消除将噪声校准为削波阈值所带来的诸多复杂问题,而第三种方法则需要符合隐私分析的假设。" + +#~ msgid "" +#~ "These restrictions are in line with " +#~ "constraints imposed by Andrew et al. " +#~ "[andrew]_." 
+#~ msgstr "这些限制与 Andrew 等人所施加的限制一致。" + +#~ msgid "Customizable Responsibility for Noise injection" +#~ msgstr "可定制的噪声注入" + +#~ msgid "" +#~ "In contrast to other implementations " +#~ "where the addition of noise is " +#~ "performed at the server, you can " +#~ "configure the site of noise injection" +#~ " to better match your threat model." +#~ " We provide users with the " +#~ "flexibility to set up the training " +#~ "such that each client independently adds" +#~ " a small amount of noise to the" +#~ " clipped update, with the result that" +#~ " simply aggregating the noisy updates " +#~ "is equivalent to the explicit addition" +#~ " of noise to the non-noisy " +#~ "aggregate at the server." +#~ msgstr "与其他在服务器上添加噪声的实现方法不同,您可以配置噪声注入的位置,以便更好地匹配您的威胁模型。我们为用户提供了设置训练的灵活性,使每个客户端都能独立地为剪切参数更新添加少量噪声,这样,只需聚合噪声更新,就相当于在服务器上为非噪声聚合添加噪声了。" + +#~ msgid "" +#~ "To be precise, if we let :math:`m`" +#~ " be the number of clients sampled " +#~ "each round and :math:`\\sigma_\\Delta` be " +#~ "the scale of the total Gaussian " +#~ "noise that needs to be added to" +#~ " the sum of the model updates, " +#~ "we can use simple maths to show" +#~ " that this is equivalent to each " +#~ "client adding noise with scale " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ msgstr "" +#~ "准确地说,我们假设每轮采样的客户端数量为:math:`m`,:math:`\\sigma_\\Delta` " +#~ "为需要添加到模型更新总和中的总高斯噪声的规模,我们就可以用简单的数学方法证明了,这相当于每个客户端都添加了规模为 " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}` 的噪声。" + +#~ msgid "Wrapper-based approach" +#~ msgstr "基于封装的方法" + +#~ msgid "" +#~ "Introducing DP to an existing workload" +#~ " can be thought of as adding an" +#~ " extra layer of security around it." +#~ " This inspired us to provide the " +#~ "additional server and client-side logic" +#~ " needed to make the training process" +#~ " differentially private as wrappers for " +#~ "instances of the :code:`Strategy` and " +#~ ":code:`NumPyClient` abstract classes respectively." +#~ " This wrapper-based approach has the" +#~ " advantage of being easily composable " +#~ "with other wrappers that someone might" +#~ " contribute to the Flower library in" +#~ " the future, e.g., for secure " +#~ "aggregation. Using Inheritance instead can " +#~ "be tedious because that would require" +#~ " the creation of new sub- classes " +#~ "every time a new class implementing " +#~ ":code:`Strategy` or :code:`NumPyClient` is " +#~ "defined." +#~ msgstr "" +#~ "在现有工作负载中引入 DP " +#~ "可以被认为是在其周围增加了一层额外的安全性。受此启发,我们提供了额外的服务器端和客户端逻辑,分别作为 " +#~ ":code:`Strategy` 和 :code:`NumPyClient` " +#~ "抽象类实例的封装器,使训练过程具有不同的隐私性。这种基于封装器的方法的优点是可以很容易地与将来有人贡献给 Flower " +#~ "的其他封装器(例如用于安全聚合的封装器)进行组合。使用继承可能会比较繁琐,因为每次定义实现 :code:`Strategy`" +#~ " 或 :code:`NumPyClient` 的新类时,都需要创建新的子类。" + +#~ msgid "" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean-valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. 
The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." +#~ msgstr "" +#~ "我们的第一版解决方案是定义一个装饰器,其构造函数接受一个布尔值变量,表示是否启用自适应剪裁。我们很快意识到,这样会使其 " +#~ ":code:`__init__()` " +#~ "函数中与自适应裁剪超参数相对应的变量变得杂乱无章,而这些变量在自适应裁剪被禁用时将保持未使用状态。要实现更简洁的功能,可以将该功能拆分为两个装饰器,即" +#~ " :code:`DPFedAvgFixed` 和 " +#~ ":code:`DPFedAvgAdaptive`,后者是前者的子类。这两个类的构造函数都接受一个布尔参数 " +#~ ":code:`server_side_noising`,顾名思义,它决定了在哪里加噪声。" + +#~ msgid "" +#~ "The server-side capabilities required " +#~ "for the original version of DP-" +#~ "FedAvg, i.e., the one which performed" +#~ " fixed clipping, can be completely " +#~ "captured with the help of wrapper " +#~ "logic for just the following two " +#~ "methods of the :code:`Strategy` abstract " +#~ "class." +#~ msgstr "" +#~ "只需对 :code:`Strategy` 抽象类的以下两个方法进行封装,就能完全捕获 DP-" +#~ "FedAvg 原始版本(即执行固定剪裁的版本)所需的服务器端功能。" + +#~ msgid "" +#~ ":code:`configure_fit()` : The config " +#~ "dictionary being sent by the wrapped " +#~ ":code:`Strategy` to each client needs to" +#~ " be augmented with an additional " +#~ "value equal to the clipping threshold" +#~ " (keyed under :code:`dpfedavg_clip_norm`) and," +#~ " if :code:`server_side_noising=true`, another one" +#~ " equal to the scale of the " +#~ "Gaussian noise that needs to be " +#~ "added at the client (keyed under " +#~ ":code:`dpfedavg_noise_stddev`). This entails " +#~ "*post*-processing of the results returned " +#~ "by the wrappee's implementation of " +#~ ":code:`configure_fit()`." +#~ msgstr "" +#~ ":code:`configure_fit()` :由封装的 :code:`Strategy` " +#~ "发送到每个客户端的配置字典需要使用等于裁剪阈值的附加值(在 :code:`dpfedavg_clip_norm` " +#~ "下键入)进行扩充。并且,如果 " +#~ "server_side_noising=true,则另一个值等于需要在客户端添加的高斯噪声的大小(在 " +#~ "dpfedavg_noise_stddev 下键入)。这需要对封装后的configure_fit() " +#~ "所返回的结果进行后处理。" + +#~ msgid "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." +#~ msgstr "" +#~ ":code:`aggregate_fit()`: " +#~ "我们会检查是否有任何客户端在本轮超时前退出或未能上传参数更新。在这种情况下,我们需要中止当前一轮,丢弃已收到的所有参数更新,然后继续下一轮。另一方面,如果所有客户端都成功响应,我们就必须通过拦截" +#~ " :code:`FitRes` 的 :code:`parameters` 字段并将其设置为 " +#~ "1,强制以不加权的方式平均更新。此外,如果 " +#~ ":code:`server_side_noising=true`,每次更新都会受到一定量的噪声扰动,其扰动量相当于启用客户端噪声时的扰动量。" +#~ " 这就需要在将本方法的参数传递给封装的 :code:`aggregate_fit()` " +#~ "之前,对参数进行*预*处理。" + +#~ msgid "" +#~ "We can't directly change the aggregation" +#~ " function of the wrapped strategy to" +#~ " force it to add noise to the" +#~ " aggregate, hence we simulate client-" +#~ "side noising to implement server-side" +#~ " noising." 
+#~ msgstr "我们无法直接改变封装策略的聚合函数,迫使它在聚合中添加噪声,因此我们模拟客户端噪声来实现服务器端噪声。" + +#~ msgid "" +#~ "These changes have been put together " +#~ "into a class called :code:`DPFedAvgFixed`, " +#~ "whose constructor accepts the strategy " +#~ "being decorated, the clipping threshold " +#~ "and the number of clients sampled " +#~ "every round as compulsory arguments. The" +#~ " user is expected to specify the " +#~ "clipping threshold since the order of" +#~ " magnitude of the update norms is " +#~ "highly dependent on the model being " +#~ "trained and providing a default value" +#~ " would be misleading. The number of" +#~ " clients sampled at every round is" +#~ " required to calculate the amount of" +#~ " noise that must be added to " +#~ "each individual update, either by the" +#~ " server or the clients." +#~ msgstr "" +#~ "这些变化被整合到一个名为 :code:`DPFedAvgFixed` " +#~ "的类中,其构造函数接受被装饰的策略、剪切阈值和每轮采样的客户数作为必选参数。用户需要指定剪切阈值,因为参数更新规范的数量级在很大程度上取决于正在训练的模型,提供默认值会产生误导。每轮采样的客户端数量是计算服务器或客户在每次参数更新时添加的噪音量所必需的。" + +#~ msgid "" +#~ "The additional functionality required to " +#~ "facilitate adaptive clipping has been " +#~ "provided in :code:`DPFedAvgAdaptive`, a " +#~ "subclass of :code:`DPFedAvgFixed`. It " +#~ "overrides the above-mentioned methods to" +#~ " do the following." +#~ msgstr "" +#~ "自适应剪裁所需的附加功能在 :code:`DPFedAvgAdaptive` 中提供,其是 " +#~ ":code:`DPFedAvgFixed` 的子类。它重写了上述方法,以实现以下功能。" + +#~ msgid "" +#~ ":code:`configure_fit()` : It intercepts the" +#~ " config dict returned by " +#~ ":code:`super.configure_fit()` to add the " +#~ "key-value pair " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " +#~ "which the client interprets as an " +#~ "instruction to include an indicator bit" +#~ " (1 if update norm <= clipping " +#~ "threshold, 0 otherwise) in the results" +#~ " returned by it." +#~ msgstr "" +#~ ":code:`configure_fit()`:它截取由 :code:`super.configure_fit()` " +#~ "返回的 config 字典,并在其中添加键-值对 " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True\",客户端将其解释为在返回结果中包含一个指示位(如果参数更新范式" +#~ " <= 剪裁阈值,则为 1,否则为 0)的指令。" + +#~ msgid "" +#~ ":code:`aggregate_fit()` : It follows a " +#~ "call to :code:`super.aggregate_fit()` with one" +#~ " to :code:`__update_clip_norm__()`, a procedure" +#~ " which adjusts the clipping threshold " +#~ "on the basis of the indicator bits" +#~ " received from the sampled clients." +#~ msgstr ":code:`aggregate_fit()`:在调用:code:`super.aggregate_fit()`后,再调用:code:`__update_clip_norm__()`,该过程根据从采样客户端接收到的指示位调整裁剪阈值。" + +#~ msgid "" +#~ "The client-side capabilities required " +#~ "can be completely captured through " +#~ "wrapper logic for just the :code:`fit()`" +#~ " method of the :code:`NumPyClient` abstract" +#~ " class. To be precise, we need " +#~ "to *post-process* the update computed" +#~ " by the wrapped client to clip " +#~ "it, if necessary, to the threshold " +#~ "value supplied by the server as " +#~ "part of the config dictionary. In " +#~ "addition to this, it may need to" +#~ " perform some extra work if either" +#~ " (or both) of the following keys " +#~ "are also present in the dict." +#~ msgstr "" +#~ "客户端所需的功能完全可以通过 :code:`NumPyClient` 抽象类的 " +#~ ":code:`fit()` " +#~ "方法的封装逻辑来实现。准确地说,我们需要对封装客户端计算的参数更新进行处理,以便在必要时将其剪切到服务器作为配置字典的一部分提供的阈值。除此之外,如果配置字典中还存在以下任一(或两个)键,客户端可能还需要执行一些额外的工作。" + +#~ msgid "" +#~ ":code:`dpfedavg_noise_stddev` : Generate and " +#~ "add the specified amount of noise " +#~ "to the clipped update." 
+#~ msgstr "code:`dpfedavg_noise_stddev`:生成并在剪切参数更新中添加指定数量的噪声。" + +#~ msgid "" +#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" +#~ " metrics dict in the :code:`FitRes` " +#~ "object being returned to the server " +#~ "with an indicator bit, calculated as " +#~ "described earlier." +#~ msgstr "" +#~ ":code:`dpfedavg_adaptive_clip_enabled`:在返回给服务器的 :code:`FitRes`" +#~ " 对象中的度量值字典中增加一个指标位,计算方法如前所述。" + +#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" +#~ msgstr "进行 :math:`(epsilon, \\delta)` 分析" + +#~ msgid "" +#~ "Assume you have trained for :math:`n`" +#~ " rounds with sampling fraction :math:`q`" +#~ " and noise multiplier :math:`z`. In " +#~ "order to calculate the :math:`\\epsilon` " +#~ "value this would result in for a" +#~ " particular :math:`\\delta`, the following " +#~ "script may be used." +#~ msgstr "" +#~ "假设您已经训练了 :math:`n` 轮,采样比例为 :math:`q`,噪声乘数为 " +#~ ":math:`z`。为了计算特定 :math:`\\delta` 的 :math:`epsilon`" +#~ " 值,可以使用下面的脚本。" + +#~ msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`ClientApp `\\ " +#~ "\\(client\\_fn\\[\\, mods\\]\\)" +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.server.driver `\\" +#~ msgstr "" + +#~ msgid "Flower driver SDK." +#~ msgstr "Flower 服务器。" + +#~ msgid "driver" +#~ msgstr "服务器" + +#~ msgid "" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`GrpcDriver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "`GrpcDriver` provides access to the gRPC Driver API/service." +#~ msgstr "" + +#~ msgid ":py:obj:`get_nodes `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(task\\_ids\\)" +#~ msgstr "" + +#~ msgid "Get task results." +#~ msgstr "汇总训练结果。" + +#~ msgid "" +#~ ":py:obj:`push_task_ins " +#~ "`\\ " +#~ "\\(task\\_ins\\_list\\)" +#~ msgstr "" + +#~ msgid "Schedule tasks." +#~ msgstr "" + +#~ msgid "GrpcDriver" +#~ msgstr "" + +#~ msgid ":py:obj:`connect `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Connect to the Driver API." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`create_run " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "Request for run ID." +#~ msgstr "Flower 基线申请" + +#~ msgid "" +#~ ":py:obj:`disconnect " +#~ "`\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Disconnect from the Driver API." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`get_nodes `\\" +#~ " \\(req\\)" +#~ msgstr "" + +#~ msgid "Get client IDs." +#~ msgstr "返回客户端(本身)。" + +#~ msgid "" +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`push_task_ins " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "" +#~ "Flower usage examples used to be " +#~ "bundled with Flower in a package " +#~ "called ``flwr_example``. We are migrating " +#~ "those examples to standalone projects to" +#~ " make them easier to use. All " +#~ "new examples are based in the " +#~ "directory `examples " +#~ "`_." +#~ msgstr "" +#~ "Flower 的使用示例曾与 Flower 捆绑在一个名为 ``flwr_example``" +#~ " 的软件包中。我们正在将这些示例迁移到独立项目中,以使它们更易于使用。所有新示例都位于目录 `examples " +#~ "`_。" + +#~ msgid "Quickstart TensorFlow/Keras" +#~ msgstr "快速入门 TensorFlow/Keras" + +#~ msgid "Legacy Examples (`flwr_example`)" +#~ msgstr "传统示例 (`flwr_example`)" + +#~ msgid "" +#~ "The useage examples in `flwr_example` " +#~ "are deprecated and will be removed " +#~ "in the future. 
New examples are " +#~ "provided as standalone projects in " +#~ "`examples `_." +#~ msgstr "" +#~ "在 `flwr_example` 中的使用示例已被弃用,今后将被移除。新示例将作为独立项目在 " +#~ "`examples `_" +#~ " 中提供。" + +#~ msgid "Extra Dependencies" +#~ msgstr "额外依赖" + +#~ msgid "" +#~ "The core Flower framework keeps a " +#~ "minimal set of dependencies. The " +#~ "examples demonstrate Flower in the " +#~ "context of different machine learning " +#~ "frameworks, so additional dependencies need" +#~ " to be installed before an example" +#~ " can be run." +#~ msgstr "" +#~ "Flower 核心框架只保留了最低限度的依赖项。这些示例在不同机器学习框架的背景下演示了 " +#~ "Flower,因此在运行示例之前需要安装额外的依赖项。" + +#~ msgid "For PyTorch examples::" +#~ msgstr "PyTorch 示例::" + +#~ msgid "For TensorFlow examples::" +#~ msgstr "TensorFlow 示例::" + +#~ msgid "For both PyTorch and TensorFlow examples::" +#~ msgstr "PyTorch 和 TensorFlow 示例::" + +#~ msgid "" +#~ "Please consult :code:`pyproject.toml` for a" +#~ " full list of possible extras " +#~ "(section :code:`[tool.poetry.extras]`)." +#~ msgstr "" +#~ "请参阅 :code:`pyproject.toml`,了解可能的 extras 的完整列表(章节 " +#~ ":code:`[tool.poems.extras]`)。" + +#~ msgid "PyTorch Examples" +#~ msgstr "PyTorch 示例" + +#~ msgid "" +#~ "Our PyTorch examples are based on " +#~ "PyTorch 1.7. They should work with " +#~ "other releases as well. So far, we" +#~ " provide the following examples." +#~ msgstr "我们的 PyTorch 示例基于 PyTorch 1.7。它们应该也能在其他版本中使用。到目前为止,我们提供了以下示例。" + +#~ msgid "CIFAR-10 Image Classification" +#~ msgstr "CIFAR-10 图像分类" + +#~ msgid "" +#~ "`CIFAR-10 and CIFAR-100 " +#~ "`_ are " +#~ "popular RGB image datasets. The Flower" +#~ " CIFAR-10 example uses PyTorch to " +#~ "train a simple CNN classifier in a" +#~ " federated learning setup with two " +#~ "clients." +#~ msgstr "" +#~ "CIFAR-10 和 CIFAR-100 " +#~ "``_ 是流行的 RGB" +#~ " 图像数据集。Flower CIFAR-10 示例使用 PyTorch " +#~ "在有两个客户端的联邦学习设置中训练一个简单的 CNN 分类器。" + +#~ msgid "First, start a Flower server:" +#~ msgstr "首先,启动 Flower 服务器:" + +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" + +#~ msgid "Then, start the two clients in a new terminal window:" +#~ msgstr "然后,在新的终端窗口中启动两个客户端:" + +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." +#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_cifar`。" + +#~ msgid "ImageNet-2012 Image Classification" +#~ msgstr "ImageNet-2012 图像分类" + +#~ msgid "" +#~ "`ImageNet-2012 `_ is " +#~ "one of the major computer vision " +#~ "datasets. The Flower ImageNet example " +#~ "uses PyTorch to train a ResNet-18 " +#~ "classifier in a federated learning setup" +#~ " with ten clients." +#~ msgstr "" +#~ "ImageNet-2012 `_ " +#~ "是主要的计算机视觉数据集之一。Flower ImageNet 示例使用 PyTorch " +#~ "在有十个客户端的联邦学习设置中训练 ResNet-18 分类器。" + +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" + +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." +#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_imagenet`。" + +#~ msgid "TensorFlow Examples" +#~ msgstr "TensorFlow 示例" + +#~ msgid "" +#~ "Our TensorFlow examples are based on " +#~ "TensorFlow 2.0 or newer. So far, " +#~ "we provide the following examples." 
+#~ msgstr "我们的 TensorFlow 示例基于 TensorFlow 2.0 或更新版本。到目前为止,我们提供了以下示例。" + +#~ msgid "Fashion-MNIST Image Classification" +#~ msgstr "Fashion-MNIST 图像分类" + +#~ msgid "" +#~ "`Fashion-MNIST `_ is often used as " +#~ "the \"Hello, world!\" of machine " +#~ "learning. We follow this tradition and" +#~ " provide an example which samples " +#~ "random local datasets from Fashion-MNIST" +#~ " and trains a simple image " +#~ "classification model over those partitions." +#~ msgstr "" +#~ "`Fashion-MNIST `_ 经常被用作机器学习的 \"你好,世界!\"。我们遵循这一传统" +#~ ",提供了一个从Fashion-MNIST 中随机抽样本地数据集的示例,并在这些分区上训练一个简单的图像分类模型。" + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" + +#~ msgid "" +#~ "For more details, see " +#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/tensorflow_fashion_mnist`。" + +#~ msgid "" +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongise Flower," +#~ " for example, PyTorch. This tutorial " +#~ "might be removed in future versions " +#~ "of Flower." +#~ msgstr "" + +#~ msgid "|31e4b1afa87c4b968327bbeafbf184d4|" +#~ msgstr "" + +#~ msgid "|c9d935b4284e4c389a33d86b33e07c0a|" +#~ msgstr "" + +#~ msgid "|00727b5faffb468f84dd1b03ded88638|" +#~ msgstr "" + +#~ msgid "|daf0cf0ff4c24fd29439af78416cf47b|" +#~ msgstr "" + +#~ msgid "|9f093007080d471d94ca90d3e9fde9b6|" +#~ msgstr "" + +#~ msgid "|46a26e6150e0479fbd3dfd655f36eb13|" +#~ msgstr "" + +#~ msgid "|3daba297595c4c7fb845d90404a6179a|" +#~ msgstr "" + +#~ msgid "|5769874fa9c4455b80b2efda850d39d7|" +#~ msgstr "" + +#~ msgid "|ba47ffb421814b0f8f9fa5719093d839|" +#~ msgstr "" + +#~ msgid "|aeac5bf79cbf497082e979834717e01b|" +#~ msgstr "" + +#~ msgid "|ce27ed4bbe95459dba016afc42486ba2|" +#~ msgstr "" + +#~ msgid "|ae94a7f71dda443cbec2385751427d41|" +#~ msgstr "" + +#~ msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" +#~ msgstr "" + +#~ msgid "|08cb60859b07461588fe44e55810b050|" +#~ msgstr "" + diff --git a/doc/source/example-walkthrough-pytorch-mnist.rst b/doc/source/example-walkthrough-pytorch-mnist.rst deleted file mode 100644 index f8eacc8647fe..000000000000 --- a/doc/source/example-walkthrough-pytorch-mnist.rst +++ /dev/null @@ -1,453 +0,0 @@ -Example: Walk-Through PyTorch & MNIST -===================================== - -In this tutorial we will learn, how to train a Convolutional Neural Network on MNIST using Flower and PyTorch. - -Our example consists of one *server* and two *clients* all having the same model. - -*Clients* are responsible for generating individual weight-updates for the model based on their local datasets. -These updates are then sent to the *server* which will aggregate them to produce a better model. Finally, the *server* sends this improved version of the model back to each *client*. -A complete cycle of weight updates is called a *round*. - -Now that we have a rough idea of what is going on, let's get started. We first need to install Flower. You can do this by running : - -.. code-block:: shell - - $ pip install flwr - -Since we want to use PyTorch to solve a computer vision task, let's go ahead an install PyTorch and the **torchvision** library: - -.. 
code-block:: shell - - $ pip install torch torchvision - - -Ready... Set... Train! ----------------------- - -Now that we have all our dependencies installed, let's run a simple distributed training with two clients and one server. Our training procedure and network architecture are based on PyTorch's `Basic MNIST Example `_. This will allow you see how easy it is to wrap your code with Flower and begin training in a federated way. -We provide you with two helper scripts, namely *run-server.sh*, and *run-clients.sh*. Don't be afraid to look inside, they are simple enough =). - -Go ahead and launch on a terminal the *run-server.sh* script first as follows: - -.. code-block:: shell - - $ bash ./run-server.sh - - -Now that the server is up and running, go ahead and launch the clients. - -.. code-block:: shell - - $ bash ./run-clients.sh - - -Et voilà! You should be seeing the training procedure and, after a few iterations, the test accuracy for each client. - -.. code-block:: shell - - Train Epoch: 10 [30000/30016 (100%)] Loss: 0.007014 - - Train Epoch: 10 [30000/30016 (100%)] Loss: 0.000403 - - Train Epoch: 11 [30000/30016 (100%)] Loss: 0.001280 - - Train Epoch: 11 [30000/30016 (100%)] Loss: 0.000641 - - Train Epoch: 12 [30000/30016 (100%)] Loss: 0.006784 - - Train Epoch: 12 [30000/30016 (100%)] Loss: 0.007134 - - Client 1 - Evaluate on 5000 samples: Average loss: 0.0290, Accuracy: 99.16% - - Client 0 - Evaluate on 5000 samples: Average loss: 0.0328, Accuracy: 99.14% - - -Now, let's see what is really happening inside. - -Flower Server -------------- - -Inside the server helper script *run-server.sh* you will find the following code that basically runs the :code:`server.py` - -.. code-block:: bash - - python -m flwr_example.quickstart-pytorch.server - - -We can go a bit deeper and see that :code:`server.py` simply launches a server that will coordinate three rounds of training. -Flower Servers are very customizable, but for simple workloads, we can start a server using the `start_server `_ function and leave all the configuration possibilities at their default values, as seen below. - -.. code-block:: python - - import flwr as fl - - fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3)) - - -Flower Client -------------- - -Next, let's take a look at the *run-clients.sh* file. You will see that it contains the main loop that starts a set of *clients*. - -.. code-block:: bash - - python -m flwr_example.quickstart-pytorch.client \ - --cid=$i \ - --server_address=$SERVER_ADDRESS \ - --nb_clients=$NUM_CLIENTS - -* **cid**: is the client ID. It is an integer that uniquely identifies client identifier. -* **sever_address**: String that identifies IP and port of the server. -* **nb_clients**: This defines the number of clients being created. This piece of information is not required by the client, but it helps us partition the original MNIST dataset to make sure that every client is working on unique subsets of both *training* and *test* sets. - -Again, we can go deeper and look inside :code:`flwr_example/quickstart-pytorch/client.py`. -After going through the argument parsing code at the beginning of our :code:`main` function, you will find a call to :code:`mnist.load_data`. This function is responsible for partitioning the original MNIST datasets (*training* and *test*) and returning a :code:`torch.utils.data.DataLoader` s for each of them. 
-We then instantiate a :code:`PytorchMNISTClient` object with our client ID, our DataLoaders, the number of epochs in each round, and which device we want to use for training (CPU or GPU). - - -.. code-block:: python - - client = mnist.PytorchMNISTClient( - cid=args.cid, - train_loader=train_loader, - test_loader=test_loader, - epochs=args.epochs, - device=device, - ) - -The :code:`PytorchMNISTClient` object when finally passed to :code:`fl.client.start_client` along with the server's address as the training process begins. - - -A Closer Look -------------- - -Now, let's look closely into the :code:`PytorchMNISTClient` inside :code:`flwr_example.quickstart-pytorch.mnist` and see what it is doing: - -.. code-block:: python - - class PytorchMNISTClient(fl.client.Client): - """Flower client implementing MNIST handwritten classification using PyTorch.""" - def __init__( - self, - cid: int, - train_loader: datasets, - test_loader: datasets, - epochs: int, - device: torch.device = torch.device("cpu"), - ) -> None: - self.model = MNISTNet().to(device) - self.cid = cid - self.train_loader = train_loader - self.test_loader = test_loader - self.device = device - self.epochs = epochs - - def get_weights(self) -> fl.common.NDArrays: - """Get model weights as a list of NumPy ndarrays.""" - return [val.cpu().numpy() for _, val in self.model.state_dict().items()] - - def set_weights(self, weights: fl.common.NDArrays) -> None: - """Set model weights from a list of NumPy ndarrays. - - Parameters - ---------- - weights: fl.common.NDArrays - Weights received by the server and set to local model - - - Returns - ------- - - """ - state_dict = OrderedDict( - { - k: torch.tensor(v) - for k, v in zip(self.model.state_dict().keys(), weights) - } - ) - self.model.load_state_dict(state_dict, strict=True) - - def get_parameters(self, config) -> fl.common.ParametersRes: - """Encapsulates the weight into Flower Parameters """ - weights: fl.common.NDArrays = self.get_weights() - parameters = fl.common.ndarrays_to_parameters(weights) - return fl.common.ParametersRes(parameters=parameters) - - def fit(self, ins: fl.common.FitIns) -> fl.common.FitRes: - """Trains the model on local dataset - - Parameters - ---------- - ins: fl.common.FitIns - Parameters sent by the server to be used during training. - - Returns - ------- - Set of variables containing the new set of weights and information the client. - - """ - weights: fl.common.NDArrays = fl.common.parameters_to_ndarrays(ins.parameters) - fit_begin = timeit.default_timer() - - # Set model parameters/weights - self.set_weights(weights) - - # Train model - num_examples_train: int = train( - self.model, self.train_loader, epochs=self.epochs, device=self.device - ) - - # Return the refined weights and the number of examples used for training - weights_prime: fl.common.NDArrays = self.get_weights() - params_prime = fl.common.ndarrays_to_parameters(weights_prime) - fit_duration = timeit.default_timer() - fit_begin - return fl.common.FitRes( - parameters=params_prime, - num_examples=num_examples_train, - num_examples_ceil=num_examples_train, - fit_duration=fit_duration, - ) - - def evaluate(self, ins: fl.common.EvaluateIns) -> fl.common.EvaluateRes: - """ - - Parameters - ---------- - ins: fl.common.EvaluateIns - Parameters sent by the server to be used during testing. - - - Returns - ------- - Information the clients testing results. - - -The first thing to notice is that :code:`PytorchMNISTClient` instantiates a CNN model inside its constructor - -.. 
code-block:: python - - class PytorchMNISTClient(fl.client.Client): - """Flower client implementing MNIST handwritten classification using PyTorch.""" - - def __init__( - self, - cid: int, - train_loader: datasets, - test_loader: datasets, - epochs: int, - device: torch.device = torch.device("cpu"), - ) -> None: - self.model = MNISTNet().to(device) - ... - -The code for the CNN is available under :code:`quickstart-pytorch.mnist` and it is reproduced below. It is the same network found in `Basic MNIST Example `_. - -.. code-block:: python - - class MNISTNet(nn.Module): - """Simple CNN adapted from Pytorch's 'Basic MNIST Example'.""" - - def __init__(self) -> None: - super(MNISTNet, self).__init__() - self.conv1 = nn.Conv2d(1, 32, 3, 1) - self.conv2 = nn.Conv2d(32, 64, 3, 1) - self.dropout1 = nn.Dropout2d(0.25) - self.dropout2 = nn.Dropout2d(0.5) - self.fc1 = nn.Linear(9216, 128) - self.fc2 = nn.Linear(128, 10) - - def forward(self, x: Tensor) -> Tensor: - """Compute forward pass. - - Parameters - ---------- - x: Tensor - Mini-batch of shape (N,28,28) containing images from MNIST dataset. - - - Returns - ------- - output: Tensor - The probability density of the output being from a specific class given the input. - - """ - x = self.conv1(x) - x = F.relu(x) - x = self.conv2(x) - x = F.relu(x) - x = F.max_pool2d(x, 2) - x = self.dropout1(x) - x = torch.flatten(x, 1) - x = self.fc1(x) - x = F.relu(x) - x = self.dropout2(x) - x = self.fc2(x) - output = F.log_softmax(x, dim=1) - return output - - -The second thing to notice is that :code:`PytorchMNISTClient` class inherits from the :code:`fl.client.Client`, and hence it must implement the following methods: - -.. code-block:: python - - from abc import ABC, abstractmethod - - from flwr.common import EvaluateIns, EvaluateRes, FitIns, FitRes, ParametersRes - - - class Client(ABC): - """Abstract base class for Flower clients.""" - - @abstractmethod - def get_parameters(self, config) -> ParametersRes: - """Return the current local model parameters.""" - - @abstractmethod - def fit(self, ins: FitIns) -> FitRes: - """Refine the provided weights using the locally held dataset.""" - - @abstractmethod - def evaluate(self, ins: EvaluateIns) -> EvaluateRes: - """Evaluate the provided weights using the locally held dataset.""" - - -When comparing the abstract class to its derived class :code:`PytorchMNISTClient` you will notice that :code:`fit` calls a :code:`train` function and that :code:`evaluate` calls a :code:`test`: function. - -These functions can both be found inside the same :code:`quickstart-pytorch.mnist` module: - -.. code-block:: python - - def train( - model: torch.nn.ModuleList, - train_loader: torch.utils.data.DataLoader, - epochs: int, - device: torch.device = torch.device("cpu"), - ) -> int: - """Train routine based on 'Basic MNIST Example' - - Parameters - ---------- - model: torch.nn.ModuleList - Neural network model used in this example. - - train_loader: torch.utils.data.DataLoader - DataLoader used in training. - - epochs: int - Number of epochs to run in each round. - - device: torch.device - (Default value = torch.device("cpu")) - Device where the network will be trained within a client. - - Returns - ------- - num_examples_train: int - Number of total samples used during training. 
- - """ - model.train() - optimizer = optim.Adadelta(model.parameters(), lr=1.0) - scheduler = StepLR(optimizer, step_size=1, gamma=0.7) - print(f"Training {epochs} epoch(s) w/ {len(train_loader)} mini-batches each") - for epoch in range(epochs): # loop over the dataset multiple time - print() - loss_epoch: float = 0.0 - num_examples_train: int = 0 - for batch_idx, (data, target) in enumerate(train_loader): - # Grab mini-batch and transfer to device - data, target = data.to(device), target.to(device) - num_examples_train += len(data) - - # Zero gradients - optimizer.zero_grad() - - output = model(data) - loss = F.nll_loss(output, target) - loss.backward() - optimizer.step() - - loss_epoch += loss.item() - if batch_idx % 10 == 8: - print( - "Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}\t\t\t\t".format( - epoch, - num_examples_train, - len(train_loader) * train_loader.batch_size, - 100.0 - * num_examples_train - / len(train_loader) - / train_loader.batch_size, - loss.item(), - ), - end="\r", - flush=True, - ) - scheduler.step() - return num_examples_train - - - def test( - model: torch.nn.ModuleList, - test_loader: torch.utils.data.DataLoader, - device: torch.device = torch.device("cpu"), - ) -> Tuple[int, float, float]: - """Test routine 'Basic MNIST Example' - - Parameters - ---------- - model: torch.nn.ModuleList : - Neural network model used in this example. - - test_loader: torch.utils.data.DataLoader : - DataLoader used in test. - - device: torch.device : - (Default value = torch.device("cpu")) - Device where the network will be tested within a client. - - Returns - ------- - Tuple containing the total number of test samples, the test_loss, and the accuracy evaluated on the test set. - - """ - model.eval() - test_loss: float = 0 - correct: int = 0 - num_test_samples: int = 0 - with torch.no_grad(): - for data, target in test_loader: - data, target = data.to(device), target.to(device) - num_test_samples += len(data) - output = model(data) - test_loss += F.nll_loss( - output, target, reduction="sum" - ).item() # sum up batch loss - pred = output.argmax( - dim=1, keepdim=True - ) # get the index of the max log-probability - correct += pred.eq(target.view_as(pred)).sum().item() - - test_loss /= num_test_samples - - return (num_test_samples, test_loss, correct / num_test_samples) - - -Observe that these functions encapsulate regular training and test loops and provide :code:`fit` and :code:`evaluate` with final statistics for each round. -You could substitute them with your custom train and test loops and change the network architecture, and the entire example would still work flawlessly. -As a matter of fact, why not try and modify the code to an example of your liking? - - - -Give It a Try -------------- -Looking through the quickstart code description above will have given a good understanding of how *clients* and *servers* work in Flower, how to run a simple experiment, and the internals of a client wrapper. -Here are a few things you could try on your own and get more experience with Flower: - -- Try and change :code:`PytorchMNISTClient` so it can accept different architectures. -- Modify the :code:`train` function so that it accepts different optimizers -- Modify the :code:`test` function so that it proves not only the top-1 (regular accuracy) but also the top-5 accuracy? -- Go larger! Try to adapt the code to larger images and datasets. Why not try training on ImageNet with a ResNet-50? - -You are ready now. Enjoy learning in a federated way! 
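The walkthrough removed above closes by inviting readers to modify the ``train`` routine so it accepts different optimizers. A minimal sketch of that variation is given below; it assumes the same PyTorch setup as the deleted example, and the ``optimizer_fn`` parameter name is purely illustrative (it is not part of any Flower API).

.. code-block:: python

    import torch
    import torch.nn.functional as F
    import torch.utils.data
    from torch.optim.lr_scheduler import StepLR


    def train(
        model: torch.nn.Module,
        train_loader: torch.utils.data.DataLoader,
        epochs: int,
        # Illustrative parameter: any callable mapping model parameters to an optimizer.
        optimizer_fn=lambda params: torch.optim.Adadelta(params, lr=1.0),
        device: torch.device = torch.device("cpu"),
    ) -> int:
        """Training loop with a configurable optimizer instead of a hard-coded Adadelta."""
        model.train()
        optimizer = optimizer_fn(model.parameters())
        scheduler = StepLR(optimizer, step_size=1, gamma=0.7)
        num_examples_train = 0
        for _ in range(epochs):
            for data, target in train_loader:
                data, target = data.to(device), target.to(device)
                num_examples_train += len(data)
                optimizer.zero_grad()
                # Same objective as the removed example: NLL loss over log-softmax outputs.
                loss = F.nll_loss(model(data), target)
                loss.backward()
                optimizer.step()
            scheduler.step()
        return num_examples_train


    # Usage sketch: swap in SGD without touching the training loop itself.
    # train(model, train_loader, epochs=3,
    #       optimizer_fn=lambda p: torch.optim.SGD(p, lr=0.01, momentum=0.9))

Because the optimizer is injected as a factory, the client wrapper (``fit`` in the removed ``PytorchMNISTClient``) can stay unchanged while experiments vary the local optimization scheme.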
diff --git a/doc/source/index.rst b/doc/source/index.rst index 894155be03f1..c634ce939e73 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -99,7 +99,6 @@ Problem-oriented how-to guides show step-by-step how to achieve a specific goal. :maxdepth: 1 :caption: Legacy example guides - example-walkthrough-pytorch-mnist example-pytorch-from-centralized-to-federated example-mxnet-walk-through example-jax-from-centralized-to-federated diff --git a/doc/source/ref-example-projects.rst b/doc/source/ref-example-projects.rst index bade86dfaa54..597e3a596c51 100644 --- a/doc/source/ref-example-projects.rst +++ b/doc/source/ref-example-projects.rst @@ -7,15 +7,7 @@ pipelines, usually leveraging popular machine learning frameworks such as `PyTorch `_ or `TensorFlow `_. -.. note:: - Flower usage examples used to be bundled with Flower in a package called - ``flwr_example``. We are migrating those examples to standalone projects to - make them easier to use. All new examples are based in the directory - `examples `_. - The following examples are available as standalone projects. - - Quickstart TensorFlow/Keras --------------------------- @@ -54,101 +46,3 @@ This example shows how Flower can be used to build a federated learning system t - `Federated Learning on Raspberry Pi and Nvidia Jetson (Code) `_ - `Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) `_ - - -Legacy Examples (`flwr_example`) --------------------------------- - -.. warning:: - The usage examples in `flwr_example` are deprecated and will be removed in - the future. New examples are provided as standalone projects in - `examples `_. - - -Extra Dependencies -~~~~~~~~~~~~~~~~~~ - -The core Flower framework keeps a minimal set of dependencies. The examples -demonstrate Flower in the context of different machine learning frameworks, so -additional dependencies need to be installed before an example can be run. - -For PyTorch examples:: - - $ pip install flwr[examples-pytorch] - -For TensorFlow examples:: - - $ pip install flwr[examples-tensorflow] - -For both PyTorch and TensorFlow examples:: - - $ pip install flwr[examples-pytorch,examples-tensorflow] - -Please consult :code:`pyproject.toml` for a full list of possible extras -(section :code:`[tool.poetry.extras]`). - - -PyTorch Examples -~~~~~~~~~~~~~~~~ - -Our PyTorch examples are based on PyTorch 1.7. They should work with other -releases as well. So far, we provide the following examples. - -CIFAR-10 Image Classification -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -`CIFAR-10 and CIFAR-100 `_ are -popular RGB image datasets. The Flower CIFAR-10 example uses PyTorch to train a -simple CNN classifier in a federated learning setup with two clients. - -First, start a Flower server: - - $ ./src/py/flwr_example/pytorch_cifar/run-server.sh - -Then, start the two clients in a new terminal window: - - $ ./src/py/flwr_example/pytorch_cifar/run-clients.sh - -For more details, see :code:`src/py/flwr_example/pytorch_cifar`. - -ImageNet-2012 Image Classification -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -`ImageNet-2012 `_ is one of the major computer -vision datasets. The Flower ImageNet example uses PyTorch to train a ResNet-18 -classifier in a federated learning setup with ten clients. - -First, start a Flower server: - - $ ./src/py/flwr_example/pytorch_imagenet/run-server.sh - -Then, start the two clients in a new terminal window: - - $ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh - -For more details, see :code:`src/py/flwr_example/pytorch_imagenet`. 
- - -TensorFlow Examples -~~~~~~~~~~~~~~~~~~~ - -Our TensorFlow examples are based on TensorFlow 2.0 or newer. So far, we -provide the following examples. - -Fashion-MNIST Image Classification -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -`Fashion-MNIST `_ is often -used as the "Hello, world!" of machine learning. We follow this tradition and -provide an example which samples random local datasets from Fashion-MNIST and -trains a simple image classification model over those partitions. - -First, start a Flower server: - - $ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh - -Then, start the two clients in a new terminal window: - - $ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh - -For more details, see :code:`src/py/flwr_example/tensorflow_fashion_mnist`. diff --git a/pyproject.toml b/pyproject.toml index e0514254ecac..dc8b293bc880 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -171,7 +171,6 @@ strict = true [[tool.mypy.overrides]] module = [ - "flwr_example.*", "flwr_experimental.*", ] ignore_errors = true diff --git a/src/py/flwr_example/__init__.py b/src/py/flwr_example/__init__.py deleted file mode 100644 index cd2e721e36b5..000000000000 --- a/src/py/flwr_example/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""DEPRECATED Flower usage examples. - -Please note that the entire `flwr_examples` packages will be removed in a -future release. Examples will be migrated to the `/examples` directory. -""" - -warning = """ -DEPRECATION WARNING: Flower usage examples will be removed. - -All examples will be migrated to the `examples` directory. The `flwr_example` -package will be removed in a future release. -""" -print(warning) diff --git a/src/py/flwr_example/pytorch_cifar/__init__.py b/src/py/flwr_example/pytorch_cifar/__init__.py deleted file mode 100644 index e1a6d4c2e25e..000000000000 --- a/src/py/flwr_example/pytorch_cifar/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Flower example using PyTorch for CIFAR-10 image classification.""" - - -DEFAULT_SERVER_ADDRESS = "[::]:8080" diff --git a/src/py/flwr_example/pytorch_cifar/cifar.py b/src/py/flwr_example/pytorch_cifar/cifar.py deleted file mode 100644 index 279e635de356..000000000000 --- a/src/py/flwr_example/pytorch_cifar/cifar.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""PyTorch CIFAR-10 image classification. - -The code is generally adapted from 'PyTorch: A 60 Minute Blitz'. Further -explanations are given in the official PyTorch tutorial: - -https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html -""" - - -# mypy: ignore-errors -# pylint: disable=W0223 - - -from collections import OrderedDict -from typing import Tuple - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torchvision -import torchvision.transforms as transforms -from torch import Tensor - -import flwr as fl - -DATA_ROOT = "~/.flower/data/cifar-10" - - -# pylint: disable=unsubscriptable-object,bad-option-value,R1725 -class Net(nn.Module): - """Simple CNN adapted from 'PyTorch: A 60 Minute Blitz'.""" - - def __init__(self) -> None: - super(Net, self).__init__() - self.conv1 = nn.Conv2d(3, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 5 * 5, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 10) - - # pylint: disable=arguments-differ,invalid-name - def forward(self, x: Tensor) -> Tensor: - """Compute forward pass.""" - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 5 * 5) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - x = self.fc3(x) - return x - - def get_weights(self) -> fl.common.NDArrays: - """Get model weights as a list of NumPy ndarrays.""" - return [val.cpu().numpy() for _, val in self.state_dict().items()] - - def set_weights(self, weights: fl.common.NDArrays) -> None: - """Set model weights from a list of NumPy ndarrays.""" - state_dict = OrderedDict( - {k: torch.tensor(v) for k, v in zip(self.state_dict().keys(), weights)} - ) - self.load_state_dict(state_dict, strict=True) - - -def load_model() -> Net: - """Load a simple CNN.""" - return Net() - - -# pylint: disable=unused-argument -def load_data() -> Tuple[torchvision.datasets.CIFAR10, torchvision.datasets.CIFAR10]: - """Load CIFAR-10 (training and test set).""" - transform = transforms.Compose( - [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] - ) - trainset = torchvision.datasets.CIFAR10( - root=DATA_ROOT, train=True, download=True, transform=transform - ) - testset = torchvision.datasets.CIFAR10( - root=DATA_ROOT, train=False, download=True, transform=transform - ) - return trainset, testset - - -def train( - net: Net, - 
trainloader: torch.utils.data.DataLoader, - epochs: int, - device: torch.device, # pylint: disable=no-member -) -> None: - """Train the network.""" - # Define loss and optimizer - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9) - - print(f"Training {epochs} epoch(s) w/ {len(trainloader)} batches each") - - # Train the network - for epoch in range(epochs): # loop over the dataset multiple times - running_loss = 0.0 - for i, data in enumerate(trainloader, 0): - images, labels = data[0].to(device), data[1].to(device) - - # zero the parameter gradients - optimizer.zero_grad() - - # forward + backward + optimize - outputs = net(images) - loss = criterion(outputs, labels) - loss.backward() - optimizer.step() - - # print statistics - running_loss += loss.item() - if i % 2000 == 1999: # print every 2000 mini-batches - print("[%d, %5d] loss: %.3f" % (epoch + 1, i + 1, running_loss / 2000)) - running_loss = 0.0 - - -def test( - net: Net, - testloader: torch.utils.data.DataLoader, - device: torch.device, # pylint: disable=no-member -) -> Tuple[float, float]: - """Validate the network on the entire test set.""" - criterion = nn.CrossEntropyLoss() - correct = 0 - total = 0 - loss = 0.0 - with torch.no_grad(): - for data in testloader: - images, labels = data[0].to(device), data[1].to(device) - outputs = net(images) - loss += criterion(outputs, labels).item() - _, predicted = torch.max(outputs.data, 1) # pylint: disable=no-member - total += labels.size(0) - correct += (predicted == labels).sum().item() - accuracy = correct / total - return loss, accuracy diff --git a/src/py/flwr_example/pytorch_cifar/cifar_test.py b/src/py/flwr_example/pytorch_cifar/cifar_test.py deleted file mode 100644 index e9f908531020..000000000000 --- a/src/py/flwr_example/pytorch_cifar/cifar_test.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for PyTorch CIFAR-10 image classification.""" - - -import unittest - -import numpy as np - -from flwr.common import NDArrays - -from . 
import cifar - - -class CifarTestCase(unittest.TestCase): - """Tests for cifar module.""" - - def test_load_model(self) -> None: - """Test the number of (trainable) model parameters.""" - # pylint: disable=no-self-use - - # Prepare - expected = 62006 - - # Execute - model: cifar.Net = cifar.load_model() - actual = sum(p.numel() for p in model.parameters() if p.requires_grad) - - # Assert - assert actual == expected - - def test_get_weights(self) -> None: - """Test get_weights.""" - # pylint: disable=no-self-use - - # Prepare - model: cifar.Net = cifar.load_model() - expected = 10 - - # Execute - weights: NDArrays = model.get_weights() - - # Assert - assert len(weights) == expected - - def test_set_weights(self) -> None: - """Test set_weights.""" - # pylint: disable=no-self-use - - # Prepare - weights_expected: NDArrays = cifar.load_model().get_weights() - model: cifar.Net = cifar.load_model() - - # Execute - model.set_weights(weights_expected) - weights_actual: NDArrays = model.get_weights() - - # Assert - for nda_expected, nda_actual in zip(weights_expected, weights_actual): - np.testing.assert_array_equal(nda_expected, nda_actual) - - -if __name__ == "__main__": - unittest.main(verbosity=2) diff --git a/src/py/flwr_example/pytorch_cifar/client.py b/src/py/flwr_example/pytorch_cifar/client.py deleted file mode 100644 index 369e6a84377d..000000000000 --- a/src/py/flwr_example/pytorch_cifar/client.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower client example using PyTorch for CIFAR-10 image classification.""" - - -import argparse -import timeit - -import torch -import torchvision - -import flwr as fl -from flwr.common import ( - EvaluateIns, - EvaluateRes, - FitIns, - FitRes, - NDArrays, - ParametersRes, -) - -from . 
import DEFAULT_SERVER_ADDRESS, cifar - -# pylint: disable=no-member -DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") -# pylint: enable=no-member - - -class CifarClient(fl.client.Client): - """Flower client implementing CIFAR-10 image classification using PyTorch.""" - - def __init__( - self, - cid: str, - model: cifar.Net, - trainset: torchvision.datasets.CIFAR10, - testset: torchvision.datasets.CIFAR10, - ) -> None: - self.cid = cid - self.model = model - self.trainset = trainset - self.testset = testset - - def get_parameters(self) -> ParametersRes: - print(f"Client {self.cid}: get_parameters") - - weights: NDArrays = self.model.get_weights() - parameters = fl.common.ndarrays_to_parameters(weights) - return ParametersRes(parameters=parameters) - - def fit(self, ins: FitIns) -> FitRes: - print(f"Client {self.cid}: fit") - - weights: NDArrays = fl.common.parameters_to_ndarrays(ins.parameters) - config = ins.config - fit_begin = timeit.default_timer() - - # Get training config - epochs = int(config["epochs"]) - batch_size = int(config["batch_size"]) - - # Set model parameters - self.model.set_weights(weights) - - # Train model - trainloader = torch.utils.data.DataLoader( - self.trainset, batch_size=batch_size, shuffle=True - ) - cifar.train(self.model, trainloader, epochs=epochs, device=DEVICE) - - # Return the refined weights and the number of examples used for training - weights_prime: NDArrays = self.model.get_weights() - params_prime = fl.common.ndarrays_to_parameters(weights_prime) - num_examples_train = len(self.trainset) - fit_duration = timeit.default_timer() - fit_begin - return FitRes( - parameters=params_prime, - num_examples=num_examples_train, - num_examples_ceil=num_examples_train, - fit_duration=fit_duration, - ) - - def evaluate(self, ins: EvaluateIns) -> EvaluateRes: - print(f"Client {self.cid}: evaluate") - - weights = fl.common.parameters_to_ndarrays(ins.parameters) - - # Use provided weights to update the local model - self.model.set_weights(weights) - - # Evaluate the updated model on the local dataset - testloader = torch.utils.data.DataLoader( - self.testset, batch_size=32, shuffle=False - ) - loss, accuracy = cifar.test(self.model, testloader, device=DEVICE) - - # Return the number of evaluation examples and the evaluation result (loss) - return EvaluateRes( - loss=float(loss), num_examples=len(self.testset), accuracy=float(accuracy) - ) - - -def main() -> None: - """Load data, create and start CifarClient.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--server_address", - type=str, - default=DEFAULT_SERVER_ADDRESS, - help=f"gRPC server address (default: {DEFAULT_SERVER_ADDRESS})", - ) - parser.add_argument( - "--cid", type=str, required=True, help="Client CID (no default)" - ) - parser.add_argument( - "--log_host", - type=str, - help="Logserver address (no default)", - ) - args = parser.parse_args() - - # Configure logger - fl.common.logger.configure(f"client_{args.cid}", host=args.log_host) - - # Load model and data - model = cifar.load_model() - model.to(DEVICE) - trainset, testset = cifar.load_data() - - # Start client - client = CifarClient(args.cid, model, trainset, testset) - fl.client.start_client(args.server_address, client) - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_example/pytorch_cifar/run-clients.sh b/src/py/flwr_example/pytorch_cifar/run-clients.sh deleted file mode 100755 index de2c6e0d0cb4..000000000000 --- a/src/py/flwr_example/pytorch_cifar/run-clients.sh +++ 
/dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -set -e - -SERVER_ADDRESS="[::]:8080" -NUM_CLIENTS=2 - -echo "Starting $NUM_CLIENTS clients." -for ((i = 0; i < $NUM_CLIENTS; i++)) -do - echo "Starting client(cid=$i) with partition $i out of $NUM_CLIENTS clients." - python -m flwr_example.pytorch_cifar.client \ - --cid=$i \ - --server_address=$SERVER_ADDRESS & -done -echo "Started $NUM_CLIENTS clients." diff --git a/src/py/flwr_example/pytorch_cifar/run-server.sh b/src/py/flwr_example/pytorch_cifar/run-server.sh deleted file mode 100755 index 6bb0d6148bc3..000000000000 --- a/src/py/flwr_example/pytorch_cifar/run-server.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# Start a Flower server -python -m flwr_example.pytorch_cifar.server \ - --rounds=5 \ - --sample_fraction=1.0 \ - --min_sample_size=2 \ - --min_num_clients=2 diff --git a/src/py/flwr_example/pytorch_cifar/server.py b/src/py/flwr_example/pytorch_cifar/server.py deleted file mode 100644 index 5acb4a114b44..000000000000 --- a/src/py/flwr_example/pytorch_cifar/server.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Minimal example on how to start a simple Flower server.""" - - -import argparse -from typing import Callable, Dict, Optional, Tuple - -import torch -import torchvision - -import flwr as fl - -from . 
import DEFAULT_SERVER_ADDRESS, cifar - -# pylint: disable=no-member -DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") -# pylint: enable=no-member - - -def main() -> None: - """Start server and train five rounds.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--server_address", - type=str, - default=DEFAULT_SERVER_ADDRESS, - help=f"gRPC server address (default: {DEFAULT_SERVER_ADDRESS})", - ) - parser.add_argument( - "--rounds", - type=int, - default=1, - help="Number of rounds of federated learning (default: 1)", - ) - parser.add_argument( - "--sample_fraction", - type=float, - default=1.0, - help="Fraction of available clients used for fit/evaluate (default: 1.0)", - ) - parser.add_argument( - "--min_sample_size", - type=int, - default=2, - help="Minimum number of clients used for fit/evaluate (default: 2)", - ) - parser.add_argument( - "--min_num_clients", - type=int, - default=2, - help="Minimum number of available clients required for sampling (default: 2)", - ) - parser.add_argument( - "--log_host", - type=str, - help="Logserver address (no default)", - ) - args = parser.parse_args() - - # Load evaluation data - _, testset = cifar.load_data() - - # Create strategy - strategy = fl.server.strategy.FedAvg( - fraction_fit=args.sample_fraction, - min_fit_clients=args.min_sample_size, - min_available_clients=args.min_num_clients, - evaluate_fn=get_evaluate_fn(testset), - on_fit_config_fn=fit_config, - ) - - # Configure logger and start server - fl.common.logger.configure("server", host=args.log_host) - fl.server.start_server( - args.server_address, - config={"num_rounds": args.rounds}, - strategy=strategy, - ) - - -def fit_config(server_round: int) -> Dict[str, fl.common.Scalar]: - """Return a configuration with static batch size and (local) epochs.""" - config: Dict[str, fl.common.Scalar] = { - "epoch_global": str(server_round), - "epochs": str(1), - "batch_size": str(32), - } - return config - - -def get_evaluate_fn( - testset: torchvision.datasets.CIFAR10, -) -> Callable[[fl.common.NDArrays], Optional[Tuple[float, float]]]: - """Return an evaluation function for centralized evaluation.""" - - def evaluate(weights: fl.common.NDArrays) -> Optional[Tuple[float, float]]: - """Use the entire CIFAR-10 test set for evaluation.""" - model = cifar.load_model() - model.set_weights(weights) - model.to(DEVICE) - testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False) - return cifar.test(model, testloader, device=DEVICE) - - return evaluate - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_example/pytorch_imagenet/__init__.py b/src/py/flwr_example/pytorch_imagenet/__init__.py deleted file mode 100644 index 5a78f1f627df..000000000000 --- a/src/py/flwr_example/pytorch_imagenet/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Flower example using PyTorch for ImageNet image classification.""" - - -DEFAULT_SERVER_ADDRESS = "[::]:8080" diff --git a/src/py/flwr_example/pytorch_imagenet/client.py b/src/py/flwr_example/pytorch_imagenet/client.py deleted file mode 100644 index 72732edee6ef..000000000000 --- a/src/py/flwr_example/pytorch_imagenet/client.py +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower client example using PyTorch for Imagenet image classification.""" - - -import argparse -import timeit -from collections import OrderedDict - -import numpy as np -import torch -import torchvision -import torchvision.models as models - -import flwr as fl - -from . import imagenet - -DEFAULT_SERVER_ADDRESS = "[::]:8080" - -# pylint: disable=no-member -DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") -# pylint: enable=no-member - - -def get_weights(model: torch.nn.ModuleList) -> fl.common.NDArrays: - """Get model weights as a list of NumPy ndarrays.""" - return [val.cpu().numpy() for _, val in model.state_dict().items()] - - -def set_weights(model: torch.nn.ModuleList, weights: fl.common.NDArrays) -> None: - """Set model weights from a list of NumPy ndarrays.""" - state_dict = OrderedDict( - { - k: torch.tensor(np.atleast_1d(v)) - for k, v in zip(model.state_dict().keys(), weights) - } - ) - model.load_state_dict(state_dict, strict=True) - - -class ImageNetClient(fl.client.Client): - """Flower client implementing ImageNet image classification using PyTorch.""" - - def __init__( - self, - cid: str, - trainset: torchvision.datasets, - testset: torchvision.datasets, - nb_clients: int, - ) -> None: - self.cid = cid - self.model = models.resnet18().to(DEVICE) - self.trainset = trainset - self.testset = testset - self.nb_clients = nb_clients - - def get_parameters(self) -> fl.common.ParametersRes: - print(f"Client {self.cid}: get_parameters") - weights: fl.common.NDArrays = get_weights(self.model) - parameters = fl.common.ndarrays_to_parameters(weights) - return fl.common.ParametersRes(parameters=parameters) - - def fit(self, ins: fl.common.FitIns) -> fl.common.FitRes: - # Set the seed so we are sure to generate the same global batches - # indices across all clients - np.random.seed(123) - - print(f"Client {self.cid}: fit") - - weights: fl.common.NDArrays = fl.common.parameters_to_ndarrays(ins.parameters) - config = ins.config - fit_begin = timeit.default_timer() - - # Get training config - epochs = int(config["epochs"]) - batch_size = int(config["batch_size"]) - - # Set model parameters - set_weights(self.model, weights) - - # Get the data corresponding to this client - dataset_size = len(self.trainset) - nb_samples_per_clients = dataset_size // self.nb_clients - dataset_indices = list(range(dataset_size)) - np.random.shuffle(dataset_indices) - - # 
Get starting and ending indices w.r.t cid - start_ind = int(self.cid) * nb_samples_per_clients - end_ind = (int(self.cid) * nb_samples_per_clients) + nb_samples_per_clients - train_sampler = torch.utils.data.SubsetRandomSampler( - dataset_indices[start_ind:end_ind] - ) - - # Train model - trainloader = torch.utils.data.DataLoader( - self.trainset, batch_size=batch_size, shuffle=False, sampler=train_sampler - ) - - imagenet.train(self.model, trainloader, epochs=epochs, device=DEVICE) - - # Return the refined weights and the number of examples used for training - weights_prime: fl.common.NDArrays = get_weights(self.model) - params_prime = fl.common.ndarrays_to_parameters(weights_prime) - num_examples_train = len(self.trainset) - fit_duration = timeit.default_timer() - fit_begin - return fl.common.FitRes( - parameters=params_prime, - num_examples=num_examples_train, - num_examples_ceil=num_examples_train, - fit_duration=fit_duration, - ) - - def evaluate(self, ins: fl.common.EvaluateIns) -> fl.common.EvaluateRes: - # Set the set so we are sure to generate the same batches - # across all clients. - np.random.seed(123) - - print(f"Client {self.cid}: evaluate") - - config = ins.config - batch_size = int(config["batch_size"]) - - weights = fl.common.parameters_to_ndarrays(ins.parameters) - - # Use provided weights to update the local model - set_weights(self.model, weights) - - # Get the data corresponding to this client - dataset_size = len(self.testset) - nb_samples_per_clients = dataset_size // self.nb_clients - dataset_indices = list(range(dataset_size)) - np.random.shuffle(dataset_indices) - - # Get starting and ending indices w.r.t cid - start_ind = int(self.cid) * nb_samples_per_clients - end_ind = (int(self.cid) * nb_samples_per_clients) + nb_samples_per_clients - test_sampler = torch.utils.data.SubsetRandomSampler( - dataset_indices[start_ind:end_ind] - ) - - # Evaluate the updated model on the local dataset - testloader = torch.utils.data.DataLoader( - self.testset, batch_size=batch_size, shuffle=False, sampler=test_sampler - ) - - loss, accuracy = imagenet.test(self.model, testloader, device=DEVICE) - - # Return the number of evaluation examples and the evaluation result (loss) - return fl.common.EvaluateRes( - loss=float(loss), num_examples=len(self.testset), accuracy=float(accuracy) - ) - - -def main() -> None: - """Load data, create and start CifarClient.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--server_address", - type=str, - default=DEFAULT_SERVER_ADDRESS, - help=f"gRPC server address (default: {DEFAULT_SERVER_ADDRESS})", - ) - parser.add_argument( - "--cid", type=str, required=True, help="Client CID (no default)" - ) - parser.add_argument( - "--data_path", type=str, required=True, help="ImageNet datapath" - ) - parser.add_argument( - "--log_host", - type=str, - help="Logserver address (no default)", - ) - parser.add_argument( - "--nb_clients", - type=int, - default=40, - help="Total number of clients", - ) - args = parser.parse_args() - - # Configure logger - fl.common.logger.configure(f"client_{args.cid}", host=args.log_host) - - trainset, testset = imagenet.load_data(args.data_path) - - # Start client - client = ImageNetClient(args.cid, trainset, testset, args.nb_clients) - fl.client.start_client(args.server_address, client) - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_example/pytorch_imagenet/imagenet.py b/src/py/flwr_example/pytorch_imagenet/imagenet.py deleted file mode 100644 index 20ec24fd85bf..000000000000 
--- a/src/py/flwr_example/pytorch_imagenet/imagenet.py +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""PyTorch ImageNet image classification. - -ImageNet dataset must be downloaded first -http://image-net.org - -""" - - -# mypy: ignore-errors - - -import os -from collections import OrderedDict -from typing import Tuple - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -import torchvision -import torchvision.datasets as datasets -import torchvision.transforms as transforms -from torch import Tensor -from tqdm import tqdm - -import flwr as fl - - -def load_data(data_path) -> Tuple[datasets.ImageFolder, datasets.ImageFolder]: - """Load ImageNet (training and val set).""" - - # Load ImageNet and normalize - traindir = os.path.join(data_path, "train") - valdir = os.path.join(data_path, "val") - - normalize = transforms.Normalize( - mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] - ) - - trainset = datasets.ImageFolder( - traindir, - transforms.Compose( - [ - transforms.RandomResizedCrop(224), - transforms.RandomHorizontalFlip(), - transforms.ToTensor(), - normalize, - ] - ), - ) - - valset = datasets.ImageFolder( - valdir, - transforms.Compose( - [ - transforms.Resize(256), - transforms.CenterCrop(224), - transforms.ToTensor(), - normalize, - ] - ), - ) - - return trainset, valset - - -def train( - net: torch.nn.ModuleList, - trainloader: torch.utils.data.DataLoader, - epochs: int, - device: torch.device, -) -> None: - """Train the network.""" - - # Define loss and optimizer - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.Adadelta(net.parameters(), lr=1.0) - - print(f"Training {epochs} epoch(s) w/ {len(trainloader)} batches each") - - # Train the network - for epoch in range(epochs): # loop over the dataset multiple times - running_loss = 0.0 - acc1 = 0.0 - acc5 = 0.0 - for i, data in enumerate(tqdm(trainloader), 0): - images, labels = data[0].to(device), data[1].to(device) - - # zero the parameter gradients - optimizer.zero_grad() - - # forward + backward + optimize - outputs = net(images) - loss = criterion(outputs, labels) - loss.backward() - optimizer.step() - - # print statistics - running_loss += loss.item() - tmp1, tmp2 = accuracy(outputs, labels, topk=(1, 5)) - acc1, acc5 = acc1 + tmp1, acc5 + tmp2 - if i % 5 == 4: # print every 5 mini-batches - print( - "[%d, %5d] loss: %.3f acc1: %.3f acc5: %.3f" - % ( - epoch + 1, - i + 1, - running_loss / (i + 1), - acc1 / (i + 1), - acc5 / (i + 1), - ), - flush=True, - ) - - -def test( - net: torch.nn.ModuleList, - testloader: torch.utils.data.DataLoader, - device: torch.device, -) -> Tuple[float, float]: - """Validate the network on the entire test set.""" - criterion = nn.CrossEntropyLoss() - total = 0 - loss = 0.0 - acc1 = 0.0 - acc5 = 0.0 - with torch.no_grad(): - i = 0 - for data in tqdm(testloader): - images, 
labels = data[0].to(device), data[1].to(device) - outputs = net(images) - loss += criterion(outputs, labels).item() - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - tmp1, tmp2 = accuracy(outputs, labels, topk=(1, 5)) - acc1, acc5 = acc1 + tmp1, acc5 + tmp2 - i += 1 - return loss / i, acc1 / i - - -def accuracy(output, target, topk=(1,)): - """Computes the accuracy over the k top predictions for the specified values of k""" - with torch.no_grad(): - maxk = max(topk) - batch_size = target.size(0) - - _, pred = output.topk(maxk, 1, True, True) - pred = pred.t() - correct = pred.eq(target.view(1, -1).expand_as(pred)) - - res = [] - for k in topk: - correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) - res.append(correct_k.mul_(100.0 / batch_size)) - return res diff --git a/src/py/flwr_example/pytorch_imagenet/run-clients.sh b/src/py/flwr_example/pytorch_imagenet/run-clients.sh deleted file mode 100755 index f907ac67db12..000000000000 --- a/src/py/flwr_example/pytorch_imagenet/run-clients.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -set -e - -SERVER_ADDRESS="[::]:8080" -NUM_CLIENTS=40 -IMAGENET_PATH="~/Downloads/imagenet-object-localization-challenge/" - -echo "Starting $NUM_CLIENTS clients." -for ((i = 0; i < $NUM_CLIENTS; i++)) -do - echo "Starting client(cid=$i) with partition $i out of $NUM_CLIENTS clients." - python -m flwr_example.pytorch_imagenet.client \ - --cid=$i \ - --server_address=$SERVER_ADDRESS \ - --data_path=$IMAGENET_PATH & -done -echo "Started $NUM_CLIENTS clients." diff --git a/src/py/flwr_example/pytorch_imagenet/run-server.sh b/src/py/flwr_example/pytorch_imagenet/run-server.sh deleted file mode 100755 index cd6909202d6f..000000000000 --- a/src/py/flwr_example/pytorch_imagenet/run-server.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -IMAGENET_PATH="~/Downloads/imagenet-object-localization-challenge/" - -# Start a Flower server -python -m flwr_example.pytorch_imagenet.server \ - --rounds=100 \ - --sample_fraction=0.25 \ - --min_sample_size=10 \ - --min_num_clients=30 \ - --data_path=$IMAGENET_PATH diff --git a/src/py/flwr_example/pytorch_imagenet/server.py b/src/py/flwr_example/pytorch_imagenet/server.py deleted file mode 100644 index cde8a3d572fd..000000000000 --- a/src/py/flwr_example/pytorch_imagenet/server.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Minimal example on how to start a simple Flower server.""" - - -import argparse -from typing import Callable, Dict, Optional, Tuple - -import torch -import torchvision -import torchvision.models as models - -import flwr as fl - -from . import imagenet -from .client import get_weights, set_weights - -DEFAULT_SERVER_ADDRESS = "[::]:8080" - -# pylint: disable=no-member -DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") -# pylint: enable=no-member - - -def main() -> None: - """Start server and train five rounds.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--server_address", - type=str, - default=DEFAULT_SERVER_ADDRESS, - help=f"gRPC server address (default: {DEFAULT_SERVER_ADDRESS})", - ) - parser.add_argument( - "--rounds", - type=int, - default=1, - help="Number of rounds of federated learning (default: 1)", - ) - parser.add_argument( - "--data_path", type=str, required=True, help="ImageNet datapath" - ) - parser.add_argument( - "--sample_fraction", - type=float, - default=1.0, - help="Fraction of available clients used for fit/evaluate (default: 1.0)", - ) - parser.add_argument( - "--min_sample_size", - type=int, - default=2, - help="Minimum number of clients used for fit/evaluate (default: 2)", - ) - parser.add_argument( - "--min_num_clients", - type=int, - default=2, - help="Minimum number of available clients required for sampling (default: 2)", - ) - parser.add_argument( - "--log_host", - type=str, - help="Logserver address (no default)", - ) - args = parser.parse_args() - - # Load evaluation data - _, testset = imagenet.load_data(args.data_path) - - # Create strategy - strategy = fl.server.strategy.FedAvg( - fraction_fit=args.sample_fraction, - min_fit_clients=args.min_sample_size, - min_available_clients=args.min_num_clients, - evaluate_fn=get_evaluate_fn(testset), - on_fit_config_fn=fit_config, - ) - - # Configure logger and start server - fl.common.logger.configure("server", host=args.log_host) - fl.server.start_server( - args.server_address, - config={"num_rounds": args.rounds}, - strategy=strategy, - ) - - -def fit_config(server_round: int) -> Dict[str, fl.common.Scalar]: - """Return a configuration with static batch size and (local) 
epochs.""" - config: Dict[str, fl.common.Scalar] = { - "epoch_global": str(server_round), - "epochs": str(5), - "batch_size": str(128), - } - return config - - -def get_evaluate_fn( - testset: torchvision.datasets, -) -> Callable[[fl.common.NDArrays], Optional[Tuple[float, float]]]: - """Return an evaluation function for centralized evaluation.""" - - def evaluate(weights: fl.common.NDArrays) -> Optional[Tuple[float, float]]: - """Use the entire ImageNet test set for evaluation.""" - - model = models.resnet18() - - set_weights(model, weights) - model.to(DEVICE) - model.eval() - - testloader = torch.utils.data.DataLoader( - testset, num_workers=6, batch_size=128, shuffle=False - ) - return imagenet.test(model, testloader, device=DEVICE) - - return evaluate - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_example/pytorch_save_weights/__init__.py b/src/py/flwr_example/pytorch_save_weights/__init__.py deleted file mode 100644 index 7a4638952123..000000000000 --- a/src/py/flwr_example/pytorch_save_weights/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower example using PyTorch for CIFAR-10 image classification.""" diff --git a/src/py/flwr_example/pytorch_save_weights/cifar.py b/src/py/flwr_example/pytorch_save_weights/cifar.py deleted file mode 100644 index e9fb0552a054..000000000000 --- a/src/py/flwr_example/pytorch_save_weights/cifar.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""PyTorch CIFAR-10 image classification. - -The code is generally adapted from 'PyTorch: A 60 Minute Blitz'. 
Further -explanations are given in the official PyTorch tutorial: - -https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html -""" - - -# mypy: ignore-errors -# pylint: disable=W0223 - - -from collections import OrderedDict -from typing import Tuple - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torchvision -import torchvision.transforms as transforms -from torch import Tensor - -import flwr as fl - -DATA_ROOT = "~/.flower/data/cifar-10" - - -# pylint: disable=unsubscriptable-object -class Net(nn.Module): - """Simple CNN adapted from 'PyTorch: A 60 Minute Blitz'.""" - - def __init__(self) -> None: - super(Net, self).__init__() - self.conv1 = nn.Conv2d(3, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 5 * 5, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 10) - - # pylint: disable=arguments-differ,invalid-name - def forward(self, x: Tensor) -> Tensor: - """Compute forward pass.""" - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 5 * 5) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - x = self.fc3(x) - return x - - def get_weights(self) -> fl.common.NDArrays: - """Get model weights as a list of NumPy ndarrays.""" - return [val.cpu().numpy() for _, val in self.state_dict().items()] - - def set_weights(self, weights: fl.common.NDArrays) -> None: - """Set model weights from a list of NumPy ndarrays.""" - state_dict = OrderedDict( - {k: torch.tensor(v) for k, v in zip(self.state_dict().keys(), weights)} - ) - self.load_state_dict(state_dict, strict=True) - - -# pylint: disable=unused-argument -def load_data() -> Tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader]: - """Load CIFAR-10 (training and test set).""" - transform = transforms.Compose( - [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] - ) - - # Training set - trainset = torchvision.datasets.CIFAR10( - root=DATA_ROOT, train=True, download=True, transform=transform - ) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=32, shuffle=True) - - # Test set - testset = torchvision.datasets.CIFAR10( - root=DATA_ROOT, train=False, download=True, transform=transform - ) - testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False) - - return trainloader, testloader - - -def train( - net: Net, - trainloader: torch.utils.data.DataLoader, - epochs: int, - device: torch.device, # pylint: disable=no-member -) -> None: - """Train the network.""" - # Define loss and optimizer - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9) - - print(f"Training {epochs} epoch(s) w/ {len(trainloader)} batches each") - - # Train the network - for epoch in range(epochs): # loop over the dataset multiple times - running_loss = 0.0 - for i, data in enumerate(trainloader, 0): - images, labels = data[0].to(device), data[1].to(device) - - # zero the parameter gradients - optimizer.zero_grad() - - # forward + backward + optimize - outputs = net(images) - loss = criterion(outputs, labels) - loss.backward() - optimizer.step() - - # print statistics - running_loss += loss.item() - if i % 2000 == 1999: # print every 2000 mini-batches - print("[%d, %5d] loss: %.3f" % (epoch + 1, i + 1, running_loss / 2000)) - running_loss = 0.0 - - -def test( - net: Net, - testloader: torch.utils.data.DataLoader, - device: torch.device, # pylint: disable=no-member -) -> Tuple[float, float]: - 
"""Validate the network on the entire test set.""" - criterion = nn.CrossEntropyLoss() - correct = 0 - total = 0 - loss = 0.0 - with torch.no_grad(): - for data in testloader: - images, labels = data[0].to(device), data[1].to(device) - outputs = net(images) - loss += criterion(outputs, labels).item() - _, predicted = torch.max(outputs.data, 1) # pylint: disable=no-member - total += labels.size(0) - correct += (predicted == labels).sum().item() - accuracy = correct / total - return loss, accuracy diff --git a/src/py/flwr_example/pytorch_save_weights/client.py b/src/py/flwr_example/pytorch_save_weights/client.py deleted file mode 100644 index 6f35fdb881c3..000000000000 --- a/src/py/flwr_example/pytorch_save_weights/client.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower client example using PyTorch for CIFAR-10 image classification.""" - - -import argparse -import timeit -from typing import Dict, List, Tuple - -import numpy as np -import torch -import torchvision - -import flwr as fl -from flwr.common import ( - EvaluateIns, - EvaluateRes, - FitIns, - FitRes, - NDArrays, - ParametersRes, -) - -from . 
import cifar - -# pylint: disable=no-member -DEVICE: str = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - -# Flower Client -class CifarClient(fl.client.NumPyClient): - """Flower client implementing CIFAR-10 image classification using PyTorch.""" - - def __init__( - self, - model: cifar.Net, - trainloader: torch.utils.data.DataLoader, - testloader: torch.utils.data.DataLoader, - ) -> None: - self.model = model - self.trainloader = trainloader - self.testloader = testloader - - def get_parameters(self) -> List[np.ndarray]: - return self.model.get_weights() - - def fit( - self, parameters: List[np.ndarray], config: Dict[str, fl.common.Scalar] - ) -> Tuple[List[np.ndarray], int]: - # Set model parameters - self.model.set_weights(parameters) - - # Train model - cifar.train(self.model, self.trainloader, epochs=1, device=DEVICE) - - # Return the updated model parameters - return self.model.get_weights(), len(self.trainloader) - - def evaluate( - self, parameters: List[np.ndarray], config: Dict[str, fl.common.Scalar] - ) -> Tuple[int, float, float]: - # Use provided weights to update the local model - self.model.set_weights(parameters) - - # Evaluate the updated model on the local dataset - loss, accuracy = cifar.test(self.model, self.testloader, device=DEVICE) - - # Return the number of evaluation examples and the evaluation result (loss) - return len(self.testloader), float(loss), float(accuracy) - - -def main() -> None: - """Load data, start CifarClient.""" - - # Load model and data - model = cifar.Net() - model.to(DEVICE) - trainloader, testloader = cifar.load_data() - - # Start client - client = CifarClient(model, trainloader, testloader) - fl.client.start_numpy_client(server_address="[::]:8080", client=client) - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_example/pytorch_save_weights/run-clients.sh b/src/py/flwr_example/pytorch_save_weights/run-clients.sh deleted file mode 100755 index 9065415148a0..000000000000 --- a/src/py/flwr_example/pytorch_save_weights/run-clients.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -set -e - -NUM_CLIENTS=2 - -echo "Starting $NUM_CLIENTS clients." -for ((i = 0; i < $NUM_CLIENTS; i++)) -do - echo "Starting client(cid=$i) with partition $i out of $NUM_CLIENTS clients." - python -m flwr_example.pytorch_save_weights.client & -done -echo "Started $NUM_CLIENTS clients." diff --git a/src/py/flwr_example/pytorch_save_weights/run-server.sh b/src/py/flwr_example/pytorch_save_weights/run-server.sh deleted file mode 100755 index 0464c371169c..000000000000 --- a/src/py/flwr_example/pytorch_save_weights/run-server.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# Start a Flower server -python -m flwr_example.pytorch_save_weights.server diff --git a/src/py/flwr_example/pytorch_save_weights/server.py b/src/py/flwr_example/pytorch_save_weights/server.py deleted file mode 100644 index ede93e268c17..000000000000 --- a/src/py/flwr_example/pytorch_save_weights/server.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower server example.""" - - -import argparse -from typing import Callable, Dict, List, Optional, Tuple, Union - -import numpy as np -import torch -import torchvision - -import flwr as fl - -from . 
import cifar - - -class SaveModelStrategy(fl.server.strategy.FedAvg): - def aggregate_fit( - self, - server_round: int, - results: List[Tuple[fl.server.client_proxy.ClientProxy, fl.common.FitRes]], - failures: List[ - Union[ - Tuple[fl.server.client_proxy.ClientProxy, fl.common.FitRes], - BaseException, - ] - ], - ) -> Optional[fl.common.NDArrays]: - weights = super().aggregate_fit(server_round, results, failures) - if weights is not None: - # Save weights - print(f"Saving round {server_round} weights...") - np.savez(f"round-{server_round}-weights.npz", *weights) - return weights - - -def main() -> None: - """Start server and train five rounds.""" - # Load evaluation data - _, testloader = cifar.load_data() - - # Create client_manager, strategy, and server - strategy = SaveModelStrategy( - fraction_fit=1.0, - min_fit_clients=2, - min_available_clients=2, - evaluate_fn=get_evaluate_fn(testloader), - on_fit_config_fn=fit_config, - ) - - # Run server - fl.server.start_server( - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, - ) - - -def fit_config(server_round: int) -> Dict[str, fl.common.Scalar]: - """Return a configuration with static batch size and (local) epochs.""" - config: Dict[str, fl.common.Scalar] = { - "epoch_global": str(server_round), - "epochs": str(1), - "batch_size": str(32), - } - return config - - -def get_evaluate_fn( - testloader: torch.utils.data.DataLoader, -) -> Callable[[fl.common.NDArrays], Optional[Tuple[float, float]]]: - """Return an evaluation function for centralized evaluation.""" - - # pylint: disable=no-member - DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - # pylint: enable=no-member - - def evaluate(weights: fl.common.NDArrays) -> Optional[Tuple[float, float]]: - """Use the entire CIFAR-10 test set for evaluation.""" - model = cifar.Net() - model.set_weights(weights) - model.to(DEVICE) - return cifar.test(model, testloader, device=DEVICE) - - return evaluate - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_example/quickstart_pytorch/__init__.py b/src/py/flwr_example/quickstart_pytorch/__init__.py deleted file mode 100644 index f3ab11afad31..000000000000 --- a/src/py/flwr_example/quickstart_pytorch/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -warning = """ -DEPRECATION WARNING: Example moved to `examples/quickstart_pytorch`. - -All examples will be migrated to the `examples` directory. `flwr_example` will -be removed in a future release. -""" -print(warning) diff --git a/src/py/flwr_example/quickstart_pytorch/client.py b/src/py/flwr_example/quickstart_pytorch/client.py deleted file mode 100644 index fc675f123cc8..000000000000 --- a/src/py/flwr_example/quickstart_pytorch/client.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - - -from argparse import ArgumentParser - -import numpy as np -import torch - -import flwr as fl - -from . 
import mnist - -DATA_ROOT = "./data/mnist" - -if __name__ == "__main__": - # Training settings - parser = ArgumentParser(description="PyTorch MNIST Example") - parser.add_argument( - "--server_address", - type=str, - default="[::]:8080", - help=f"gRPC server address (default: '[::]:8080')", - ) - parser.add_argument( - "--cid", - type=int, - metavar="N", - help="ID of current client (default: 0)", - ) - parser.add_argument( - "--nb_clients", - type=int, - default=2, - metavar="N", - help="Total number of clients being launched (default: 2)", - ) - parser.add_argument( - "--train-batch-size", - type=int, - default=64, - metavar="N", - help="input batch size for training (default: 64)", - ) - parser.add_argument( - "--test-batch-size", - type=int, - default=1000, - metavar="N", - help="input batch size for testing (default: 1000)", - ) - parser.add_argument( - "--epochs", - type=int, - default=14, - metavar="N", - help="number of epochs to train (default: 14)", - ) - - args = parser.parse_args() - - # Load MNIST data - train_loader, test_loader = mnist.load_data( - data_root=DATA_ROOT, - train_batch_size=args.train_batch_size, - test_batch_size=args.test_batch_size, - cid=args.cid, - nb_clients=args.nb_clients, - ) - - # pylint: disable=no-member - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - # pylint: enable=no-member - - # Instantiate client - client = mnist.PytorchMNISTClient( - cid=args.cid, - train_loader=train_loader, - test_loader=test_loader, - epochs=args.epochs, - device=device, - ) - - # Start client - fl.client.start_client(args.server_address, client) diff --git a/src/py/flwr_example/quickstart_pytorch/mnist.py b/src/py/flwr_example/quickstart_pytorch/mnist.py deleted file mode 100644 index 9a65145e6ca3..000000000000 --- a/src/py/flwr_example/quickstart_pytorch/mnist.py +++ /dev/null @@ -1,436 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""PyTorch MNIST image classification. - -The code is generally adapted from PyTorch's Basic MNIST Example. 
-The original code can be inspected in the official PyTorch github: - -https://github.com/pytorch/examples/blob/master/mnist/main.py -""" - - -# mypy: ignore-errors -# pylint: disable=W0223 - -import timeit -from collections import OrderedDict -from typing import Tuple - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch import Tensor, optim -from torch.optim.lr_scheduler import StepLR -from torch.utils.data import DataLoader, Dataset, SubsetRandomSampler -from torchvision import datasets, transforms - -import flwr as fl - - -def dataset_partitioner( - dataset: torch.utils.data.Dataset, - batch_size: int, - client_id: int, - number_of_clients: int, -) -> torch.utils.data.DataLoader: - """Helper function to partition datasets - - Parameters - ---------- - dataset: torch.utils.data.Dataset - Dataset to be partitioned into *number_of_clients* subsets. - - batch_size: int - Size of mini-batches used by the returned DataLoader. - - client_id: int - Unique integer used for selecting a specific partition. - - number_of_clients: int - Total number of clients launched during training. This value dictates the number of partitions to be created. - - - Returns - ------- - data_loader: torch.utils.data.Dataset - DataLoader for specific client_id considering number_of_clients partitions. - - """ - - # Set the seed so we are sure to generate the same global batches - # indices across all clients - np.random.seed(123) - - # Get the data corresponding to this client - dataset_size = len(dataset) - nb_samples_per_clients = dataset_size // number_of_clients - dataset_indices = list(range(dataset_size)) - np.random.shuffle(dataset_indices) - - # Get starting and ending indices w.r.t CLIENT_ID - start_ind = client_id * nb_samples_per_clients - end_ind = start_ind + nb_samples_per_clients - data_sampler = SubsetRandomSampler(dataset_indices[start_ind:end_ind]) - data_loader = torch.utils.data.DataLoader( - dataset, batch_size=batch_size, shuffle=False, sampler=data_sampler - ) - return data_loader - - -def load_data( - data_root: str, - train_batch_size: int, - test_batch_size: int, - cid: int, - nb_clients: int, -) -> Tuple[DataLoader, DataLoader]: - """Helper function that loads both training and test datasets for MNIST. - - Parameters - ---------- - data_root: str - Directory where MNIST dataset will be stored. - - train_batch_size: int - Mini-batch size for training set. - - test_batch_size: int - Mini-batch size for test set. - - cid: int - Client ID used to select a specific partition. - - nb_clients: int - Total number of clients launched during training. This value dictates the number of unique to be created. - - - Returns - ------- - (train_loader, test_loader): Tuple[DataLoader, DataLoader] - Tuple contaning DataLoaders for training and test sets. 
- - """ - - transform = transforms.Compose( - [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))] - ) - - train_dataset = datasets.MNIST( - data_root, train=True, download=True, transform=transform - ) - test_dataset = datasets.MNIST(data_root, train=False, transform=transform) - - # Create partitioned datasets based on the total number of clients and client_id - train_loader = dataset_partitioner( - dataset=train_dataset, - batch_size=train_batch_size, - client_id=cid, - number_of_clients=nb_clients, - ) - - test_loader = dataset_partitioner( - dataset=test_dataset, - batch_size=test_batch_size, - client_id=cid, - number_of_clients=nb_clients, - ) - - return (train_loader, test_loader) - - -class MNISTNet(nn.Module): - """Simple CNN adapted from Pytorch's 'Basic MNIST Example'.""" - - def __init__(self) -> None: - super(MNISTNet, self).__init__() - self.conv1 = nn.Conv2d(1, 32, 3, 1) - self.conv2 = nn.Conv2d(32, 64, 3, 1) - self.dropout1 = nn.Dropout2d(0.25) - self.dropout2 = nn.Dropout2d(0.5) - self.fc1 = nn.Linear(9216, 128) - self.fc2 = nn.Linear(128, 10) - - # pylint: disable=arguments-differ,invalid-name - def forward(self, x: Tensor) -> Tensor: - """Compute forward pass. - - Parameters - ---------- - x: Tensor - Mini-batch of shape (N,28,28) containing images from MNIST dataset. - - - Returns - ------- - output: Tensor - The probability density of the output being from a specific class given the input. - - """ - x = self.conv1(x) - x = F.relu(x) - x = self.conv2(x) - x = F.relu(x) - x = F.max_pool2d(x, 2) - x = self.dropout1(x) - x = torch.flatten(x, 1) - x = self.fc1(x) - x = F.relu(x) - x = self.dropout2(x) - x = self.fc2(x) - output = F.log_softmax(x, dim=1) - return output - - -def train( - model: torch.nn.Module, - train_loader: torch.utils.data.DataLoader, - epochs: int, - device: torch.device = torch.device("cpu"), -) -> int: - """Train routine based on 'Basic MNIST Example' - - Parameters - ---------- - model: torch.nn.Module - Neural network model used in this example. - - train_loader: torch.utils.data.DataLoader - DataLoader used in training. - - epochs: int - Number of epochs to run in each round. - - device: torch.device - (Default value = torch.device("cpu")) - Device where the network will be trained within a client. - - Returns - ------- - num_examples_train: int - Number of total samples used during training. 
- - """ - model.train() - optimizer = optim.Adadelta(model.parameters(), lr=1.0) - scheduler = StepLR(optimizer, step_size=1, gamma=0.7) - print(f"Training {epochs} epoch(s) w/ {len(train_loader)} mini-batches each") - for epoch in range(epochs): # loop over the dataset multiple times - print() - loss_epoch: float = 0.0 - num_examples_train: int = 0 - for batch_idx, (data, target) in enumerate(train_loader): - # Grab mini-batch and transfer to device - data, target = data.to(device), target.to(device) - num_examples_train += len(data) - - # Zero gradients - optimizer.zero_grad() - - output = model(data) - loss = F.nll_loss(output, target) - loss.backward() - optimizer.step() - - loss_epoch += loss.item() - if batch_idx % 10 == 8: - print( - "Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}\t\t\t\t".format( - epoch, - num_examples_train, - len(train_loader) * train_loader.batch_size, - 100.0 - * num_examples_train - / len(train_loader) - / train_loader.batch_size, - loss.item(), - ), - end="\r", - flush=True, - ) - scheduler.step() - return num_examples_train - - -def test( - model: torch.nn.Module, - test_loader: torch.utils.data.DataLoader, - device: torch.device = torch.device("cpu"), -) -> Tuple[int, float, float]: - """Test routine 'Basic MNIST Example' - - Parameters - ---------- - model: torch.nn.Module : - Neural network model used in this example. - - test_loader: torch.utils.data.DataLoader : - DataLoader used in test. - - device: torch.device : - (Default value = torch.device("cpu")) - Device where the network will be tested within a client. - - Returns - ------- - Tuple containing the total number of test samples, the test_loss, and the accuracy evaluated on the test set. - - """ - model.eval() - test_loss: float = 0 - correct: int = 0 - num_test_samples: int = 0 - with torch.no_grad(): - for data, target in test_loader: - data, target = data.to(device), target.to(device) - num_test_samples += len(data) - output = model(data) - test_loss += F.nll_loss( - output, target, reduction="sum" - ).item() # sum up batch loss - pred = output.argmax( - dim=1, keepdim=True - ) # get the index of the max log-probability - correct += pred.eq(target.view_as(pred)).sum().item() - - test_loss /= num_test_samples - - return (num_test_samples, test_loss, correct / num_test_samples) - - -class PytorchMNISTClient(fl.client.Client): - """Flower client implementing MNIST handwritten classification using PyTorch.""" - - def __init__( - self, - cid: int, - train_loader: datasets, - test_loader: datasets, - epochs: int, - device: torch.device = torch.device("cpu"), - ) -> None: - self.model = MNISTNet().to(device) - self.cid = cid - self.train_loader = train_loader - self.test_loader = test_loader - self.device = device - self.epochs = epochs - - def get_weights(self) -> fl.common.NDArrays: - """Get model weights as a list of NumPy ndarrays.""" - return [val.cpu().numpy() for _, val in self.model.state_dict().items()] - - def set_weights(self, weights: fl.common.NDArrays) -> None: - """Set model weights from a list of NumPy ndarrays. 
- - Parameters - ---------- - weights: fl.common.NDArrays - Weights received by the server and set to local model - - - Returns - ------- - - """ - state_dict = OrderedDict( - { - k: torch.tensor(v) - for k, v in zip(self.model.state_dict().keys(), weights) - } - ) - self.model.load_state_dict(state_dict, strict=True) - - def get_parameters(self) -> fl.common.ParametersRes: - """Encapsulates the weights into Flower Parameters.""" - weights: fl.common.NDArrays = self.get_weights() - parameters = fl.common.ndarrays_to_parameters(weights) - return fl.common.ParametersRes(parameters=parameters) - - def fit(self, ins: fl.common.FitIns) -> fl.common.FitRes: - """Trains the model on local dataset - - Parameters - ---------- - ins: fl.common.FitIns - Parameters sent by the server to be used during training. - - Returns - ------- - Set of variables containing the new set of weights and information the client. - - """ - - # Set the seed so we are sure to generate the same global batches - # indices across all clients - np.random.seed(123) - - weights: fl.common.NDArrays = fl.common.parameters_to_ndarrays(ins.parameters) - fit_begin = timeit.default_timer() - - # Set model parameters/weights - self.set_weights(weights) - - # Train model - num_examples_train: int = train( - self.model, self.train_loader, epochs=self.epochs, device=self.device - ) - - # Return the refined weights and the number of examples used for training - weights_prime: fl.common.NDArrays = self.get_weights() - params_prime = fl.common.ndarrays_to_parameters(weights_prime) - fit_duration = timeit.default_timer() - fit_begin - return fl.common.FitRes( - parameters=params_prime, - num_examples=num_examples_train, - num_examples_ceil=num_examples_train, - fit_duration=fit_duration, - ) - - def evaluate(self, ins: fl.common.EvaluateIns) -> fl.common.EvaluateRes: - """ - - Parameters - ---------- - ins: fl.common.EvaluateIns - Parameters sent by the server to be used during testing. - - - Returns - ------- - Information the clients testing results. - - """ - weights = fl.common.parameters_to_ndarrays(ins.parameters) - - # Use provided weights to update the local model - self.set_weights(weights) - - ( - num_examples_test, - test_loss, - accuracy, - ) = test(self.model, self.test_loader, device=self.device) - print( - f"Client {self.cid} - Evaluate on {num_examples_test} samples: Average loss: {test_loss:.4f}, Accuracy: {100*accuracy:.2f}%\n" - ) - - # Return the number of evaluation examples and the evaluation result (loss) - return fl.common.EvaluateRes( - loss=float(test_loss), - num_examples=num_examples_test, - accuracy=float(accuracy), - ) diff --git a/src/py/flwr_example/quickstart_pytorch/run-clients.sh b/src/py/flwr_example/quickstart_pytorch/run-clients.sh deleted file mode 100755 index f53d63dc6168..000000000000 --- a/src/py/flwr_example/quickstart_pytorch/run-clients.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -set -e - -SERVER_ADDRESS="[::]:8080" -NUM_CLIENTS=2 - -echo "Starting $NUM_CLIENTS clients." -for ((i = 0; i < $NUM_CLIENTS; i++)) -do - echo "Starting client(cid=$i) with partition $i out of $NUM_CLIENTS clients." - python -m flwr_example.quickstart_pytorch.client \ - --cid=$i \ - --server_address=$SERVER_ADDRESS \ - --nb_clients=$NUM_CLIENTS & -done -echo "Started $NUM_CLIENTS clients." diff --git a/src/py/flwr_example/quickstart_pytorch/run-server.sh b/src/py/flwr_example/quickstart_pytorch/run-server.sh deleted file mode 100755 index 29cb8cf7caf7..000000000000 --- a/src/py/flwr_example/quickstart_pytorch/run-server.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# Start a Flower server -python -m flwr_example.quickstart_pytorch.server diff --git a/src/py/flwr_example/quickstart_pytorch/server.py b/src/py/flwr_example/quickstart_pytorch/server.py deleted file mode 100644 index b8cc1e15aca1..000000000000 --- a/src/py/flwr_example/quickstart_pytorch/server.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - - -import flwr as fl - -if __name__ == "__main__": - fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3)) diff --git a/src/py/flwr_example/quickstart_tensorflow/__init__.py b/src/py/flwr_example/quickstart_tensorflow/__init__.py deleted file mode 100644 index c3a6e19194a5..000000000000 --- a/src/py/flwr_example/quickstart_tensorflow/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -warning = """ -DEPRECATION WARNING: Example moved to `examples/quickstart_tensorflow`. - -All examples will be migrated to the `examples` directory. `flwr_example` will -be removed in a future release. 
-""" -print(warning) diff --git a/src/py/flwr_example/quickstart_tensorflow/client.py b/src/py/flwr_example/quickstart_tensorflow/client.py deleted file mode 100644 index 9c4aab0b33ab..000000000000 --- a/src/py/flwr_example/quickstart_tensorflow/client.py +++ /dev/null @@ -1,55 +0,0 @@ -from typing import Tuple, cast - -import numpy as np -import tensorflow as tf - -import flwr as fl - -### uncomment this if you are getting the ssl error -# ssl._create_default_https_context = ssl._create_unverified_context -### - - -def main() -> None: - # Build and compile Keras model - model = tf.keras.models.Sequential( - [ - tf.keras.layers.Flatten(input_shape=(28, 28)), - tf.keras.layers.Dense(128, activation="relu"), - tf.keras.layers.Dropout(0.2), - tf.keras.layers.Dense(10, activation="softmax"), - ] - ) - - model.compile( - optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"] - ) - - # Load MNIST data - (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() - x_train, x_test = x_train / 255.0, x_test / 255.0 - - # Implement a Flower client - class MnistClient(fl.client.NumPyClient): - def get_parameters(self, config) -> fl.common.NDArrays: - return cast(fl.common.NDArrays, model.get_weights()) - - def fit(self, parameters, config) -> Tuple[fl.common.NDArrays, int, dict]: - model.set_weights(parameters) - model.fit(x_train, y_train, epochs=1, batch_size=32) - return model.get_weights(), len(x_train), {} - - def evaluate(self, parameters, config) -> Tuple[int, int, dict]: - model.set_weights(parameters) - loss, accuracy = model.evaluate(x_test, y_test) - return loss, len(x_test), {"accuracy": accuracy} - - # Start client - fl.client.start_numpy_client( - server_address="127.0.0.1:8080", - client=MnistClient(), - ) - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_example/quickstart_tensorflow/run-clients.sh b/src/py/flwr_example/quickstart_tensorflow/run-clients.sh deleted file mode 100755 index 5747bfe5fe1b..000000000000 --- a/src/py/flwr_example/quickstart_tensorflow/run-clients.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -set -e - -SERVER_ADDRESS="[::]:8080" -NUM_CLIENTS=2 - -echo "Starting $NUM_CLIENTS clients." -for ((i = 0; i < $NUM_CLIENTS; i++)) -do - echo "Starting client(cid=$i) with partition $i out of $NUM_CLIENTS clients." - python -m flwr_example.quickstart_tensorflow.client & -done -echo "Started $NUM_CLIENTS clients." diff --git a/src/py/flwr_example/quickstart_tensorflow/run-server.sh b/src/py/flwr_example/quickstart_tensorflow/run-server.sh deleted file mode 100755 index 030167972ac0..000000000000 --- a/src/py/flwr_example/quickstart_tensorflow/run-server.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# Start a Flower server -python -m flwr_example.quickstart_tensorflow.server diff --git a/src/py/flwr_example/quickstart_tensorflow/server.py b/src/py/flwr_example/quickstart_tensorflow/server.py deleted file mode 100644 index e621005ed277..000000000000 --- a/src/py/flwr_example/quickstart_tensorflow/server.py +++ /dev/null @@ -1,4 +0,0 @@ -import flwr as fl - -if __name__ == "__main__": - fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3)) diff --git a/src/py/flwr_example/tensorflow_fashion_mnist/__init__.py b/src/py/flwr_example/tensorflow_fashion_mnist/__init__.py deleted file mode 100644 index 3370fa33e781..000000000000 --- a/src/py/flwr_example/tensorflow_fashion_mnist/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower example using TensorFlow for Fashion-MNIST image classification.""" - - -DEFAULT_SERVER_ADDRESS = "[::]:8080" diff --git a/src/py/flwr_example/tensorflow_fashion_mnist/client.py b/src/py/flwr_example/tensorflow_fashion_mnist/client.py deleted file mode 100644 index bcee031163e1..000000000000 --- a/src/py/flwr_example/tensorflow_fashion_mnist/client.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower client example using TensorFlow for Fashion-MNIST image classification.""" - - -import argparse -from typing import Dict, Tuple, cast - -import numpy as np -import tensorflow as tf - -import flwr as fl -from flwr.common import NDArrays - -from . 
import DEFAULT_SERVER_ADDRESS, fashion_mnist - - -class FashionMnistClient(fl.client.KerasClient): - """Flower KerasClient implementing Fashion-MNIST image classification.""" - - def __init__( - self, - model: tf.keras.Model, - xy_train: Tuple[np.ndarray, np.ndarray], - xy_test: Tuple[np.ndarray, np.ndarray], - ): - self.model = model - self.x_train, self.y_train = xy_train - self.x_test, self.y_test = xy_test - - def get_weights(self) -> NDArrays: - return cast(NDArrays, self.model.get_weights()) - - def fit( - self, weights: NDArrays, config: Dict[str, fl.common.Scalar] - ) -> Tuple[NDArrays, int, int]: - # Use provided weights to update local model - self.model.set_weights(weights) - - # Train the local model using local dataset - self.model.fit( - self.x_train, - self.y_train, - batch_size=int(config["batch_size"]), - epochs=int(config["epochs"]), - ) - - # Return the refined weights and the number of examples used for training - return self.model.get_weights(), len(self.x_train), len(self.x_train) - - def evaluate( - self, weights: NDArrays, config: Dict[str, fl.common.Scalar] - ) -> Tuple[int, float, float]: - # Update local model and evaluate on local dataset - self.model.set_weights(weights) - loss, accuracy = self.model.evaluate( - self.x_test, self.y_test, batch_size=len(self.x_test), verbose=2 - ) - - # Return number of evaluation examples and evaluation result (loss/accuracy) - return len(self.x_test), float(loss), float(accuracy) - - -def main() -> None: - """Load data, create and start FashionMnistClient.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--server_address", - type=str, - default=DEFAULT_SERVER_ADDRESS, - help=f"gRPC server address (default: {DEFAULT_SERVER_ADDRESS})", - ) - parser.add_argument( - "--partition", type=int, required=True, help="Partition index (no default)" - ) - parser.add_argument( - "--clients", - type=int, - required=True, - help="Number of clients (no default)", - ) - parser.add_argument( - "--log_host", - type=str, - help="Logserver address (no default)", - ) - args = parser.parse_args() - - # Configure logger - fl.common.logger.configure(f"client_{args.partition}", host=args.log_host) - - # Load model and data - model = fashion_mnist.load_model() - xy_train, xy_test = fashion_mnist.load_data( - partition=args.partition, num_partitions=args.clients - ) - - # Start client - client = FashionMnistClient(model, xy_train, xy_test) - fl.client.start_keras_client(args.server_address, client) - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_example/tensorflow_fashion_mnist/download.py b/src/py/flwr_example/tensorflow_fashion_mnist/download.py deleted file mode 100644 index c2f0eb580cf0..000000000000 --- a/src/py/flwr_example/tensorflow_fashion_mnist/download.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Helper script to download CIFAR-10/100.""" - - -from logging import INFO - -import tensorflow as tf - -from flwr.common.logger import log - -tf.get_logger().setLevel("ERROR") - - -def main() -> None: - """Download data.""" - log(INFO, "Download Fashion-MNIST") - tf.keras.datasets.fashion_mnist.load_data() - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_example/tensorflow_fashion_mnist/fashion_mnist.py b/src/py/flwr_example/tensorflow_fashion_mnist/fashion_mnist.py deleted file mode 100644 index 6c7ad2eead79..000000000000 --- a/src/py/flwr_example/tensorflow_fashion_mnist/fashion_mnist.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower client example using TensorFlow for Fashion-MNIST image classification.""" - - -from typing import Tuple, cast - -import numpy as np -import tensorflow as tf - -tf.get_logger().setLevel("ERROR") - -SEED = 2020 - - -def load_model(input_shape: Tuple[int, int, int] = (28, 28, 1)) -> tf.keras.Model: - """Load model for Fashion-MNIST.""" - # Kernel initializer - kernel_initializer = tf.keras.initializers.glorot_uniform(seed=SEED) - - # Architecture - inputs = tf.keras.layers.Input(shape=input_shape) - layers = tf.keras.layers.Conv2D( - 32, - kernel_size=(5, 5), - strides=(1, 1), - kernel_initializer=kernel_initializer, - padding="same", - activation="relu", - )(inputs) - layers = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))(layers) - layers = tf.keras.layers.Conv2D( - 64, - kernel_size=(5, 5), - strides=(1, 1), - kernel_initializer=kernel_initializer, - padding="same", - activation="relu", - )(layers) - layers = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))(layers) - layers = tf.keras.layers.Flatten()(layers) - layers = tf.keras.layers.Dense( - 512, kernel_initializer=kernel_initializer, activation="relu" - )(layers) - - outputs = tf.keras.layers.Dense( - 10, kernel_initializer=kernel_initializer, activation="softmax" - )(layers) - - model = tf.keras.Model(inputs=inputs, outputs=outputs) - - # Compile model - model.compile( - optimizer=tf.keras.optimizers.Adam(), - loss=tf.keras.losses.categorical_crossentropy, - metrics=["accuracy"], - ) - return model - - -def load_data( - partition: int, num_partitions: int -) -> Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]: - """Load partition of randomly shuffled Fashion-MNIST subset.""" - # Load training and test data (ignoring the test data for now) - (x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data() - - # Take a subset - x_train, y_train = shuffle(x_train, y_train, seed=SEED) - x_test, y_test = shuffle(x_test, y_test, seed=SEED) - - x_train, y_train = get_partition(x_train, y_train, partition, num_partitions) - x_test, y_test = get_partition(x_test, y_test, 
partition, num_partitions) - - # Adjust x sets shape for model - x_train = adjust_x_shape(x_train) - x_test = adjust_x_shape(x_test) - - # Normalize data - x_train = x_train.astype("float32") / 255.0 - x_test = x_test.astype("float32") / 255.0 - - # Convert class vectors to one-hot encoded labels - y_train = tf.keras.utils.to_categorical(y_train, 10) - y_test = tf.keras.utils.to_categorical(y_test, 10) - - return (x_train, y_train), (x_test, y_test) - - -def adjust_x_shape(nda: np.ndarray) -> np.ndarray: - """Turn shape (x, y, z) into (x, y, z, 1).""" - nda_adjusted = np.reshape(nda, (nda.shape[0], nda.shape[1], nda.shape[2], 1)) - return cast(np.ndarray, nda_adjusted) - - -def shuffle( - x_orig: np.ndarray, y_orig: np.ndarray, seed: int -) -> Tuple[np.ndarray, np.ndarray]: - """Shuffle x and y in the same way.""" - np.random.seed(seed) - idx = np.random.permutation(len(x_orig)) - return x_orig[idx], y_orig[idx] - - -def get_partition( - x_orig: np.ndarray, y_orig: np.ndarray, partition: int, num_clients: int -) -> Tuple[np.ndarray, np.ndarray]: - """Return a single partition of an equally partitioned dataset.""" - step_size = len(x_orig) / num_clients - start_index = int(step_size * partition) - end_index = int(start_index + step_size) - return x_orig[start_index:end_index], y_orig[start_index:end_index] diff --git a/src/py/flwr_example/tensorflow_fashion_mnist/fashion_mnist_test.py b/src/py/flwr_example/tensorflow_fashion_mnist/fashion_mnist_test.py deleted file mode 100644 index f6b922b27eab..000000000000 --- a/src/py/flwr_example/tensorflow_fashion_mnist/fashion_mnist_test.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for Fashion-MNIST.""" - - -import numpy as np - -from .fashion_mnist import shuffle - - -def test_shuffle() -> None: - """Test if shuffle is deterministic depending on the provided seed.""" - # Prepare - x_tt = np.arange(8) - y_tt = np.arange(8) - - x_expected_2019 = np.array([1, 4, 3, 6, 7, 5, 2, 0]) - y_expected_2019 = np.array([1, 4, 3, 6, 7, 5, 2, 0]) - - x_expected_2020 = np.array([6, 2, 1, 4, 5, 3, 7, 0]) - y_expected_2020 = np.array([6, 2, 1, 4, 5, 3, 7, 0]) - - # Execute & assert - for _ in range(3): - x_actual, y_actual = shuffle(x_tt, y_tt, seed=2019) - np.testing.assert_array_equal(x_expected_2019, x_actual) - np.testing.assert_array_equal(y_expected_2019, y_actual) - - for _ in range(3): - x_actual, y_actual = shuffle(x_tt, y_tt, seed=2020) - np.testing.assert_array_equal(x_expected_2020, x_actual) - np.testing.assert_array_equal(y_expected_2020, y_actual) diff --git a/src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh b/src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh deleted file mode 100755 index 732688b9be08..000000000000 --- a/src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -set -e - -SERVER_ADDRESS="[::]:8080" -NUM_CLIENTS=10 - -echo "Starting $NUM_CLIENTS clients." -for ((i = 0; i < $NUM_CLIENTS; i++)) -do - echo "Starting client(cid=$i) with partition $i out of $NUM_CLIENTS clients." - python -m flwr_example.tensorflow_fashion_mnist.client \ - --cid=$i \ - --partition=$i \ - --clients=$NUM_CLIENTS \ - --server_address=$SERVER_ADDRESS & -done -echo "Started $NUM_CLIENTS clients." diff --git a/src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh b/src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh deleted file mode 100755 index d80f3c8b30ce..000000000000 --- a/src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -# Start a Flower server -python -m flwr_example.tensorflow_fashion_mnist.server \ - --rounds=5 \ - --sample_fraction=0.5 \ - --min_sample_size=5 \ - --min_num_clients=5 diff --git a/src/py/flwr_example/tensorflow_fashion_mnist/server.py b/src/py/flwr_example/tensorflow_fashion_mnist/server.py deleted file mode 100644 index f16da1e4bc93..000000000000 --- a/src/py/flwr_example/tensorflow_fashion_mnist/server.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Minimal example on how to start a simple Flower server.""" - - -import argparse -from typing import Callable, Dict, Optional, Tuple - -import numpy as np - -import flwr as fl - -from . import DEFAULT_SERVER_ADDRESS, fashion_mnist - - -def main() -> None: - """Start server and train five rounds.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--server_address", - type=str, - default=DEFAULT_SERVER_ADDRESS, - help=f"gRPC server address (default: {DEFAULT_SERVER_ADDRESS})", - ) - parser.add_argument( - "--rounds", - type=int, - default=1, - help="Number of rounds of federated learning (default: 1)", - ) - parser.add_argument( - "--sample_fraction", - type=float, - default=0.1, - help="Fraction of available clients used for fit/evaluate (default: 0.1)", - ) - parser.add_argument( - "--min_sample_size", - type=int, - default=1, - help="Minimum number of clients used for fit/evaluate (default: 1)", - ) - parser.add_argument( - "--min_num_clients", - type=int, - default=1, - help="Minimum number of available clients required for sampling (default: 1)", - ) - parser.add_argument( - "--log_host", - type=str, - help="Logserver address (no default)", - ) - args = parser.parse_args() - - # Load evaluation data - _, xy_test = fashion_mnist.load_data(partition=0, num_partitions=1) - - # Create strategy - strategy = fl.server.strategy.FedAvg( - fraction_fit=args.sample_fraction, - min_fit_clients=args.min_sample_size, - min_available_clients=args.min_num_clients, - evaluate_fn=get_evaluate_fn(xy_test=xy_test), - on_fit_config_fn=fit_config, - ) - - # Configure logger and start server - fl.common.logger.configure("server", host=args.log_host) - fl.server.start_server( - args.server_address, - config={"num_rounds": args.rounds}, - strategy=strategy, - ) - - -def fit_config(server_round: int) -> Dict[str, fl.common.Scalar]: - """Return a configuration with static batch size and (local) epochs.""" - config: Dict[str, fl.common.Scalar] = { - "epoch_global": str(server_round), - "epochs": str(1), - "batch_size": str(64), - } - return config - - -def get_evaluate_fn( - xy_test: Tuple[np.ndarray, np.ndarray] -) -> Callable[[fl.common.NDArrays], Optional[Tuple[float, float]]]: - """Return an evaluation function for centralized evaluation.""" - - def evaluate(weights: 
fl.common.NDArrays) -> Optional[Tuple[float, float]]: - """Use the entire Fashion-MNIST test set for evaluation.""" - model = fashion_mnist.load_model() - model.set_weights(weights) - loss, acc = model.evaluate(xy_test[0], xy_test[1], batch_size=len(xy_test)) - return float(loss), float(acc) - - return evaluate - - -if __name__ == "__main__": - main() From a9161a47d41e35b41cbfd03dbe27bedc4ad79bd8 Mon Sep 17 00:00:00 2001 From: Yan Gao Date: Tue, 26 Mar 2024 16:19:36 +0000 Subject: [PATCH 36/57] Specify required version of transformers package (#3172) --- examples/llm-flowertune/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/llm-flowertune/requirements.txt b/examples/llm-flowertune/requirements.txt index c7ff57b403f7..196531c99b92 100644 --- a/examples/llm-flowertune/requirements.txt +++ b/examples/llm-flowertune/requirements.txt @@ -6,3 +6,4 @@ bitsandbytes==0.41.3 scipy==1.11.2 peft==0.4.0 fschat[model_worker,webui]==0.2.35 +transformers==4.38.1 From 907ff225a7d2047e21d6f72aa5c1b9b5822dda2c Mon Sep 17 00:00:00 2001 From: "Daniel J. Beutel" Date: Tue, 26 Mar 2024 17:34:42 +0100 Subject: [PATCH 37/57] Create Ping endpoint (#3171) --- src/proto/flwr/proto/fleet.proto | 5 +++ src/py/flwr/proto/fleet_pb2.py | 34 +++++++++++-------- src/py/flwr/proto/fleet_pb2.pyi | 25 ++++++++++++++ src/py/flwr/proto/fleet_pb2_grpc.py | 33 ++++++++++++++++++ src/py/flwr/proto/fleet_pb2_grpc.pyi | 10 ++++++ .../fleet/grpc_rere/fleet_servicer.py | 12 ++++++- .../fleet/message_handler/message_handler.py | 10 ++++++ 7 files changed, 113 insertions(+), 16 deletions(-) diff --git a/src/proto/flwr/proto/fleet.proto b/src/proto/flwr/proto/fleet.proto index c900a3b1148d..fcb301181f5a 100644 --- a/src/proto/flwr/proto/fleet.proto +++ b/src/proto/flwr/proto/fleet.proto @@ -23,6 +23,7 @@ import "flwr/proto/task.proto"; service Fleet { rpc CreateNode(CreateNodeRequest) returns (CreateNodeResponse) {} rpc DeleteNode(DeleteNodeRequest) returns (DeleteNodeResponse) {} + rpc Ping(PingRequest) returns (PingResponse) {} // Retrieve one or more tasks, if possible // @@ -43,6 +44,10 @@ message CreateNodeResponse { Node node = 1; } message DeleteNodeRequest { Node node = 1; } message DeleteNodeResponse {} +// Ping messages +message PingRequest { Node node = 1; } +message PingResponse { bool success = 1; } + // PullTaskIns messages message PullTaskInsRequest { Node node = 1; diff --git a/src/py/flwr/proto/fleet_pb2.py b/src/py/flwr/proto/fleet_pb2.py index e8443c296f0c..dbf64fb850a5 100644 --- a/src/py/flwr/proto/fleet_pb2.py +++ b/src/py/flwr/proto/fleet_pb2.py @@ -16,7 +16,7 @@ from flwr.proto import task_pb2 as flwr_dot_proto_dot_task__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16\x66lwr/proto/fleet.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\"\x13\n\x11\x43reateNodeRequest\"4\n\x12\x43reateNodeResponse\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"3\n\x11\x44\x65leteNodeRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"\x14\n\x12\x44\x65leteNodeResponse\"F\n\x12PullTaskInsRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"k\n\x13PullTaskInsResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12*\n\rtask_ins_list\x18\x02 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"@\n\x12PushTaskResRequest\x12*\n\rtask_res_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskRes\"\xae\x01\n\x13PushTaskResResponse\x12(\n\treconnect\x18\x01 
\x01(\x0b\x32\x15.flwr.proto.Reconnect\x12=\n\x07results\x18\x02 \x03(\x0b\x32,.flwr.proto.PushTaskResResponse.ResultsEntry\x1a.\n\x0cResultsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\r:\x02\x38\x01\"\x1e\n\tReconnect\x12\x11\n\treconnect\x18\x01 \x01(\x04\x32\xc9\x02\n\x05\x46leet\x12M\n\nCreateNode\x12\x1d.flwr.proto.CreateNodeRequest\x1a\x1e.flwr.proto.CreateNodeResponse\"\x00\x12M\n\nDeleteNode\x12\x1d.flwr.proto.DeleteNodeRequest\x1a\x1e.flwr.proto.DeleteNodeResponse\"\x00\x12P\n\x0bPullTaskIns\x12\x1e.flwr.proto.PullTaskInsRequest\x1a\x1f.flwr.proto.PullTaskInsResponse\"\x00\x12P\n\x0bPushTaskRes\x12\x1e.flwr.proto.PushTaskResRequest\x1a\x1f.flwr.proto.PushTaskResResponse\"\x00\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16\x66lwr/proto/fleet.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\"\x13\n\x11\x43reateNodeRequest\"4\n\x12\x43reateNodeResponse\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"3\n\x11\x44\x65leteNodeRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"\x14\n\x12\x44\x65leteNodeResponse\"-\n\x0bPingRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"\x1f\n\x0cPingResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\"F\n\x12PullTaskInsRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"k\n\x13PullTaskInsResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12*\n\rtask_ins_list\x18\x02 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"@\n\x12PushTaskResRequest\x12*\n\rtask_res_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskRes\"\xae\x01\n\x13PushTaskResResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12=\n\x07results\x18\x02 \x03(\x0b\x32,.flwr.proto.PushTaskResResponse.ResultsEntry\x1a.\n\x0cResultsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\r:\x02\x38\x01\"\x1e\n\tReconnect\x12\x11\n\treconnect\x18\x01 \x01(\x04\x32\x86\x03\n\x05\x46leet\x12M\n\nCreateNode\x12\x1d.flwr.proto.CreateNodeRequest\x1a\x1e.flwr.proto.CreateNodeResponse\"\x00\x12M\n\nDeleteNode\x12\x1d.flwr.proto.DeleteNodeRequest\x1a\x1e.flwr.proto.DeleteNodeResponse\"\x00\x12;\n\x04Ping\x12\x17.flwr.proto.PingRequest\x1a\x18.flwr.proto.PingResponse\"\x00\x12P\n\x0bPullTaskIns\x12\x1e.flwr.proto.PullTaskInsRequest\x1a\x1f.flwr.proto.PullTaskInsResponse\"\x00\x12P\n\x0bPushTaskRes\x12\x1e.flwr.proto.PushTaskResRequest\x1a\x1f.flwr.proto.PushTaskResResponse\"\x00\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -33,18 +33,22 @@ _globals['_DELETENODEREQUEST']._serialized_end=210 _globals['_DELETENODERESPONSE']._serialized_start=212 _globals['_DELETENODERESPONSE']._serialized_end=232 - _globals['_PULLTASKINSREQUEST']._serialized_start=234 - _globals['_PULLTASKINSREQUEST']._serialized_end=304 - _globals['_PULLTASKINSRESPONSE']._serialized_start=306 - _globals['_PULLTASKINSRESPONSE']._serialized_end=413 - _globals['_PUSHTASKRESREQUEST']._serialized_start=415 - _globals['_PUSHTASKRESREQUEST']._serialized_end=479 - _globals['_PUSHTASKRESRESPONSE']._serialized_start=482 - _globals['_PUSHTASKRESRESPONSE']._serialized_end=656 - _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_start=610 - _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_end=656 - _globals['_RECONNECT']._serialized_start=658 - _globals['_RECONNECT']._serialized_end=688 - _globals['_FLEET']._serialized_start=691 - 
_globals['_FLEET']._serialized_end=1020 + _globals['_PINGREQUEST']._serialized_start=234 + _globals['_PINGREQUEST']._serialized_end=279 + _globals['_PINGRESPONSE']._serialized_start=281 + _globals['_PINGRESPONSE']._serialized_end=312 + _globals['_PULLTASKINSREQUEST']._serialized_start=314 + _globals['_PULLTASKINSREQUEST']._serialized_end=384 + _globals['_PULLTASKINSRESPONSE']._serialized_start=386 + _globals['_PULLTASKINSRESPONSE']._serialized_end=493 + _globals['_PUSHTASKRESREQUEST']._serialized_start=495 + _globals['_PUSHTASKRESREQUEST']._serialized_end=559 + _globals['_PUSHTASKRESRESPONSE']._serialized_start=562 + _globals['_PUSHTASKRESRESPONSE']._serialized_end=736 + _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_start=690 + _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_end=736 + _globals['_RECONNECT']._serialized_start=738 + _globals['_RECONNECT']._serialized_end=768 + _globals['_FLEET']._serialized_start=771 + _globals['_FLEET']._serialized_end=1161 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/fleet_pb2.pyi b/src/py/flwr/proto/fleet_pb2.pyi index 86bc358858d2..39edb61ca0d7 100644 --- a/src/py/flwr/proto/fleet_pb2.pyi +++ b/src/py/flwr/proto/fleet_pb2.pyi @@ -53,6 +53,31 @@ class DeleteNodeResponse(google.protobuf.message.Message): ) -> None: ... global___DeleteNodeResponse = DeleteNodeResponse +class PingRequest(google.protobuf.message.Message): + """Ping messages""" + DESCRIPTOR: google.protobuf.descriptor.Descriptor + NODE_FIELD_NUMBER: builtins.int + @property + def node(self) -> flwr.proto.node_pb2.Node: ... + def __init__(self, + *, + node: typing.Optional[flwr.proto.node_pb2.Node] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["node",b"node"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["node",b"node"]) -> None: ... +global___PingRequest = PingRequest + +class PingResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + SUCCESS_FIELD_NUMBER: builtins.int + success: builtins.bool + def __init__(self, + *, + success: builtins.bool = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["success",b"success"]) -> None: ... 
+global___PingResponse = PingResponse + class PullTaskInsRequest(google.protobuf.message.Message): """PullTaskIns messages""" DESCRIPTOR: google.protobuf.descriptor.Descriptor diff --git a/src/py/flwr/proto/fleet_pb2_grpc.py b/src/py/flwr/proto/fleet_pb2_grpc.py index 2b53ec43e851..c31a4ec73f0e 100644 --- a/src/py/flwr/proto/fleet_pb2_grpc.py +++ b/src/py/flwr/proto/fleet_pb2_grpc.py @@ -24,6 +24,11 @@ def __init__(self, channel): request_serializer=flwr_dot_proto_dot_fleet__pb2.DeleteNodeRequest.SerializeToString, response_deserializer=flwr_dot_proto_dot_fleet__pb2.DeleteNodeResponse.FromString, ) + self.Ping = channel.unary_unary( + '/flwr.proto.Fleet/Ping', + request_serializer=flwr_dot_proto_dot_fleet__pb2.PingRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_fleet__pb2.PingResponse.FromString, + ) self.PullTaskIns = channel.unary_unary( '/flwr.proto.Fleet/PullTaskIns', request_serializer=flwr_dot_proto_dot_fleet__pb2.PullTaskInsRequest.SerializeToString, @@ -51,6 +56,12 @@ def DeleteNode(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def Ping(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def PullTaskIns(self, request, context): """Retrieve one or more tasks, if possible @@ -82,6 +93,11 @@ def add_FleetServicer_to_server(servicer, server): request_deserializer=flwr_dot_proto_dot_fleet__pb2.DeleteNodeRequest.FromString, response_serializer=flwr_dot_proto_dot_fleet__pb2.DeleteNodeResponse.SerializeToString, ), + 'Ping': grpc.unary_unary_rpc_method_handler( + servicer.Ping, + request_deserializer=flwr_dot_proto_dot_fleet__pb2.PingRequest.FromString, + response_serializer=flwr_dot_proto_dot_fleet__pb2.PingResponse.SerializeToString, + ), 'PullTaskIns': grpc.unary_unary_rpc_method_handler( servicer.PullTaskIns, request_deserializer=flwr_dot_proto_dot_fleet__pb2.PullTaskInsRequest.FromString, @@ -136,6 +152,23 @@ def DeleteNode(request, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + @staticmethod + def Ping(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.Fleet/Ping', + flwr_dot_proto_dot_fleet__pb2.PingRequest.SerializeToString, + flwr_dot_proto_dot_fleet__pb2.PingResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + @staticmethod def PullTaskIns(request, target, diff --git a/src/py/flwr/proto/fleet_pb2_grpc.pyi b/src/py/flwr/proto/fleet_pb2_grpc.pyi index cfa83f737439..33ba9440793a 100644 --- a/src/py/flwr/proto/fleet_pb2_grpc.pyi +++ b/src/py/flwr/proto/fleet_pb2_grpc.pyi @@ -16,6 +16,10 @@ class FleetStub: flwr.proto.fleet_pb2.DeleteNodeRequest, flwr.proto.fleet_pb2.DeleteNodeResponse] + Ping: grpc.UnaryUnaryMultiCallable[ + flwr.proto.fleet_pb2.PingRequest, + flwr.proto.fleet_pb2.PingResponse] + PullTaskIns: grpc.UnaryUnaryMultiCallable[ flwr.proto.fleet_pb2.PullTaskInsRequest, flwr.proto.fleet_pb2.PullTaskInsResponse] @@ -46,6 +50,12 @@ class FleetServicer(metaclass=abc.ABCMeta): context: grpc.ServicerContext, ) -> flwr.proto.fleet_pb2.DeleteNodeResponse: 
... + @abc.abstractmethod + def Ping(self, + request: flwr.proto.fleet_pb2.PingRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.fleet_pb2.PingResponse: ... + @abc.abstractmethod def PullTaskIns(self, request: flwr.proto.fleet_pb2.PullTaskInsRequest, diff --git a/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py b/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py index 278474477379..eb8dd800ea37 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py +++ b/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py @@ -15,7 +15,7 @@ """Fleet API gRPC request-response servicer.""" -from logging import INFO +from logging import DEBUG, INFO import grpc @@ -26,6 +26,8 @@ CreateNodeResponse, DeleteNodeRequest, DeleteNodeResponse, + PingRequest, + PingResponse, PullTaskInsRequest, PullTaskInsResponse, PushTaskResRequest, @@ -61,6 +63,14 @@ def DeleteNode( state=self.state_factory.state(), ) + def Ping(self, request: PingRequest, context: grpc.ServicerContext) -> PingResponse: + """.""" + log(DEBUG, "FleetServicer.Ping") + return message_handler.ping( + request=request, + state=self.state_factory.state(), + ) + def PullTaskIns( self, request: PullTaskInsRequest, context: grpc.ServicerContext ) -> PullTaskInsResponse: diff --git a/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py index c99a7854d53a..2e696dde78e1 100644 --- a/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py +++ b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py @@ -23,6 +23,8 @@ CreateNodeResponse, DeleteNodeRequest, DeleteNodeResponse, + PingRequest, + PingResponse, PullTaskInsRequest, PullTaskInsResponse, PushTaskResRequest, @@ -55,6 +57,14 @@ def delete_node(request: DeleteNodeRequest, state: State) -> DeleteNodeResponse: return DeleteNodeResponse() +def ping( + request: PingRequest, # pylint: disable=unused-argument + state: State, # pylint: disable=unused-argument +) -> PingResponse: + """.""" + return PingResponse(success=True) + + def pull_task_ins(request: PullTaskInsRequest, state: State) -> PullTaskInsResponse: """Pull TaskIns handler.""" # Get node_id if client node is not anonymous From 3acdf47291f67da70447d111e5822e8404046fb7 Mon Sep 17 00:00:00 2001 From: Javier Date: Tue, 26 Mar 2024 19:57:51 +0000 Subject: [PATCH 38/57] Make `ttl` a `float` (#3166) --- examples/app-pytorch/client_low_level.py | 6 ++-- examples/app-pytorch/server_custom.py | 3 +- examples/app-pytorch/server_low_level.py | 12 +++++-- src/proto/flwr/proto/task.proto | 2 +- src/py/flwr/client/client_app.py | 8 ++--- src/py/flwr/client/grpc_client/connection.py | 3 +- .../client/grpc_client/connection_test.py | 6 ++-- .../client/message_handler/message_handler.py | 4 +-- .../message_handler/message_handler_test.py | 13 ++++---- .../mod/secure_aggregation/secaggplus_mod.py | 2 +- .../secure_aggregation/secaggplus_mod_test.py | 13 ++++++-- src/py/flwr/client/mod/utils_test.py | 3 +- src/py/flwr/common/__init__.py | 2 ++ src/py/flwr/common/message.py | 32 +++++++++++-------- src/py/flwr/common/serde_test.py | 2 +- src/py/flwr/proto/task_pb2.py | 2 +- src/py/flwr/proto/task_pb2.pyi | 4 +-- .../flwr/server/compat/driver_client_proxy.py | 3 +- src/py/flwr/server/driver/driver.py | 11 ++++--- src/py/flwr/server/driver/driver_test.py | 12 ++++--- .../fleet/vce/backend/raybackend_test.py | 3 +- .../superlink/fleet/vce/vce_api_test.py | 10 ++++-- 
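
Usage note for the `Ping` RPC introduced in the patch above: the snippet below is a minimal sketch and not part of the patch. It shows how the new endpoint could be exercised against a running Fleet API using the regenerated stubs; the server address, the use of an insecure channel, and the anonymous-node fields (taken from flwr/proto/node.proto) are assumptions for illustration only.

import grpc

from flwr.proto.fleet_pb2 import PingRequest
from flwr.proto.fleet_pb2_grpc import FleetStub
from flwr.proto.node_pb2 import Node

# Open a channel to the Fleet API (address is an assumption for this sketch)
channel = grpc.insecure_channel("127.0.0.1:9092")
stub = FleetStub(channel)

# The handler added in message_handler.py above always answers success=True
response = stub.Ping(PingRequest(node=Node(node_id=0, anonymous=True)))
print("Ping succeeded:", response.success)
channel.close()
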
.../server/superlink/state/in_memory_state.py | 10 ++---- .../server/superlink/state/sqlite_state.py | 14 +++----- .../flwr/server/superlink/state/state_test.py | 9 +++--- src/py/flwr/server/utils/validator.py | 18 +++++++---- src/py/flwr/server/utils/validator_test.py | 3 ++ .../flwr/server/workflow/default_workflows.py | 8 ++--- .../secure_aggregation/secaggplus_workflow.py | 9 +++--- .../ray_transport/ray_client_proxy.py | 4 +-- .../ray_transport/ray_client_proxy_test.py | 3 +- 31 files changed, 137 insertions(+), 97 deletions(-) diff --git a/examples/app-pytorch/client_low_level.py b/examples/app-pytorch/client_low_level.py index feea1ee658fe..19268ff84ba4 100644 --- a/examples/app-pytorch/client_low_level.py +++ b/examples/app-pytorch/client_low_level.py @@ -20,16 +20,16 @@ def hello_world_mod(msg, ctx, call_next) -> Message: @app.train() def train(msg: Message, ctx: Context): print("`train` is not implemented, echoing original message") - return msg.create_reply(msg.content, ttl="") + return msg.create_reply(msg.content) @app.evaluate() def eval(msg: Message, ctx: Context): print("`evaluate` is not implemented, echoing original message") - return msg.create_reply(msg.content, ttl="") + return msg.create_reply(msg.content) @app.query() def query(msg: Message, ctx: Context): print("`query` is not implemented, echoing original message") - return msg.create_reply(msg.content, ttl="") + return msg.create_reply(msg.content) diff --git a/examples/app-pytorch/server_custom.py b/examples/app-pytorch/server_custom.py index 0c2851e2afee..ba9cdb11d694 100644 --- a/examples/app-pytorch/server_custom.py +++ b/examples/app-pytorch/server_custom.py @@ -13,6 +13,7 @@ Message, MessageType, Metrics, + DEFAULT_TTL, ) from flwr.common.recordset_compat import fitins_to_recordset, recordset_to_fitres from flwr.server import Driver, History @@ -89,7 +90,7 @@ def main(driver: Driver, context: Context) -> None: message_type=MessageType.TRAIN, dst_node_id=node_id, group_id=str(server_round), - ttl="", + ttl=DEFAULT_TTL, ) messages.append(message) diff --git a/examples/app-pytorch/server_low_level.py b/examples/app-pytorch/server_low_level.py index 560babac1b95..7ab79a4a04c8 100644 --- a/examples/app-pytorch/server_low_level.py +++ b/examples/app-pytorch/server_low_level.py @@ -3,7 +3,15 @@ import time import flwr as fl -from flwr.common import Context, NDArrays, Message, MessageType, Metrics, RecordSet +from flwr.common import ( + Context, + NDArrays, + Message, + MessageType, + Metrics, + RecordSet, + DEFAULT_TTL, +) from flwr.server import Driver @@ -30,7 +38,7 @@ def main(driver: Driver, context: Context) -> None: message_type=MessageType.TRAIN, dst_node_id=node_id, group_id=str(server_round), - ttl="", + ttl=DEFAULT_TTL, ) messages.append(message) diff --git a/src/proto/flwr/proto/task.proto b/src/proto/flwr/proto/task.proto index 423df76f1335..4c86ebae9562 100644 --- a/src/proto/flwr/proto/task.proto +++ b/src/proto/flwr/proto/task.proto @@ -27,7 +27,7 @@ message Task { Node consumer = 2; string created_at = 3; string delivered_at = 4; - string ttl = 5; + double ttl = 5; repeated string ancestry = 6; string task_type = 7; RecordSet recordset = 8; diff --git a/src/py/flwr/client/client_app.py b/src/py/flwr/client/client_app.py index ad7a01326991..0b56219807c6 100644 --- a/src/py/flwr/client/client_app.py +++ b/src/py/flwr/client/client_app.py @@ -115,7 +115,7 @@ def train(self) -> Callable[[ClientAppCallable], ClientAppCallable]: >>> def train(message: Message, context: Context) -> Message: >>> 
print("ClientApp training running") >>> # Create and return an echo reply message - >>> return message.create_reply(content=message.content(), ttl="") + >>> return message.create_reply(content=message.content()) """ def train_decorator(train_fn: ClientAppCallable) -> ClientAppCallable: @@ -143,7 +143,7 @@ def evaluate(self) -> Callable[[ClientAppCallable], ClientAppCallable]: >>> def evaluate(message: Message, context: Context) -> Message: >>> print("ClientApp evaluation running") >>> # Create and return an echo reply message - >>> return message.create_reply(content=message.content(), ttl="") + >>> return message.create_reply(content=message.content()) """ def evaluate_decorator(evaluate_fn: ClientAppCallable) -> ClientAppCallable: @@ -171,7 +171,7 @@ def query(self) -> Callable[[ClientAppCallable], ClientAppCallable]: >>> def query(message: Message, context: Context) -> Message: >>> print("ClientApp query running") >>> # Create and return an echo reply message - >>> return message.create_reply(content=message.content(), ttl="") + >>> return message.create_reply(content=message.content()) """ def query_decorator(query_fn: ClientAppCallable) -> ClientAppCallable: @@ -218,7 +218,7 @@ def _registration_error(fn_name: str) -> ValueError: >>> print("ClientApp {fn_name} running") >>> # Create and return an echo reply message >>> return message.create_reply( - >>> content=message.content(), ttl="" + >>> content=message.content() >>> ) """, ) diff --git a/src/py/flwr/client/grpc_client/connection.py b/src/py/flwr/client/grpc_client/connection.py index 163a58542c9e..4431b53d2592 100644 --- a/src/py/flwr/client/grpc_client/connection.py +++ b/src/py/flwr/client/grpc_client/connection.py @@ -23,6 +23,7 @@ from typing import Callable, Iterator, Optional, Tuple, Union, cast from flwr.common import ( + DEFAULT_TTL, GRPC_MAX_MESSAGE_LENGTH, ConfigsRecord, Message, @@ -180,7 +181,7 @@ def receive() -> Message: dst_node_id=0, reply_to_message="", group_id="", - ttl="", + ttl=DEFAULT_TTL, message_type=message_type, ), content=recordset, diff --git a/src/py/flwr/client/grpc_client/connection_test.py b/src/py/flwr/client/grpc_client/connection_test.py index b7737f511a2a..061e7d4377a0 100644 --- a/src/py/flwr/client/grpc_client/connection_test.py +++ b/src/py/flwr/client/grpc_client/connection_test.py @@ -23,7 +23,7 @@ import grpc -from flwr.common import ConfigsRecord, Message, Metadata, RecordSet +from flwr.common import DEFAULT_TTL, ConfigsRecord, Message, Metadata, RecordSet from flwr.common import recordset_compat as compat from flwr.common.constant import MessageTypeLegacy from flwr.common.retry_invoker import RetryInvoker, exponential @@ -50,7 +50,7 @@ dst_node_id=0, reply_to_message="", group_id="", - ttl="", + ttl=DEFAULT_TTL, message_type=MessageTypeLegacy.GET_PROPERTIES, ), content=compat.getpropertiesres_to_recordset( @@ -65,7 +65,7 @@ dst_node_id=0, reply_to_message="", group_id="", - ttl="", + ttl=DEFAULT_TTL, message_type="reconnect", ), content=RecordSet(configs_records={"config": ConfigsRecord({"reason": 0})}), diff --git a/src/py/flwr/client/message_handler/message_handler.py b/src/py/flwr/client/message_handler/message_handler.py index 9a5d70b1ac4d..87014f436cf7 100644 --- a/src/py/flwr/client/message_handler/message_handler.py +++ b/src/py/flwr/client/message_handler/message_handler.py @@ -81,7 +81,7 @@ def handle_control_message(message: Message) -> Tuple[Optional[Message], int]: reason = cast(int, disconnect_msg.disconnect_res.reason) recordset = RecordSet() 
recordset.configs_records["config"] = ConfigsRecord({"reason": reason}) - out_message = message.create_reply(recordset, ttl="") + out_message = message.create_reply(recordset) # Return TaskRes and sleep duration return out_message, sleep_duration @@ -143,7 +143,7 @@ def handle_legacy_message_from_msgtype( raise ValueError(f"Invalid message type: {message_type}") # Return Message - return message.create_reply(out_recordset, ttl="") + return message.create_reply(out_recordset) def _reconnect( diff --git a/src/py/flwr/client/message_handler/message_handler_test.py b/src/py/flwr/client/message_handler/message_handler_test.py index eaf16f7dc993..e3f6487421cc 100644 --- a/src/py/flwr/client/message_handler/message_handler_test.py +++ b/src/py/flwr/client/message_handler/message_handler_test.py @@ -23,6 +23,7 @@ from flwr.client import Client from flwr.client.typing import ClientFn from flwr.common import ( + DEFAULT_TTL, Code, Context, EvaluateIns, @@ -131,7 +132,7 @@ def test_client_without_get_properties() -> None: src_node_id=0, dst_node_id=1123, reply_to_message="", - ttl="", + ttl=DEFAULT_TTL, message_type=MessageTypeLegacy.GET_PROPERTIES, ), content=recordset, @@ -161,7 +162,7 @@ def test_client_without_get_properties() -> None: src_node_id=1123, dst_node_id=0, reply_to_message=message.metadata.message_id, - ttl="", + ttl=DEFAULT_TTL, message_type=MessageTypeLegacy.GET_PROPERTIES, ), content=expected_rs, @@ -184,7 +185,7 @@ def test_client_with_get_properties() -> None: src_node_id=0, dst_node_id=1123, reply_to_message="", - ttl="", + ttl=DEFAULT_TTL, message_type=MessageTypeLegacy.GET_PROPERTIES, ), content=recordset, @@ -214,7 +215,7 @@ def test_client_with_get_properties() -> None: src_node_id=1123, dst_node_id=0, reply_to_message=message.metadata.message_id, - ttl="", + ttl=DEFAULT_TTL, message_type=MessageTypeLegacy.GET_PROPERTIES, ), content=expected_rs, @@ -237,7 +238,7 @@ def setUp(self) -> None: dst_node_id=20, reply_to_message="", group_id="group1", - ttl="60", + ttl=DEFAULT_TTL, message_type="mock", ) self.valid_out_metadata = Metadata( @@ -247,7 +248,7 @@ def setUp(self) -> None: dst_node_id=10, reply_to_message="qwerty", group_id="group1", - ttl="60", + ttl=DEFAULT_TTL, message_type="mock", ) self.common_content = RecordSet() diff --git a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py index 989d5f6e1361..5b196ad84321 100644 --- a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py +++ b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py @@ -187,7 +187,7 @@ def secaggplus_mod( # Return message out_content.configs_records[RECORD_KEY_CONFIGS] = ConfigsRecord(res, False) - return msg.create_reply(out_content, ttl="") + return msg.create_reply(out_content) def check_stage(current_stage: str, configs: ConfigsRecord) -> None: diff --git a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py index db5ed67c02a4..36844a2983a1 100644 --- a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py +++ b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py @@ -19,7 +19,14 @@ from typing import Callable, Dict, List from flwr.client.mod import make_ffn -from flwr.common import ConfigsRecord, Context, Message, Metadata, RecordSet +from flwr.common import ( + DEFAULT_TTL, + ConfigsRecord, + Context, + Message, + Metadata, + RecordSet, +) from flwr.common.constant import MessageType from 
flwr.common.secure_aggregation.secaggplus_constants import ( RECORD_KEY_CONFIGS, @@ -38,7 +45,7 @@ def get_test_handler( """.""" def empty_ffn(_msg: Message, _2: Context) -> Message: - return _msg.create_reply(RecordSet(), ttl="") + return _msg.create_reply(RecordSet()) app = make_ffn(empty_ffn, [secaggplus_mod]) @@ -51,7 +58,7 @@ def func(configs: Dict[str, ConfigsRecordValues]) -> ConfigsRecord: dst_node_id=123, reply_to_message="", group_id="", - ttl="", + ttl=DEFAULT_TTL, message_type=MessageType.TRAIN, ), content=RecordSet( diff --git a/src/py/flwr/client/mod/utils_test.py b/src/py/flwr/client/mod/utils_test.py index e588b8b53b3b..4676a2c02c4b 100644 --- a/src/py/flwr/client/mod/utils_test.py +++ b/src/py/flwr/client/mod/utils_test.py @@ -20,6 +20,7 @@ from flwr.client.typing import ClientAppCallable, Mod from flwr.common import ( + DEFAULT_TTL, ConfigsRecord, Context, Message, @@ -84,7 +85,7 @@ def _get_dummy_flower_message() -> Message: src_node_id=0, dst_node_id=0, reply_to_message="", - ttl="", + ttl=DEFAULT_TTL, message_type="mock", ), ) diff --git a/src/py/flwr/common/__init__.py b/src/py/flwr/common/__init__.py index 9f9ff7ebc68a..2fb98c82dd6f 100644 --- a/src/py/flwr/common/__init__.py +++ b/src/py/flwr/common/__init__.py @@ -22,6 +22,7 @@ from .grpc import GRPC_MAX_MESSAGE_LENGTH from .logger import configure as configure from .logger import log as log +from .message import DEFAULT_TTL from .message import Error as Error from .message import Message as Message from .message import Metadata as Metadata @@ -87,6 +88,7 @@ "Message", "MessageType", "MessageTypeLegacy", + "DEFAULT_TTL", "Metadata", "Metrics", "MetricsAggregationFn", diff --git a/src/py/flwr/common/message.py b/src/py/flwr/common/message.py index 88cf750f1a94..25607179764d 100644 --- a/src/py/flwr/common/message.py +++ b/src/py/flwr/common/message.py @@ -20,6 +20,8 @@ from .record import RecordSet +DEFAULT_TTL = 3600 + @dataclass class Metadata: # pylint: disable=too-many-instance-attributes @@ -40,8 +42,8 @@ class Metadata: # pylint: disable=too-many-instance-attributes group_id : str An identifier for grouping messages. In some settings, this is used as the FL round. - ttl : str - Time-to-live for this message. + ttl : float + Time-to-live for this message in seconds. message_type : str A string that encodes the action to be executed on the receiving end. 
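A minimal sketch of the new numeric TTL handling, using only the `flwr.common` exports touched above (node IDs and the message type are placeholders): a reply created without an explicit `ttl` falls back to `DEFAULT_TTL` (3600 seconds), as the `create_reply` change further below makes explicit.

from flwr.common import DEFAULT_TTL, Message, Metadata, RecordSet

# Incoming message now carries a float TTL in seconds
incoming = Message(
    metadata=Metadata(
        run_id=0,
        message_id="",
        src_node_id=0,
        dst_node_id=1123,
        reply_to_message="",
        group_id="",
        ttl=DEFAULT_TTL,
        message_type="mock",
    ),
    content=RecordSet(),
)

# Omitting `ttl` in the reply defaults it to DEFAULT_TTL
reply = incoming.create_reply(RecordSet())
assert reply.metadata.ttl == DEFAULT_TTL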
@@ -57,7 +59,7 @@ class Metadata: # pylint: disable=too-many-instance-attributes _dst_node_id: int _reply_to_message: str _group_id: str - _ttl: str + _ttl: float _message_type: str _partition_id: int | None @@ -69,7 +71,7 @@ def __init__( # pylint: disable=too-many-arguments dst_node_id: int, reply_to_message: str, group_id: str, - ttl: str, + ttl: float, message_type: str, partition_id: int | None = None, ) -> None: @@ -124,12 +126,12 @@ def group_id(self, value: str) -> None: self._group_id = value @property - def ttl(self) -> str: + def ttl(self) -> float: """Time-to-live for this message.""" return self._ttl @ttl.setter - def ttl(self, value: str) -> None: + def ttl(self, value: float) -> None: """Set ttl.""" self._ttl = value @@ -266,7 +268,7 @@ def has_error(self) -> bool: """Return True if message has an error, else False.""" return self._error is not None - def _create_reply_metadata(self, ttl: str) -> Metadata: + def _create_reply_metadata(self, ttl: float) -> Metadata: """Construct metadata for a reply message.""" return Metadata( run_id=self.metadata.run_id, @@ -283,7 +285,7 @@ def _create_reply_metadata(self, ttl: str) -> Metadata: def create_error_reply( self, error: Error, - ttl: str, + ttl: float, ) -> Message: """Construct a reply message indicating an error happened. @@ -291,14 +293,14 @@ def create_error_reply( ---------- error : Error The error that was encountered. - ttl : str - Time-to-live for this message. + ttl : float + Time-to-live for this message in seconds. """ # Create reply with error message = Message(metadata=self._create_reply_metadata(ttl), error=error) return message - def create_reply(self, content: RecordSet, ttl: str) -> Message: + def create_reply(self, content: RecordSet, ttl: float | None = None) -> Message: """Create a reply to this message with specified content and TTL. The method generates a new `Message` as a reply to this message. @@ -309,14 +311,18 @@ def create_reply(self, content: RecordSet, ttl: str) -> Message: ---------- content : RecordSet The content for the reply message. - ttl : str - Time-to-live for this message. + ttl : Optional[float] (default: None) + Time-to-live for this message in seconds. If unset, it will use + the `common.DEFAULT_TTL` value. Returns ------- Message A new `Message` instance representing the reply. 
""" + if ttl is None: + ttl = DEFAULT_TTL + return Message( metadata=self._create_reply_metadata(ttl), content=content, diff --git a/src/py/flwr/common/serde_test.py b/src/py/flwr/common/serde_test.py index 8596e5d2f330..fc12ce95328f 100644 --- a/src/py/flwr/common/serde_test.py +++ b/src/py/flwr/common/serde_test.py @@ -219,7 +219,7 @@ def metadata(self) -> Metadata: src_node_id=self.rng.randint(0, 1 << 63), dst_node_id=self.rng.randint(0, 1 << 63), reply_to_message=self.get_str(64), - ttl=self.get_str(10), + ttl=self.rng.randint(1, 1 << 30), message_type=self.get_str(10), ) diff --git a/src/py/flwr/proto/task_pb2.py b/src/py/flwr/proto/task_pb2.py index 4d5f863e88dd..abf7d72d7174 100644 --- a/src/py/flwr/proto/task_pb2.py +++ b/src/py/flwr/proto/task_pb2.py @@ -18,7 +18,7 @@ from flwr.proto import error_pb2 as flwr_dot_proto_dot_error__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/task.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x1a\x66lwr/proto/transport.proto\x1a\x16\x66lwr/proto/error.proto\"\xf6\x01\n\x04Task\x12\"\n\x08producer\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\"\n\x08\x63onsumer\x18\x02 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x12\n\ncreated_at\x18\x03 \x01(\t\x12\x14\n\x0c\x64\x65livered_at\x18\x04 \x01(\t\x12\x0b\n\x03ttl\x18\x05 \x01(\t\x12\x10\n\x08\x61ncestry\x18\x06 \x03(\t\x12\x11\n\ttask_type\x18\x07 \x01(\t\x12(\n\trecordset\x18\x08 \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\t \x01(\x0b\x32\x11.flwr.proto.Error\"\\\n\x07TaskIns\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Task\"\\\n\x07TaskRes\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Taskb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/task.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x1a\x66lwr/proto/transport.proto\x1a\x16\x66lwr/proto/error.proto\"\xf6\x01\n\x04Task\x12\"\n\x08producer\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\"\n\x08\x63onsumer\x18\x02 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x12\n\ncreated_at\x18\x03 \x01(\t\x12\x14\n\x0c\x64\x65livered_at\x18\x04 \x01(\t\x12\x0b\n\x03ttl\x18\x05 \x01(\x01\x12\x10\n\x08\x61ncestry\x18\x06 \x03(\t\x12\x11\n\ttask_type\x18\x07 \x01(\t\x12(\n\trecordset\x18\x08 \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\t \x01(\x0b\x32\x11.flwr.proto.Error\"\\\n\x07TaskIns\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Task\"\\\n\x07TaskRes\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Taskb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) diff --git a/src/py/flwr/proto/task_pb2.pyi b/src/py/flwr/proto/task_pb2.pyi index b9c10139cfb3..735400eca701 100644 --- a/src/py/flwr/proto/task_pb2.pyi +++ b/src/py/flwr/proto/task_pb2.pyi @@ -31,7 +31,7 @@ class Task(google.protobuf.message.Message): def consumer(self) -> flwr.proto.node_pb2.Node: ... 
created_at: typing.Text delivered_at: typing.Text - ttl: typing.Text + ttl: builtins.float @property def ancestry(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[typing.Text]: ... task_type: typing.Text @@ -45,7 +45,7 @@ class Task(google.protobuf.message.Message): consumer: typing.Optional[flwr.proto.node_pb2.Node] = ..., created_at: typing.Text = ..., delivered_at: typing.Text = ..., - ttl: typing.Text = ..., + ttl: builtins.float = ..., ancestry: typing.Optional[typing.Iterable[typing.Text]] = ..., task_type: typing.Text = ..., recordset: typing.Optional[flwr.proto.recordset_pb2.RecordSet] = ..., diff --git a/src/py/flwr/server/compat/driver_client_proxy.py b/src/py/flwr/server/compat/driver_client_proxy.py index 84c67149fad7..99ba50d3e2d1 100644 --- a/src/py/flwr/server/compat/driver_client_proxy.py +++ b/src/py/flwr/server/compat/driver_client_proxy.py @@ -19,7 +19,7 @@ from typing import List, Optional from flwr import common -from flwr.common import MessageType, MessageTypeLegacy, RecordSet +from flwr.common import DEFAULT_TTL, MessageType, MessageTypeLegacy, RecordSet from flwr.common import recordset_compat as compat from flwr.common import serde from flwr.proto import driver_pb2, node_pb2, task_pb2 # pylint: disable=E0611 @@ -129,6 +129,7 @@ def _send_receive_recordset( ), task_type=task_type, recordset=serde.recordset_to_proto(recordset), + ttl=DEFAULT_TTL, ), ) push_task_ins_req = driver_pb2.PushTaskInsRequest( # pylint: disable=E1101 diff --git a/src/py/flwr/server/driver/driver.py b/src/py/flwr/server/driver/driver.py index 0098e0ce97c2..afebd90ea265 100644 --- a/src/py/flwr/server/driver/driver.py +++ b/src/py/flwr/server/driver/driver.py @@ -18,7 +18,7 @@ import time from typing import Iterable, List, Optional, Tuple -from flwr.common import Message, Metadata, RecordSet +from flwr.common import DEFAULT_TTL, Message, Metadata, RecordSet from flwr.common.serde import message_from_taskres, message_to_taskins from flwr.proto.driver_pb2 import ( # pylint: disable=E0611 CreateRunRequest, @@ -81,6 +81,7 @@ def _check_message(self, message: Message) -> None: and message.metadata.src_node_id == self.node.node_id and message.metadata.message_id == "" and message.metadata.reply_to_message == "" + and message.metadata.ttl > 0 ): raise ValueError(f"Invalid message: {message}") @@ -90,7 +91,7 @@ def create_message( # pylint: disable=too-many-arguments message_type: str, dst_node_id: int, group_id: str, - ttl: str, + ttl: float = DEFAULT_TTL, ) -> Message: """Create a new message with specified parameters. @@ -110,10 +111,10 @@ def create_message( # pylint: disable=too-many-arguments group_id : str The ID of the group to which this message is associated. In some settings, this is used as the FL round. - ttl : str + ttl : float (default: common.DEFAULT_TTL) Time-to-live for the round trip of this message, i.e., the time from sending - this message to receiving a reply. It specifies the duration for which the - message and its potential reply are considered valid. + this message to receiving a reply. It specifies in seconds the duration for + which the message and its potential reply are considered valid. 
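A compact sketch of the updated `Driver.create_message` signature, assuming a connected `driver` as in the low-level server example above; `broadcast_train` is a hypothetical helper and the group id is a placeholder round number, not part of the patch. With `ttl: float = DEFAULT_TTL`, omitting the argument is equivalent to passing `DEFAULT_TTL` explicitly.

from typing import List

from flwr.common import Message, MessageType, RecordSet
from flwr.server import Driver


def broadcast_train(
    driver: Driver, recordset: RecordSet, node_ids: List[int]
) -> List[Message]:
    """Send one TRAIN message per node, relying on the default TTL."""
    messages = [
        driver.create_message(
            content=recordset,
            message_type=MessageType.TRAIN,
            dst_node_id=node_id,
            group_id="1",
            # `ttl` omitted: defaults to DEFAULT_TTL (3600 seconds)
        )
        for node_id in node_ids
    ]
    return list(driver.send_and_receive(messages))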
Returns ------- diff --git a/src/py/flwr/server/driver/driver_test.py b/src/py/flwr/server/driver/driver_test.py index 5136f4f90210..3f1cd552250f 100644 --- a/src/py/flwr/server/driver/driver_test.py +++ b/src/py/flwr/server/driver/driver_test.py @@ -19,7 +19,7 @@ import unittest from unittest.mock import Mock, patch -from flwr.common import RecordSet +from flwr.common import DEFAULT_TTL, RecordSet from flwr.common.message import Error from flwr.common.serde import error_to_proto, recordset_to_proto from flwr.proto.driver_pb2 import ( # pylint: disable=E0611 @@ -99,7 +99,8 @@ def test_push_messages_valid(self) -> None: mock_response = Mock(task_ids=["id1", "id2"]) self.mock_grpc_driver.push_task_ins.return_value = mock_response msgs = [ - self.driver.create_message(RecordSet(), "", 0, "", "") for _ in range(2) + self.driver.create_message(RecordSet(), "", 0, "", DEFAULT_TTL) + for _ in range(2) ] # Execute @@ -121,7 +122,8 @@ def test_push_messages_invalid(self) -> None: mock_response = Mock(task_ids=["id1", "id2"]) self.mock_grpc_driver.push_task_ins.return_value = mock_response msgs = [ - self.driver.create_message(RecordSet(), "", 0, "", "") for _ in range(2) + self.driver.create_message(RecordSet(), "", 0, "", DEFAULT_TTL) + for _ in range(2) ] # Use invalid run_id msgs[1].metadata._run_id += 1 # pylint: disable=protected-access @@ -170,7 +172,7 @@ def test_send_and_receive_messages_complete(self) -> None: task_res_list=[TaskRes(task=Task(ancestry=["id1"], error=error_proto))] ) self.mock_grpc_driver.pull_task_res.return_value = mock_response - msgs = [self.driver.create_message(RecordSet(), "", 0, "", "")] + msgs = [self.driver.create_message(RecordSet(), "", 0, "", DEFAULT_TTL)] # Execute ret_msgs = list(self.driver.send_and_receive(msgs)) @@ -187,7 +189,7 @@ def test_send_and_receive_messages_timeout(self) -> None: self.mock_grpc_driver.push_task_ins.return_value = mock_response mock_response = Mock(task_res_list=[]) self.mock_grpc_driver.pull_task_res.return_value = mock_response - msgs = [self.driver.create_message(RecordSet(), "", 0, "", "")] + msgs = [self.driver.create_message(RecordSet(), "", 0, "", DEFAULT_TTL)] # Execute with patch("time.sleep", side_effect=lambda t: sleep_fn(t * 0.01)): diff --git a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py index 2610307bb749..dcac0b81d666 100644 --- a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py +++ b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py @@ -25,6 +25,7 @@ from flwr.client import Client, NumPyClient from flwr.client.client_app import ClientApp, LoadClientAppError from flwr.common import ( + DEFAULT_TTL, Config, ConfigsRecord, Context, @@ -111,7 +112,7 @@ def _create_message_and_context() -> Tuple[Message, Context, float]: src_node_id=0, dst_node_id=0, reply_to_message="", - ttl="", + ttl=DEFAULT_TTL, message_type=MessageTypeLegacy.GET_PROPERTIES, ), ) diff --git a/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py b/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py index 8c37399ae295..2c917c3eed27 100644 --- a/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py +++ b/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py @@ -26,7 +26,13 @@ from unittest import IsolatedAsyncioTestCase from uuid import UUID -from flwr.common import GetPropertiesIns, Message, MessageTypeLegacy, Metadata +from flwr.common import ( + DEFAULT_TTL, + GetPropertiesIns, + Message, + MessageTypeLegacy, + Metadata, 
+) from flwr.common.recordset_compat import getpropertiesins_to_recordset from flwr.common.serde import message_from_taskres, message_to_taskins from flwr.server.superlink.fleet.vce.vce_api import ( @@ -97,7 +103,7 @@ def register_messages_into_state( src_node_id=0, dst_node_id=dst_node_id, # indicate destination node reply_to_message="", - ttl="", + ttl=DEFAULT_TTL, message_type=( "a bad message" if erroneous_message diff --git a/src/py/flwr/server/superlink/state/in_memory_state.py b/src/py/flwr/server/superlink/state/in_memory_state.py index ac1ab158e254..7bff8ab4befc 100644 --- a/src/py/flwr/server/superlink/state/in_memory_state.py +++ b/src/py/flwr/server/superlink/state/in_memory_state.py @@ -17,7 +17,7 @@ import os import threading -from datetime import datetime, timedelta +from datetime import datetime from logging import ERROR from typing import Dict, List, Optional, Set from uuid import UUID, uuid4 @@ -50,15 +50,13 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: log(ERROR, "`run_id` is invalid") return None - # Create task_id, created_at and ttl + # Create task_id and created_at task_id = uuid4() created_at: datetime = now() - ttl: datetime = created_at + timedelta(hours=24) # Store TaskIns task_ins.task_id = str(task_id) task_ins.task.created_at = created_at.isoformat() - task_ins.task.ttl = ttl.isoformat() with self.lock: self.task_ins_store[task_id] = task_ins @@ -113,15 +111,13 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: log(ERROR, "`run_id` is invalid") return None - # Create task_id, created_at and ttl + # Create task_id and created_at task_id = uuid4() created_at: datetime = now() - ttl: datetime = created_at + timedelta(hours=24) # Store TaskRes task_res.task_id = str(task_id) task_res.task.created_at = created_at.isoformat() - task_res.task.ttl = ttl.isoformat() with self.lock: self.task_res_store[task_id] = task_res diff --git a/src/py/flwr/server/superlink/state/sqlite_state.py b/src/py/flwr/server/superlink/state/sqlite_state.py index 224c16cdf013..25d138f94203 100644 --- a/src/py/flwr/server/superlink/state/sqlite_state.py +++ b/src/py/flwr/server/superlink/state/sqlite_state.py @@ -18,7 +18,7 @@ import os import re import sqlite3 -from datetime import datetime, timedelta +from datetime import datetime from logging import DEBUG, ERROR from typing import Any, Dict, List, Optional, Set, Tuple, Union, cast from uuid import UUID, uuid4 @@ -54,7 +54,7 @@ consumer_node_id INTEGER, created_at TEXT, delivered_at TEXT, - ttl TEXT, + ttl REAL, ancestry TEXT, task_type TEXT, recordset BLOB, @@ -74,7 +74,7 @@ consumer_node_id INTEGER, created_at TEXT, delivered_at TEXT, - ttl TEXT, + ttl REAL, ancestry TEXT, task_type TEXT, recordset BLOB, @@ -185,15 +185,13 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: log(ERROR, errors) return None - # Create task_id, created_at and ttl + # Create task_id and created_at task_id = uuid4() created_at: datetime = now() - ttl: datetime = created_at + timedelta(hours=24) # Store TaskIns task_ins.task_id = str(task_id) task_ins.task.created_at = created_at.isoformat() - task_ins.task.ttl = ttl.isoformat() data = (task_ins_to_dict(task_ins),) columns = ", ".join([f":{key}" for key in data[0]]) query = f"INSERT INTO task_ins VALUES({columns});" @@ -320,15 +318,13 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: log(ERROR, errors) return None - # Create task_id, created_at and ttl + # Create task_id and created_at task_id = uuid4() created_at: datetime = now() - ttl: datetime 
= created_at + timedelta(hours=24) # Store TaskIns task_res.task_id = str(task_id) task_res.task.created_at = created_at.isoformat() - task_res.task.ttl = ttl.isoformat() data = (task_res_to_dict(task_res),) columns = ", ".join([f":{key}" for key in data[0]]) query = f"INSERT INTO task_res VALUES({columns});" diff --git a/src/py/flwr/server/superlink/state/state_test.py b/src/py/flwr/server/superlink/state/state_test.py index d0470a7ce7f7..01ac64de1380 100644 --- a/src/py/flwr/server/superlink/state/state_test.py +++ b/src/py/flwr/server/superlink/state/state_test.py @@ -22,6 +22,7 @@ from typing import List from uuid import uuid4 +from flwr.common import DEFAULT_TTL from flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.proto.recordset_pb2 import RecordSet # pylint: disable=E0611 from flwr.proto.task_pb2 import Task, TaskIns, TaskRes # pylint: disable=E0611 @@ -73,7 +74,6 @@ def test_store_task_ins_one(self) -> None: assert task_ins.task.created_at == "" # pylint: disable=no-member assert task_ins.task.delivered_at == "" # pylint: disable=no-member - assert task_ins.task.ttl == "" # pylint: disable=no-member # Execute state.store_task_ins(task_ins=task_ins) @@ -91,7 +91,6 @@ def test_store_task_ins_one(self) -> None: assert actual_task.created_at != "" assert actual_task.delivered_at != "" - assert actual_task.ttl != "" assert datetime.fromisoformat(actual_task.created_at) > datetime( 2020, 1, 1, tzinfo=timezone.utc @@ -99,9 +98,7 @@ def test_store_task_ins_one(self) -> None: assert datetime.fromisoformat(actual_task.delivered_at) > datetime( 2020, 1, 1, tzinfo=timezone.utc ) - assert datetime.fromisoformat(actual_task.ttl) > datetime( - 2020, 1, 1, tzinfo=timezone.utc - ) + assert actual_task.ttl > 0 def test_store_and_delete_tasks(self) -> None: """Test delete_tasks.""" @@ -420,6 +417,7 @@ def create_task_ins( consumer=consumer, task_type="mock", recordset=RecordSet(parameters={}, metrics={}, configs={}), + ttl=DEFAULT_TTL, ), ) return task @@ -442,6 +440,7 @@ def create_task_res( ancestry=ancestry, task_type="mock", recordset=RecordSet(parameters={}, metrics={}, configs={}), + ttl=DEFAULT_TTL, ), ) return task_res diff --git a/src/py/flwr/server/utils/validator.py b/src/py/flwr/server/utils/validator.py index f9b271beafdc..285807d8d0e7 100644 --- a/src/py/flwr/server/utils/validator.py +++ b/src/py/flwr/server/utils/validator.py @@ -36,8 +36,8 @@ def validate_task_ins_or_res(tasks_ins_res: Union[TaskIns, TaskRes]) -> List[str validation_errors.append("`created_at` must be an empty str") if tasks_ins_res.task.delivered_at != "": validation_errors.append("`delivered_at` must be an empty str") - if tasks_ins_res.task.ttl != "": - validation_errors.append("`ttl` must be an empty str") + if tasks_ins_res.task.ttl <= 0: + validation_errors.append("`ttl` must be higher than zero") # TaskIns specific if isinstance(tasks_ins_res, TaskIns): @@ -66,8 +66,11 @@ def validate_task_ins_or_res(tasks_ins_res: Union[TaskIns, TaskRes]) -> List[str # Content check if tasks_ins_res.task.task_type == "": validation_errors.append("`task_type` MUST be set") - if not tasks_ins_res.task.HasField("recordset"): - validation_errors.append("`recordset` MUST be set") + if not ( + tasks_ins_res.task.HasField("recordset") + ^ tasks_ins_res.task.HasField("error") + ): + validation_errors.append("Either `recordset` or `error` MUST be set") # Ancestors if len(tasks_ins_res.task.ancestry) != 0: @@ -106,8 +109,11 @@ def validate_task_ins_or_res(tasks_ins_res: Union[TaskIns, TaskRes]) -> List[str # Content 
check if tasks_ins_res.task.task_type == "": validation_errors.append("`task_type` MUST be set") - if not tasks_ins_res.task.HasField("recordset"): - validation_errors.append("`recordset` MUST be set") + if not ( + tasks_ins_res.task.HasField("recordset") + ^ tasks_ins_res.task.HasField("error") + ): + validation_errors.append("Either `recordset` or `error` MUST be set") # Ancestors if len(tasks_ins_res.task.ancestry) == 0: diff --git a/src/py/flwr/server/utils/validator_test.py b/src/py/flwr/server/utils/validator_test.py index 8e0849508020..926103c6b09a 100644 --- a/src/py/flwr/server/utils/validator_test.py +++ b/src/py/flwr/server/utils/validator_test.py @@ -18,6 +18,7 @@ import unittest from typing import List, Tuple +from flwr.common import DEFAULT_TTL from flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.proto.recordset_pb2 import RecordSet # pylint: disable=E0611 from flwr.proto.task_pb2 import Task, TaskIns, TaskRes # pylint: disable=E0611 @@ -96,6 +97,7 @@ def create_task_ins( consumer=consumer, task_type="mock", recordset=RecordSet(parameters={}, metrics={}, configs={}), + ttl=DEFAULT_TTL, ), ) return task @@ -117,6 +119,7 @@ def create_task_res( ancestry=ancestry, task_type="mock", recordset=RecordSet(parameters={}, metrics={}, configs={}), + ttl=DEFAULT_TTL, ), ) return task_res diff --git a/src/py/flwr/server/workflow/default_workflows.py b/src/py/flwr/server/workflow/default_workflows.py index 876ae56dcadc..42b1151f9835 100644 --- a/src/py/flwr/server/workflow/default_workflows.py +++ b/src/py/flwr/server/workflow/default_workflows.py @@ -21,7 +21,7 @@ from typing import Optional, cast import flwr.common.recordset_compat as compat -from flwr.common import ConfigsRecord, Context, GetParametersIns, log +from flwr.common import DEFAULT_TTL, ConfigsRecord, Context, GetParametersIns, log from flwr.common.constant import MessageType, MessageTypeLegacy from ..compat.app_utils import start_update_client_manager_thread @@ -127,7 +127,7 @@ def default_init_params_workflow(driver: Driver, context: Context) -> None: message_type=MessageTypeLegacy.GET_PARAMETERS, dst_node_id=random_client.node_id, group_id="0", - ttl="", + ttl=DEFAULT_TTL, ) ] ) @@ -226,7 +226,7 @@ def default_fit_workflow( # pylint: disable=R0914 message_type=MessageType.TRAIN, dst_node_id=proxy.node_id, group_id=str(current_round), - ttl="", + ttl=DEFAULT_TTL, ) for proxy, fitins in client_instructions ] @@ -306,7 +306,7 @@ def default_evaluate_workflow(driver: Driver, context: Context) -> None: message_type=MessageType.EVALUATE, dst_node_id=proxy.node_id, group_id=str(current_round), - ttl="", + ttl=DEFAULT_TTL, ) for proxy, evalins in client_instructions ] diff --git a/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py b/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py index 42ee9c15f1cd..326947b653ff 100644 --- a/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py +++ b/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py @@ -22,6 +22,7 @@ import flwr.common.recordset_compat as compat from flwr.common import ( + DEFAULT_TTL, ConfigsRecord, Context, FitRes, @@ -373,7 +374,7 @@ def make(nid: int) -> Message: message_type=MessageType.TRAIN, dst_node_id=nid, group_id=str(cfg[WorkflowKey.CURRENT_ROUND]), - ttl="", + ttl=DEFAULT_TTL, ) log( @@ -421,7 +422,7 @@ def make(nid: int) -> Message: message_type=MessageType.TRAIN, dst_node_id=nid, group_id=str(cfg[WorkflowKey.CURRENT_ROUND]), - ttl="", + ttl=DEFAULT_TTL, ) # Broadcast 
public keys to clients and receive secret key shares @@ -492,7 +493,7 @@ def make(nid: int) -> Message: message_type=MessageType.TRAIN, dst_node_id=nid, group_id=str(cfg[WorkflowKey.CURRENT_ROUND]), - ttl="", + ttl=DEFAULT_TTL, ) log( @@ -563,7 +564,7 @@ def make(nid: int) -> Message: message_type=MessageType.TRAIN, dst_node_id=nid, group_id=str(current_round), - ttl="", + ttl=DEFAULT_TTL, ) log( diff --git a/src/py/flwr/simulation/ray_transport/ray_client_proxy.py b/src/py/flwr/simulation/ray_transport/ray_client_proxy.py index c3493163ac52..5e344eb087ee 100644 --- a/src/py/flwr/simulation/ray_transport/ray_client_proxy.py +++ b/src/py/flwr/simulation/ray_transport/ray_client_proxy.py @@ -23,7 +23,7 @@ from flwr.client import ClientFn from flwr.client.client_app import ClientApp from flwr.client.node_state import NodeState -from flwr.common import Message, Metadata, RecordSet +from flwr.common import DEFAULT_TTL, Message, Metadata, RecordSet from flwr.common.constant import MessageType, MessageTypeLegacy from flwr.common.logger import log from flwr.common.recordset_compat import ( @@ -105,7 +105,7 @@ def _wrap_recordset_in_message( src_node_id=0, dst_node_id=int(self.cid), reply_to_message="", - ttl=str(timeout) if timeout else "", + ttl=timeout if timeout else DEFAULT_TTL, message_type=message_type, partition_id=int(self.cid), ), diff --git a/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py b/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py index 22c5425cd9fd..9680b3846f1d 100644 --- a/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py +++ b/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py @@ -24,6 +24,7 @@ from flwr.client import Client, NumPyClient from flwr.client.client_app import ClientApp from flwr.common import ( + DEFAULT_TTL, Config, ConfigsRecord, Context, @@ -202,7 +203,7 @@ def _load_app() -> ClientApp: src_node_id=0, dst_node_id=12345, reply_to_message="", - ttl="", + ttl=DEFAULT_TTL, message_type=MessageTypeLegacy.GET_PROPERTIES, partition_id=int(cid), ), From 8075108fc10779da092b2fc6b7c729ea7bad0dd4 Mon Sep 17 00:00:00 2001 From: Adam Narozniak <51029327+adam-narozniak@users.noreply.github.com> Date: Wed, 27 Mar 2024 14:50:44 +0100 Subject: [PATCH 39/57] Bump up the release version to 0.1.0 in docs (#3177) --- datasets/doc/source/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/datasets/doc/source/conf.py b/datasets/doc/source/conf.py index e5c61b5559cb..755147bc9e1d 100644 --- a/datasets/doc/source/conf.py +++ b/datasets/doc/source/conf.py @@ -38,7 +38,7 @@ author = "The Flower Authors" # The full version, including alpha/beta/rc tags -release = "0.0.2" +release = "0.1.0" # -- General configuration --------------------------------------------------- From 3b20f73a1978a95149165f185690bc425d63b510 Mon Sep 17 00:00:00 2001 From: Heng Pan Date: Wed, 27 Mar 2024 16:15:25 +0000 Subject: [PATCH 40/57] Add ping_interval to PingRequest (#3179) --- src/proto/flwr/proto/fleet.proto | 5 ++++- src/py/flwr/proto/fleet_pb2.py | 36 ++++++++++++++++---------------- src/py/flwr/proto/fleet_pb2.pyi | 5 ++++- 3 files changed, 26 insertions(+), 20 deletions(-) diff --git a/src/proto/flwr/proto/fleet.proto b/src/proto/flwr/proto/fleet.proto index fcb301181f5a..fa65f3ee9fed 100644 --- a/src/proto/flwr/proto/fleet.proto +++ b/src/proto/flwr/proto/fleet.proto @@ -45,7 +45,10 @@ message DeleteNodeRequest { Node node = 1; } message DeleteNodeResponse {} // Ping messages -message PingRequest { Node node = 1; } +message 
PingRequest { + Node node = 1; + double ping_interval = 2; +} message PingResponse { bool success = 1; } // PullTaskIns messages diff --git a/src/py/flwr/proto/fleet_pb2.py b/src/py/flwr/proto/fleet_pb2.py index dbf64fb850a5..546987f1c807 100644 --- a/src/py/flwr/proto/fleet_pb2.py +++ b/src/py/flwr/proto/fleet_pb2.py @@ -16,7 +16,7 @@ from flwr.proto import task_pb2 as flwr_dot_proto_dot_task__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16\x66lwr/proto/fleet.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\"\x13\n\x11\x43reateNodeRequest\"4\n\x12\x43reateNodeResponse\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"3\n\x11\x44\x65leteNodeRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"\x14\n\x12\x44\x65leteNodeResponse\"-\n\x0bPingRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"\x1f\n\x0cPingResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\"F\n\x12PullTaskInsRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"k\n\x13PullTaskInsResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12*\n\rtask_ins_list\x18\x02 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"@\n\x12PushTaskResRequest\x12*\n\rtask_res_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskRes\"\xae\x01\n\x13PushTaskResResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12=\n\x07results\x18\x02 \x03(\x0b\x32,.flwr.proto.PushTaskResResponse.ResultsEntry\x1a.\n\x0cResultsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\r:\x02\x38\x01\"\x1e\n\tReconnect\x12\x11\n\treconnect\x18\x01 \x01(\x04\x32\x86\x03\n\x05\x46leet\x12M\n\nCreateNode\x12\x1d.flwr.proto.CreateNodeRequest\x1a\x1e.flwr.proto.CreateNodeResponse\"\x00\x12M\n\nDeleteNode\x12\x1d.flwr.proto.DeleteNodeRequest\x1a\x1e.flwr.proto.DeleteNodeResponse\"\x00\x12;\n\x04Ping\x12\x17.flwr.proto.PingRequest\x1a\x18.flwr.proto.PingResponse\"\x00\x12P\n\x0bPullTaskIns\x12\x1e.flwr.proto.PullTaskInsRequest\x1a\x1f.flwr.proto.PullTaskInsResponse\"\x00\x12P\n\x0bPushTaskRes\x12\x1e.flwr.proto.PushTaskResRequest\x1a\x1f.flwr.proto.PushTaskResResponse\"\x00\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16\x66lwr/proto/fleet.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\"\x13\n\x11\x43reateNodeRequest\"4\n\x12\x43reateNodeResponse\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"3\n\x11\x44\x65leteNodeRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"\x14\n\x12\x44\x65leteNodeResponse\"D\n\x0bPingRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x15\n\rping_interval\x18\x02 \x01(\x01\"\x1f\n\x0cPingResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\"F\n\x12PullTaskInsRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"k\n\x13PullTaskInsResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12*\n\rtask_ins_list\x18\x02 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"@\n\x12PushTaskResRequest\x12*\n\rtask_res_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskRes\"\xae\x01\n\x13PushTaskResResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12=\n\x07results\x18\x02 \x03(\x0b\x32,.flwr.proto.PushTaskResResponse.ResultsEntry\x1a.\n\x0cResultsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\r:\x02\x38\x01\"\x1e\n\tReconnect\x12\x11\n\treconnect\x18\x01 
\x01(\x04\x32\x86\x03\n\x05\x46leet\x12M\n\nCreateNode\x12\x1d.flwr.proto.CreateNodeRequest\x1a\x1e.flwr.proto.CreateNodeResponse\"\x00\x12M\n\nDeleteNode\x12\x1d.flwr.proto.DeleteNodeRequest\x1a\x1e.flwr.proto.DeleteNodeResponse\"\x00\x12;\n\x04Ping\x12\x17.flwr.proto.PingRequest\x1a\x18.flwr.proto.PingResponse\"\x00\x12P\n\x0bPullTaskIns\x12\x1e.flwr.proto.PullTaskInsRequest\x1a\x1f.flwr.proto.PullTaskInsResponse\"\x00\x12P\n\x0bPushTaskRes\x12\x1e.flwr.proto.PushTaskResRequest\x1a\x1f.flwr.proto.PushTaskResResponse\"\x00\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -34,21 +34,21 @@ _globals['_DELETENODERESPONSE']._serialized_start=212 _globals['_DELETENODERESPONSE']._serialized_end=232 _globals['_PINGREQUEST']._serialized_start=234 - _globals['_PINGREQUEST']._serialized_end=279 - _globals['_PINGRESPONSE']._serialized_start=281 - _globals['_PINGRESPONSE']._serialized_end=312 - _globals['_PULLTASKINSREQUEST']._serialized_start=314 - _globals['_PULLTASKINSREQUEST']._serialized_end=384 - _globals['_PULLTASKINSRESPONSE']._serialized_start=386 - _globals['_PULLTASKINSRESPONSE']._serialized_end=493 - _globals['_PUSHTASKRESREQUEST']._serialized_start=495 - _globals['_PUSHTASKRESREQUEST']._serialized_end=559 - _globals['_PUSHTASKRESRESPONSE']._serialized_start=562 - _globals['_PUSHTASKRESRESPONSE']._serialized_end=736 - _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_start=690 - _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_end=736 - _globals['_RECONNECT']._serialized_start=738 - _globals['_RECONNECT']._serialized_end=768 - _globals['_FLEET']._serialized_start=771 - _globals['_FLEET']._serialized_end=1161 + _globals['_PINGREQUEST']._serialized_end=302 + _globals['_PINGRESPONSE']._serialized_start=304 + _globals['_PINGRESPONSE']._serialized_end=335 + _globals['_PULLTASKINSREQUEST']._serialized_start=337 + _globals['_PULLTASKINSREQUEST']._serialized_end=407 + _globals['_PULLTASKINSRESPONSE']._serialized_start=409 + _globals['_PULLTASKINSRESPONSE']._serialized_end=516 + _globals['_PUSHTASKRESREQUEST']._serialized_start=518 + _globals['_PUSHTASKRESREQUEST']._serialized_end=582 + _globals['_PUSHTASKRESRESPONSE']._serialized_start=585 + _globals['_PUSHTASKRESRESPONSE']._serialized_end=759 + _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_start=713 + _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_end=759 + _globals['_RECONNECT']._serialized_start=761 + _globals['_RECONNECT']._serialized_end=791 + _globals['_FLEET']._serialized_start=794 + _globals['_FLEET']._serialized_end=1184 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/fleet_pb2.pyi b/src/py/flwr/proto/fleet_pb2.pyi index 39edb61ca0d7..e5c5b7366464 100644 --- a/src/py/flwr/proto/fleet_pb2.pyi +++ b/src/py/flwr/proto/fleet_pb2.pyi @@ -57,14 +57,17 @@ class PingRequest(google.protobuf.message.Message): """Ping messages""" DESCRIPTOR: google.protobuf.descriptor.Descriptor NODE_FIELD_NUMBER: builtins.int + PING_INTERVAL_FIELD_NUMBER: builtins.int @property def node(self) -> flwr.proto.node_pb2.Node: ... + ping_interval: builtins.float def __init__(self, *, node: typing.Optional[flwr.proto.node_pb2.Node] = ..., + ping_interval: builtins.float = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["node",b"node"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["node",b"node"]) -> None: ... 
+ def ClearField(self, field_name: typing_extensions.Literal["node",b"node","ping_interval",b"ping_interval"]) -> None: ... global___PingRequest = PingRequest class PingResponse(google.protobuf.message.Message): From 9a36d8a82de856d1be442488786180d8beedc2c9 Mon Sep 17 00:00:00 2001 From: Javier Date: Wed, 27 Mar 2024 18:20:27 +0000 Subject: [PATCH 41/57] Add `pushed_at` into `Task` (#3173) --- src/proto/flwr/proto/task.proto | 11 ++++++----- src/py/flwr/proto/task_pb2.py | 12 ++++++------ src/py/flwr/proto/task_pb2.pyi | 5 ++++- .../flwr/server/superlink/driver/driver_servicer.py | 6 ++++++ .../fleet/message_handler/message_handler.py | 4 ++++ .../flwr/server/superlink/fleet/vce/vce_api_test.py | 4 ++++ src/py/flwr/server/superlink/state/sqlite_state.py | 8 +++++++- .../flwr/server/superlink/state/sqlite_state_test.py | 1 + src/py/flwr/server/superlink/state/state_test.py | 3 +++ src/py/flwr/server/utils/validator.py | 5 ++++- src/py/flwr/server/utils/validator_test.py | 5 +++++ 11 files changed, 50 insertions(+), 14 deletions(-) diff --git a/src/proto/flwr/proto/task.proto b/src/proto/flwr/proto/task.proto index 4c86ebae9562..25e65e59cedc 100644 --- a/src/proto/flwr/proto/task.proto +++ b/src/proto/flwr/proto/task.proto @@ -27,11 +27,12 @@ message Task { Node consumer = 2; string created_at = 3; string delivered_at = 4; - double ttl = 5; - repeated string ancestry = 6; - string task_type = 7; - RecordSet recordset = 8; - Error error = 9; + double pushed_at = 5; + double ttl = 6; + repeated string ancestry = 7; + string task_type = 8; + RecordSet recordset = 9; + Error error = 10; } message TaskIns { diff --git a/src/py/flwr/proto/task_pb2.py b/src/py/flwr/proto/task_pb2.py index abf7d72d7174..3546f01efded 100644 --- a/src/py/flwr/proto/task_pb2.py +++ b/src/py/flwr/proto/task_pb2.py @@ -18,7 +18,7 @@ from flwr.proto import error_pb2 as flwr_dot_proto_dot_error__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/task.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x1a\x66lwr/proto/transport.proto\x1a\x16\x66lwr/proto/error.proto\"\xf6\x01\n\x04Task\x12\"\n\x08producer\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\"\n\x08\x63onsumer\x18\x02 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x12\n\ncreated_at\x18\x03 \x01(\t\x12\x14\n\x0c\x64\x65livered_at\x18\x04 \x01(\t\x12\x0b\n\x03ttl\x18\x05 \x01(\x01\x12\x10\n\x08\x61ncestry\x18\x06 \x03(\t\x12\x11\n\ttask_type\x18\x07 \x01(\t\x12(\n\trecordset\x18\x08 \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\t \x01(\x0b\x32\x11.flwr.proto.Error\"\\\n\x07TaskIns\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Task\"\\\n\x07TaskRes\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Taskb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/task.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x1a\x66lwr/proto/transport.proto\x1a\x16\x66lwr/proto/error.proto\"\x89\x02\n\x04Task\x12\"\n\x08producer\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\"\n\x08\x63onsumer\x18\x02 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x12\n\ncreated_at\x18\x03 \x01(\t\x12\x14\n\x0c\x64\x65livered_at\x18\x04 \x01(\t\x12\x11\n\tpushed_at\x18\x05 \x01(\x01\x12\x0b\n\x03ttl\x18\x06 
\x01(\x01\x12\x10\n\x08\x61ncestry\x18\x07 \x03(\t\x12\x11\n\ttask_type\x18\x08 \x01(\t\x12(\n\trecordset\x18\t \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\n \x01(\x0b\x32\x11.flwr.proto.Error\"\\\n\x07TaskIns\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Task\"\\\n\x07TaskRes\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Taskb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -26,9 +26,9 @@ if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None _globals['_TASK']._serialized_start=141 - _globals['_TASK']._serialized_end=387 - _globals['_TASKINS']._serialized_start=389 - _globals['_TASKINS']._serialized_end=481 - _globals['_TASKRES']._serialized_start=483 - _globals['_TASKRES']._serialized_end=575 + _globals['_TASK']._serialized_end=406 + _globals['_TASKINS']._serialized_start=408 + _globals['_TASKINS']._serialized_end=500 + _globals['_TASKRES']._serialized_start=502 + _globals['_TASKRES']._serialized_end=594 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/task_pb2.pyi b/src/py/flwr/proto/task_pb2.pyi index 735400eca701..8f0549ceddc9 100644 --- a/src/py/flwr/proto/task_pb2.pyi +++ b/src/py/flwr/proto/task_pb2.pyi @@ -20,6 +20,7 @@ class Task(google.protobuf.message.Message): CONSUMER_FIELD_NUMBER: builtins.int CREATED_AT_FIELD_NUMBER: builtins.int DELIVERED_AT_FIELD_NUMBER: builtins.int + PUSHED_AT_FIELD_NUMBER: builtins.int TTL_FIELD_NUMBER: builtins.int ANCESTRY_FIELD_NUMBER: builtins.int TASK_TYPE_FIELD_NUMBER: builtins.int @@ -31,6 +32,7 @@ class Task(google.protobuf.message.Message): def consumer(self) -> flwr.proto.node_pb2.Node: ... created_at: typing.Text delivered_at: typing.Text + pushed_at: builtins.float ttl: builtins.float @property def ancestry(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[typing.Text]: ... @@ -45,6 +47,7 @@ class Task(google.protobuf.message.Message): consumer: typing.Optional[flwr.proto.node_pb2.Node] = ..., created_at: typing.Text = ..., delivered_at: typing.Text = ..., + pushed_at: builtins.float = ..., ttl: builtins.float = ..., ancestry: typing.Optional[typing.Iterable[typing.Text]] = ..., task_type: typing.Text = ..., @@ -52,7 +55,7 @@ class Task(google.protobuf.message.Message): error: typing.Optional[flwr.proto.error_pb2.Error] = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["consumer",b"consumer","error",b"error","producer",b"producer","recordset",b"recordset"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["ancestry",b"ancestry","consumer",b"consumer","created_at",b"created_at","delivered_at",b"delivered_at","error",b"error","producer",b"producer","recordset",b"recordset","task_type",b"task_type","ttl",b"ttl"]) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["ancestry",b"ancestry","consumer",b"consumer","created_at",b"created_at","delivered_at",b"delivered_at","error",b"error","producer",b"producer","pushed_at",b"pushed_at","recordset",b"recordset","task_type",b"task_type","ttl",b"ttl"]) -> None: ... 
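A small sketch of the new `pushed_at` field (field values are placeholders), mirroring what `DriverServicer.PushTaskIns` and the fleet message handler now do before storing a task.

import time

from flwr.common import DEFAULT_TTL
from flwr.proto.task_pb2 import Task, TaskIns

task_ins = TaskIns(
    task_id="",
    group_id="0",
    run_id=0,
    task=Task(task_type="mock", ttl=DEFAULT_TTL),
)
# Stamp the push time as a unix timestamp in seconds; the validator change
# below rejects tasks whose `pushed_at` is not a recent timestamp
task_ins.task.pushed_at = time.time()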
global___Task = Task class TaskIns(google.protobuf.message.Message): diff --git a/src/py/flwr/server/superlink/driver/driver_servicer.py b/src/py/flwr/server/superlink/driver/driver_servicer.py index 59e51ef52d8e..c5e8d055b708 100644 --- a/src/py/flwr/server/superlink/driver/driver_servicer.py +++ b/src/py/flwr/server/superlink/driver/driver_servicer.py @@ -15,6 +15,7 @@ """Driver API servicer.""" +import time from logging import DEBUG, INFO from typing import List, Optional, Set from uuid import UUID @@ -72,6 +73,11 @@ def PushTaskIns( """Push a set of TaskIns.""" log(DEBUG, "DriverServicer.PushTaskIns") + # Set pushed_at (timestamp in seconds) + pushed_at = time.time() + for task_ins in request.task_ins_list: + task_ins.task.pushed_at = pushed_at + # Validate request _raise_if(len(request.task_ins_list) == 0, "`task_ins_list` must not be empty") for task_ins in request.task_ins_list: diff --git a/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py index 2e696dde78e1..d4e63a8f2d46 100644 --- a/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py +++ b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py @@ -15,6 +15,7 @@ """Fleet API message handlers.""" +import time from typing import List, Optional from uuid import UUID @@ -87,6 +88,9 @@ def push_task_res(request: PushTaskResRequest, state: State) -> PushTaskResRespo task_res: TaskRes = request.task_res_list[0] # pylint: enable=no-member + # Set pushed_at (timestamp in seconds) + task_res.task.pushed_at = time.time() + # Store TaskRes in State task_id: Optional[UUID] = state.store_task_res(task_res=task_res) diff --git a/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py b/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py index 2c917c3eed27..9e063e33ff81 100644 --- a/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py +++ b/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py @@ -17,6 +17,7 @@ import asyncio import threading +import time from itertools import cycle from json import JSONDecodeError from math import pi @@ -113,6 +114,9 @@ def register_messages_into_state( ) # Convert Message to TaskIns taskins = message_to_taskins(message) + # Normally recorded by the driver servicer + # but since we don't have one in this test, we do this manually + taskins.task.pushed_at = time.time() # Instert in state task_id = state.store_task_ins(taskins) if task_id: diff --git a/src/py/flwr/server/superlink/state/sqlite_state.py b/src/py/flwr/server/superlink/state/sqlite_state.py index 25d138f94203..7d8cd2e8bda2 100644 --- a/src/py/flwr/server/superlink/state/sqlite_state.py +++ b/src/py/flwr/server/superlink/state/sqlite_state.py @@ -54,6 +54,7 @@ consumer_node_id INTEGER, created_at TEXT, delivered_at TEXT, + pushed_at REAL, ttl REAL, ancestry TEXT, task_type TEXT, @@ -74,6 +75,7 @@ consumer_node_id INTEGER, created_at TEXT, delivered_at TEXT, + pushed_at REAL, ttl REAL, ancestry TEXT, task_type TEXT, @@ -318,7 +320,7 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: log(ERROR, errors) return None - # Create task_id and created_at + # Create task_id task_id = uuid4() created_at: datetime = now() @@ -540,6 +542,7 @@ def task_ins_to_dict(task_msg: TaskIns) -> Dict[str, Any]: "consumer_node_id": task_msg.task.consumer.node_id, "created_at": task_msg.task.created_at, "delivered_at": task_msg.task.delivered_at, + "pushed_at": task_msg.task.pushed_at, "ttl": task_msg.task.ttl, "ancestry": 
",".join(task_msg.task.ancestry), "task_type": task_msg.task.task_type, @@ -560,6 +563,7 @@ def task_res_to_dict(task_msg: TaskRes) -> Dict[str, Any]: "consumer_node_id": task_msg.task.consumer.node_id, "created_at": task_msg.task.created_at, "delivered_at": task_msg.task.delivered_at, + "pushed_at": task_msg.task.pushed_at, "ttl": task_msg.task.ttl, "ancestry": ",".join(task_msg.task.ancestry), "task_type": task_msg.task.task_type, @@ -588,6 +592,7 @@ def dict_to_task_ins(task_dict: Dict[str, Any]) -> TaskIns: ), created_at=task_dict["created_at"], delivered_at=task_dict["delivered_at"], + pushed_at=task_dict["pushed_at"], ttl=task_dict["ttl"], ancestry=task_dict["ancestry"].split(","), task_type=task_dict["task_type"], @@ -617,6 +622,7 @@ def dict_to_task_res(task_dict: Dict[str, Any]) -> TaskRes: ), created_at=task_dict["created_at"], delivered_at=task_dict["delivered_at"], + pushed_at=task_dict["pushed_at"], ttl=task_dict["ttl"], ancestry=task_dict["ancestry"].split(","), task_type=task_dict["task_type"], diff --git a/src/py/flwr/server/superlink/state/sqlite_state_test.py b/src/py/flwr/server/superlink/state/sqlite_state_test.py index 9eef71e396e3..20927df1cf12 100644 --- a/src/py/flwr/server/superlink/state/sqlite_state_test.py +++ b/src/py/flwr/server/superlink/state/sqlite_state_test.py @@ -38,6 +38,7 @@ def test_ins_res_to_dict(self) -> None: "consumer_node_id", "created_at", "delivered_at", + "pushed_at", "ttl", "ancestry", "task_type", diff --git a/src/py/flwr/server/superlink/state/state_test.py b/src/py/flwr/server/superlink/state/state_test.py index 01ac64de1380..f5e9adab2647 100644 --- a/src/py/flwr/server/superlink/state/state_test.py +++ b/src/py/flwr/server/superlink/state/state_test.py @@ -16,6 +16,7 @@ # pylint: disable=invalid-name, disable=R0904 import tempfile +import time import unittest from abc import abstractmethod from datetime import datetime, timezone @@ -420,6 +421,7 @@ def create_task_ins( ttl=DEFAULT_TTL, ), ) + task.task.pushed_at = time.time() return task @@ -443,6 +445,7 @@ def create_task_res( ttl=DEFAULT_TTL, ), ) + task_res.task.pushed_at = time.time() return task_res diff --git a/src/py/flwr/server/utils/validator.py b/src/py/flwr/server/utils/validator.py index 285807d8d0e7..d8b287b0f674 100644 --- a/src/py/flwr/server/utils/validator.py +++ b/src/py/flwr/server/utils/validator.py @@ -31,13 +31,16 @@ def validate_task_ins_or_res(tasks_ins_res: Union[TaskIns, TaskRes]) -> List[str if not tasks_ins_res.HasField("task"): validation_errors.append("`task` does not set field `task`") - # Created/delivered/TTL + # Created/delivered/TTL/Pushed if tasks_ins_res.task.created_at != "": validation_errors.append("`created_at` must be an empty str") if tasks_ins_res.task.delivered_at != "": validation_errors.append("`delivered_at` must be an empty str") if tasks_ins_res.task.ttl <= 0: validation_errors.append("`ttl` must be higher than zero") + if tasks_ins_res.task.pushed_at < 1711497600.0: + # unix timestamp of 27 March 2024 00h:00m:00s UTC + validation_errors.append("`pushed_at` is not a recent timestamp") # TaskIns specific if isinstance(tasks_ins_res, TaskIns): diff --git a/src/py/flwr/server/utils/validator_test.py b/src/py/flwr/server/utils/validator_test.py index 926103c6b09a..c896af998bea 100644 --- a/src/py/flwr/server/utils/validator_test.py +++ b/src/py/flwr/server/utils/validator_test.py @@ -15,6 +15,7 @@ """Validator tests.""" +import time import unittest from typing import List, Tuple @@ -100,6 +101,8 @@ def create_task_ins( ttl=DEFAULT_TTL, ), ) + 
+ task.task.pushed_at = time.time() return task @@ -122,4 +125,6 @@ def create_task_res( ttl=DEFAULT_TTL, ), ) + + task_res.task.pushed_at = time.time() return task_res From 83266176f1dfb5a1f4e354fc7b75400f1cff4dc6 Mon Sep 17 00:00:00 2001 From: Heng Pan Date: Wed, 27 Mar 2024 20:04:57 +0000 Subject: [PATCH 42/57] Update `InMemoryState` and `SqliteState` for the `Ping` protocol. (#3178) --- .../server/superlink/state/in_memory_state.py | 51 +++++++++++++------ .../server/superlink/state/sqlite_state.py | 36 ++++++++++--- src/py/flwr/server/superlink/state/state.py | 19 +++++++ .../flwr/server/superlink/state/state_test.py | 24 ++++++++- 4 files changed, 107 insertions(+), 23 deletions(-) diff --git a/src/py/flwr/server/superlink/state/in_memory_state.py b/src/py/flwr/server/superlink/state/in_memory_state.py index 7bff8ab4befc..cba4ab98a6d5 100644 --- a/src/py/flwr/server/superlink/state/in_memory_state.py +++ b/src/py/flwr/server/superlink/state/in_memory_state.py @@ -17,9 +17,10 @@ import os import threading +import time from datetime import datetime from logging import ERROR -from typing import Dict, List, Optional, Set +from typing import Dict, List, Optional, Set, Tuple from uuid import UUID, uuid4 from flwr.common import log, now @@ -32,7 +33,8 @@ class InMemoryState(State): """In-memory State implementation.""" def __init__(self) -> None: - self.node_ids: Set[int] = set() + # Map node_id to (online_until, ping_interval) + self.node_ids: Dict[int, Tuple[float, float]] = {} self.run_ids: Set[int] = set() self.task_ins_store: Dict[UUID, TaskIns] = {} self.task_res_store: Dict[UUID, TaskRes] = {} @@ -190,17 +192,21 @@ def create_node(self) -> int: # Sample a random int64 as node_id node_id: int = int.from_bytes(os.urandom(8), "little", signed=True) - if node_id not in self.node_ids: - self.node_ids.add(node_id) - return node_id + with self.lock: + if node_id not in self.node_ids: + # Default ping interval is 30s + # TODO: change 1e9 to 30s # pylint: disable=W0511 + self.node_ids[node_id] = (time.time() + 1e9, 1e9) + return node_id log(ERROR, "Unexpected node registration failure.") return 0 def delete_node(self, node_id: int) -> None: """Delete a client node.""" - if node_id not in self.node_ids: - raise ValueError(f"Node {node_id} not found") - self.node_ids.remove(node_id) + with self.lock: + if node_id not in self.node_ids: + raise ValueError(f"Node {node_id} not found") + del self.node_ids[node_id] def get_nodes(self, run_id: int) -> Set[int]: """Return all available client nodes. @@ -210,17 +216,32 @@ def get_nodes(self, run_id: int) -> Set[int]: If the provided `run_id` does not exist or has no matching nodes, an empty `Set` MUST be returned. 
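A minimal sketch of the heartbeat bookkeeping introduced here, exercising the in-memory state directly (hypothetical calling code, not part of the patch): a ping promises the next ping within `ping_interval` seconds, and `get_nodes` only returns nodes whose `online_until` still lies in the future.

from flwr.server.superlink.state.in_memory_state import InMemoryState

state = InMemoryState()
run_id = state.create_run()
node_id = state.create_node()

# Acknowledge a ping: the node counts as online for another 30 seconds
assert state.acknowledge_ping(node_id, ping_interval=30)
assert node_id in state.get_nodes(run_id)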
""" - if run_id not in self.run_ids: - return set() - return self.node_ids + with self.lock: + if run_id not in self.run_ids: + return set() + current_time = time.time() + return { + node_id + for node_id, (online_until, _) in self.node_ids.items() + if online_until > current_time + } def create_run(self) -> int: """Create one run.""" # Sample a random int64 as run_id - run_id: int = int.from_bytes(os.urandom(8), "little", signed=True) + with self.lock: + run_id: int = int.from_bytes(os.urandom(8), "little", signed=True) - if run_id not in self.run_ids: - self.run_ids.add(run_id) - return run_id + if run_id not in self.run_ids: + self.run_ids.add(run_id) + return run_id log(ERROR, "Unexpected run creation failure.") return 0 + + def acknowledge_ping(self, node_id: int, ping_interval: float) -> bool: + """Acknowledge a ping received from a node, serving as a heartbeat.""" + with self.lock: + if node_id in self.node_ids: + self.node_ids[node_id] = (time.time() + ping_interval, ping_interval) + return True + return False diff --git a/src/py/flwr/server/superlink/state/sqlite_state.py b/src/py/flwr/server/superlink/state/sqlite_state.py index 7d8cd2e8bda2..e1c1215000b9 100644 --- a/src/py/flwr/server/superlink/state/sqlite_state.py +++ b/src/py/flwr/server/superlink/state/sqlite_state.py @@ -18,6 +18,7 @@ import os import re import sqlite3 +import time from datetime import datetime from logging import DEBUG, ERROR from typing import Any, Dict, List, Optional, Set, Tuple, Union, cast @@ -33,10 +34,16 @@ SQL_CREATE_TABLE_NODE = """ CREATE TABLE IF NOT EXISTS node( - node_id INTEGER UNIQUE + node_id INTEGER UNIQUE, + online_until REAL, + ping_interval REAL ); """ +SQL_CREATE_INDEX_ONLINE_UNTIL = """ +CREATE INDEX IF NOT EXISTS idx_online_until ON node (online_until); +""" + SQL_CREATE_TABLE_RUN = """ CREATE TABLE IF NOT EXISTS run( run_id INTEGER UNIQUE @@ -84,7 +91,7 @@ ); """ -DictOrTuple = Union[Tuple[Any], Dict[str, Any]] +DictOrTuple = Union[Tuple[Any, ...], Dict[str, Any]] class SqliteState(State): @@ -125,6 +132,7 @@ def initialize(self, log_queries: bool = False) -> List[Tuple[str]]: cur.execute(SQL_CREATE_TABLE_TASK_INS) cur.execute(SQL_CREATE_TABLE_TASK_RES) cur.execute(SQL_CREATE_TABLE_NODE) + cur.execute(SQL_CREATE_INDEX_ONLINE_UNTIL) res = cur.execute("SELECT name FROM sqlite_schema;") return res.fetchall() @@ -470,9 +478,14 @@ def create_node(self) -> int: # Sample a random int64 as node_id node_id: int = int.from_bytes(os.urandom(8), "little", signed=True) - query = "INSERT INTO node VALUES(:node_id);" + query = ( + "INSERT INTO node (node_id, online_until, ping_interval) VALUES (?, ?, ?)" + ) + try: - self.query(query, {"node_id": node_id}) + # Default ping interval is 30s + # TODO: change 1e9 to 30s # pylint: disable=W0511 + self.query(query, (node_id, time.time() + 1e9, 1e9)) except sqlite3.IntegrityError: log(ERROR, "Unexpected node registration failure.") return 0 @@ -497,8 +510,8 @@ def get_nodes(self, run_id: int) -> Set[int]: return set() # Get nodes - query = "SELECT * FROM node;" - rows = self.query(query) + query = "SELECT node_id FROM node WHERE online_until > ?;" + rows = self.query(query, (time.time(),)) result: Set[int] = {row["node_id"] for row in rows} return result @@ -517,6 +530,17 @@ def create_run(self) -> int: log(ERROR, "Unexpected run creation failure.") return 0 + def acknowledge_ping(self, node_id: int, ping_interval: float) -> bool: + """Acknowledge a ping received from a node, serving as a heartbeat.""" + # Update `online_until` and `ping_interval` for 
the given `node_id` + query = "UPDATE node SET online_until = ?, ping_interval = ? WHERE node_id = ?;" + try: + self.query(query, (time.time() + ping_interval, ping_interval, node_id)) + return True + except sqlite3.IntegrityError: + log(ERROR, "`node_id` does not exist.") + return False + def dict_factory( cursor: sqlite3.Cursor, diff --git a/src/py/flwr/server/superlink/state/state.py b/src/py/flwr/server/superlink/state/state.py index 9337ae6d8624..313290eb1022 100644 --- a/src/py/flwr/server/superlink/state/state.py +++ b/src/py/flwr/server/superlink/state/state.py @@ -152,3 +152,22 @@ def get_nodes(self, run_id: int) -> Set[int]: @abc.abstractmethod def create_run(self) -> int: """Create one run.""" + + @abc.abstractmethod + def acknowledge_ping(self, node_id: int, ping_interval: float) -> bool: + """Acknowledge a ping received from a node, serving as a heartbeat. + + Parameters + ---------- + node_id : int + The `node_id` from which the ping was received. + ping_interval : float + The interval (in seconds) from the current timestamp within which the next + ping from this node must be received. This acts as a hard deadline to ensure + an accurate assessment of the node's availability. + + Returns + ------- + is_acknowledged : bool + True if the ping is successfully acknowledged; otherwise, False. + """ diff --git a/src/py/flwr/server/superlink/state/state_test.py b/src/py/flwr/server/superlink/state/state_test.py index f5e9adab2647..6ab511d3f847 100644 --- a/src/py/flwr/server/superlink/state/state_test.py +++ b/src/py/flwr/server/superlink/state/state_test.py @@ -21,6 +21,7 @@ from abc import abstractmethod from datetime import datetime, timezone from typing import List +from unittest.mock import patch from uuid import uuid4 from flwr.common import DEFAULT_TTL @@ -396,6 +397,25 @@ def test_num_task_res(self) -> None: # Assert assert num == 2 + def test_acknowledge_ping(self) -> None: + """Test if acknowledge_ping works and if get_nodes return online nodes.""" + # Prepare + state: State = self.state_factory() + run_id = state.create_run() + node_ids = [state.create_node() for _ in range(100)] + for node_id in node_ids[:70]: + state.acknowledge_ping(node_id, ping_interval=30) + for node_id in node_ids[70:]: + state.acknowledge_ping(node_id, ping_interval=90) + + # Execute + current_time = time.time() + with patch("time.time", side_effect=lambda: current_time + 50): + actual_node_ids = state.get_nodes(run_id) + + # Assert + self.assertSetEqual(actual_node_ids, set(node_ids[70:])) + def create_task_ins( consumer_node_id: int, @@ -479,7 +499,7 @@ def test_initialize(self) -> None: result = state.query("SELECT name FROM sqlite_schema;") # Assert - assert len(result) == 8 + assert len(result) == 9 class SqliteFileBasedTest(StateTest, unittest.TestCase): @@ -504,7 +524,7 @@ def test_initialize(self) -> None: result = state.query("SELECT name FROM sqlite_schema;") # Assert - assert len(result) == 8 + assert len(result) == 9 if __name__ == "__main__": From c8da9f34d6ab08313ea1f094f98ec567766ce964 Mon Sep 17 00:00:00 2001 From: Javier Date: Wed, 27 Mar 2024 21:38:59 +0000 Subject: [PATCH 43/57] Add `created_at` to `Metadata` (#3174) --- src/proto/flwr/proto/task.proto | 2 +- .../client/message_handler/message_handler.py | 1 + .../message_handler/message_handler_test.py | 35 +++++++++++++++++-- src/py/flwr/common/message.py | 15 ++++++++ src/py/flwr/common/message_test.py | 8 +++-- src/py/flwr/common/serde.py | 10 ++++-- src/py/flwr/proto/task_pb2.py | 2 +- src/py/flwr/proto/task_pb2.pyi | 4 +-- 
.../flwr/server/compat/driver_client_proxy.py | 7 ++++ .../server/superlink/state/in_memory_state.py | 9 ++--- .../server/superlink/state/sqlite_state.py | 11 ++---- .../flwr/server/superlink/state/state_test.py | 9 +++-- src/py/flwr/server/utils/validator.py | 9 +++-- src/py/flwr/server/utils/validator_test.py | 2 ++ 14 files changed, 92 insertions(+), 32 deletions(-) diff --git a/src/proto/flwr/proto/task.proto b/src/proto/flwr/proto/task.proto index 25e65e59cedc..cf77d110acab 100644 --- a/src/proto/flwr/proto/task.proto +++ b/src/proto/flwr/proto/task.proto @@ -25,7 +25,7 @@ import "flwr/proto/error.proto"; message Task { Node producer = 1; Node consumer = 2; - string created_at = 3; + double created_at = 3; string delivered_at = 4; double pushed_at = 5; double ttl = 6; diff --git a/src/py/flwr/client/message_handler/message_handler.py b/src/py/flwr/client/message_handler/message_handler.py index 87014f436cf7..e5acbe0cc9d0 100644 --- a/src/py/flwr/client/message_handler/message_handler.py +++ b/src/py/flwr/client/message_handler/message_handler.py @@ -172,6 +172,7 @@ def validate_out_message(out_message: Message, in_message_metadata: Metadata) -> and out_meta.reply_to_message == in_meta.message_id and out_meta.group_id == in_meta.group_id and out_meta.message_type == in_meta.message_type + and out_meta.created_at > in_meta.created_at ): return True return False diff --git a/src/py/flwr/client/message_handler/message_handler_test.py b/src/py/flwr/client/message_handler/message_handler_test.py index e3f6487421cc..2a510b291c49 100644 --- a/src/py/flwr/client/message_handler/message_handler_test.py +++ b/src/py/flwr/client/message_handler/message_handler_test.py @@ -15,6 +15,7 @@ """Client-side message handler tests.""" +import time import unittest import uuid from copy import copy @@ -169,7 +170,18 @@ def test_client_without_get_properties() -> None: ) assert actual_msg.content == expected_msg.content - assert actual_msg.metadata == expected_msg.metadata + # metadata.created_at will differ so let's exclude it from checks + attrs = actual_msg.metadata.__annotations__ + attrs_keys = list(attrs.keys()) + attrs_keys.remove("_created_at") + # metadata.created_at will differ so let's exclude it from checks + for attr in attrs_keys: + assert getattr(actual_msg.metadata, attr) == getattr( + expected_msg.metadata, attr + ) + + # Ensure the message created last has a higher timestamp + assert actual_msg.metadata.created_at < expected_msg.metadata.created_at def test_client_with_get_properties() -> None: @@ -222,7 +234,17 @@ def test_client_with_get_properties() -> None: ) assert actual_msg.content == expected_msg.content - assert actual_msg.metadata == expected_msg.metadata + attrs = actual_msg.metadata.__annotations__ + attrs_keys = list(attrs.keys()) + attrs_keys.remove("_created_at") + # metadata.created_at will differ so let's exclude it from checks + for attr in attrs_keys: + assert getattr(actual_msg.metadata, attr) == getattr( + expected_msg.metadata, attr + ) + + # Ensure the message created last has a higher timestamp + assert actual_msg.metadata.created_at < expected_msg.metadata.created_at class TestMessageValidation(unittest.TestCase): @@ -241,6 +263,11 @@ def setUp(self) -> None: ttl=DEFAULT_TTL, message_type="mock", ) + # We need to set created_at in this way + # since this `self.in_metadata` is used for tests + # without it ever being part of a Message + self.in_metadata.created_at = time.time() + self.valid_out_metadata = Metadata( run_id=123, message_id="", @@ -281,6 +308,10 @@ def 
test_invalid_message_run_id(self) -> None: value = 999 elif isinstance(value, str): value = "999" + elif isinstance(value, float): + if attr == "_created_at": + # make it be in 1h the past + value = value - 3600 setattr(invalid_metadata, attr, value) # Add to list invalid_metadata_list.append(invalid_metadata) diff --git a/src/py/flwr/common/message.py b/src/py/flwr/common/message.py index 25607179764d..6e0ab9149828 100644 --- a/src/py/flwr/common/message.py +++ b/src/py/flwr/common/message.py @@ -16,6 +16,7 @@ from __future__ import annotations +import time from dataclasses import dataclass from .record import RecordSet @@ -62,6 +63,7 @@ class Metadata: # pylint: disable=too-many-instance-attributes _ttl: float _message_type: str _partition_id: int | None + _created_at: float # Unix timestamp (in seconds) to be set upon message creation def __init__( # pylint: disable=too-many-arguments self, @@ -125,6 +127,16 @@ def group_id(self, value: str) -> None: """Set group_id.""" self._group_id = value + @property + def created_at(self) -> float: + """Unix timestamp when the message was created.""" + return self._created_at + + @created_at.setter + def created_at(self, value: float) -> None: + """Set creation timestamp for this messages.""" + self._created_at = value + @property def ttl(self) -> float: """Time-to-live for this message.""" @@ -214,6 +226,9 @@ def __init__( ) -> None: self._metadata = metadata + # Set message creation timestamp + self._metadata.created_at = time.time() + if not (content is None) ^ (error is None): raise ValueError("Either `content` or `error` must be set, but not both.") diff --git a/src/py/flwr/common/message_test.py b/src/py/flwr/common/message_test.py index ba628bb3235a..cd5a7d72272f 100644 --- a/src/py/flwr/common/message_test.py +++ b/src/py/flwr/common/message_test.py @@ -14,7 +14,7 @@ # ============================================================================== """Message tests.""" - +import time from contextlib import ExitStack from typing import Any, Callable @@ -62,12 +62,16 @@ def test_message_creation( if context: stack.enter_context(context) - _ = Message( + current_time = time.time() + message = Message( metadata=metadata, content=None if content_fn is None else content_fn(maker), error=None if error_fn is None else error_fn(0), ) + assert message.metadata.created_at > current_time + assert message.metadata.created_at < time.time() + def create_message_with_content() -> Message: """Create a Message with content.""" diff --git a/src/py/flwr/common/serde.py b/src/py/flwr/common/serde.py index 6c7a077d2f9f..84932b806aff 100644 --- a/src/py/flwr/common/serde.py +++ b/src/py/flwr/common/serde.py @@ -575,6 +575,7 @@ def message_to_taskins(message: Message) -> TaskIns: task=Task( producer=Node(node_id=0, anonymous=True), # Assume driver node consumer=Node(node_id=md.dst_node_id, anonymous=False), + created_at=md.created_at, ttl=md.ttl, ancestry=[md.reply_to_message] if md.reply_to_message != "" else [], task_type=md.message_type, @@ -601,7 +602,7 @@ def message_from_taskins(taskins: TaskIns) -> Message: ) # Construct Message - return Message( + message = Message( metadata=metadata, content=( recordset_from_proto(taskins.task.recordset) @@ -614,6 +615,8 @@ def message_from_taskins(taskins: TaskIns) -> Message: else None ), ) + message.metadata.created_at = taskins.task.created_at + return message def message_to_taskres(message: Message) -> TaskRes: @@ -626,6 +629,7 @@ def message_to_taskres(message: Message) -> TaskRes: task=Task( 
producer=Node(node_id=md.src_node_id, anonymous=False), consumer=Node(node_id=0, anonymous=True), # Assume driver node + created_at=md.created_at, ttl=md.ttl, ancestry=[md.reply_to_message] if md.reply_to_message != "" else [], task_type=md.message_type, @@ -652,7 +656,7 @@ def message_from_taskres(taskres: TaskRes) -> Message: ) # Construct the Message - return Message( + message = Message( metadata=metadata, content=( recordset_from_proto(taskres.task.recordset) @@ -665,3 +669,5 @@ def message_from_taskres(taskres: TaskRes) -> Message: else None ), ) + message.metadata.created_at = taskres.task.created_at + return message diff --git a/src/py/flwr/proto/task_pb2.py b/src/py/flwr/proto/task_pb2.py index 3546f01efded..5f6e9e7be583 100644 --- a/src/py/flwr/proto/task_pb2.py +++ b/src/py/flwr/proto/task_pb2.py @@ -18,7 +18,7 @@ from flwr.proto import error_pb2 as flwr_dot_proto_dot_error__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/task.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x1a\x66lwr/proto/transport.proto\x1a\x16\x66lwr/proto/error.proto\"\x89\x02\n\x04Task\x12\"\n\x08producer\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\"\n\x08\x63onsumer\x18\x02 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x12\n\ncreated_at\x18\x03 \x01(\t\x12\x14\n\x0c\x64\x65livered_at\x18\x04 \x01(\t\x12\x11\n\tpushed_at\x18\x05 \x01(\x01\x12\x0b\n\x03ttl\x18\x06 \x01(\x01\x12\x10\n\x08\x61ncestry\x18\x07 \x03(\t\x12\x11\n\ttask_type\x18\x08 \x01(\t\x12(\n\trecordset\x18\t \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\n \x01(\x0b\x32\x11.flwr.proto.Error\"\\\n\x07TaskIns\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Task\"\\\n\x07TaskRes\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Taskb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/task.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x1a\x66lwr/proto/transport.proto\x1a\x16\x66lwr/proto/error.proto\"\x89\x02\n\x04Task\x12\"\n\x08producer\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\"\n\x08\x63onsumer\x18\x02 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x12\n\ncreated_at\x18\x03 \x01(\x01\x12\x14\n\x0c\x64\x65livered_at\x18\x04 \x01(\t\x12\x11\n\tpushed_at\x18\x05 \x01(\x01\x12\x0b\n\x03ttl\x18\x06 \x01(\x01\x12\x10\n\x08\x61ncestry\x18\x07 \x03(\t\x12\x11\n\ttask_type\x18\x08 \x01(\t\x12(\n\trecordset\x18\t \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\n \x01(\x0b\x32\x11.flwr.proto.Error\"\\\n\x07TaskIns\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Task\"\\\n\x07TaskRes\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Taskb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) diff --git a/src/py/flwr/proto/task_pb2.pyi b/src/py/flwr/proto/task_pb2.pyi index 8f0549ceddc9..455791ac9e6e 100644 --- a/src/py/flwr/proto/task_pb2.pyi +++ b/src/py/flwr/proto/task_pb2.pyi @@ -30,7 +30,7 @@ class Task(google.protobuf.message.Message): def 
producer(self) -> flwr.proto.node_pb2.Node: ... @property def consumer(self) -> flwr.proto.node_pb2.Node: ... - created_at: typing.Text + created_at: builtins.float delivered_at: typing.Text pushed_at: builtins.float ttl: builtins.float @@ -45,7 +45,7 @@ class Task(google.protobuf.message.Message): *, producer: typing.Optional[flwr.proto.node_pb2.Node] = ..., consumer: typing.Optional[flwr.proto.node_pb2.Node] = ..., - created_at: typing.Text = ..., + created_at: builtins.float = ..., delivered_at: typing.Text = ..., pushed_at: builtins.float = ..., ttl: builtins.float = ..., diff --git a/src/py/flwr/server/compat/driver_client_proxy.py b/src/py/flwr/server/compat/driver_client_proxy.py index 99ba50d3e2d1..7fdc07d620f2 100644 --- a/src/py/flwr/server/compat/driver_client_proxy.py +++ b/src/py/flwr/server/compat/driver_client_proxy.py @@ -132,6 +132,13 @@ def _send_receive_recordset( ttl=DEFAULT_TTL, ), ) + + # This would normally be recorded upon common.Message creation + # but this compatibility stack doesn't create Messages, + # so we need to inject `created_at` manually (needed for + # taskins validation by server.utils.validator) + task_ins.task.created_at = time.time() + push_task_ins_req = driver_pb2.PushTaskInsRequest( # pylint: disable=E1101 task_ins_list=[task_ins] ) diff --git a/src/py/flwr/server/superlink/state/in_memory_state.py b/src/py/flwr/server/superlink/state/in_memory_state.py index cba4ab98a6d5..6fc57707ac36 100644 --- a/src/py/flwr/server/superlink/state/in_memory_state.py +++ b/src/py/flwr/server/superlink/state/in_memory_state.py @@ -18,7 +18,6 @@ import os import threading import time -from datetime import datetime from logging import ERROR from typing import Dict, List, Optional, Set, Tuple from uuid import UUID, uuid4 @@ -52,13 +51,11 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: log(ERROR, "`run_id` is invalid") return None - # Create task_id and created_at + # Create task_id task_id = uuid4() - created_at: datetime = now() # Store TaskIns task_ins.task_id = str(task_id) - task_ins.task.created_at = created_at.isoformat() with self.lock: self.task_ins_store[task_id] = task_ins @@ -113,13 +110,11 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: log(ERROR, "`run_id` is invalid") return None - # Create task_id and created_at + # Create task_id task_id = uuid4() - created_at: datetime = now() # Store TaskRes task_res.task_id = str(task_id) - task_res.task.created_at = created_at.isoformat() with self.lock: self.task_res_store[task_id] = task_res diff --git a/src/py/flwr/server/superlink/state/sqlite_state.py b/src/py/flwr/server/superlink/state/sqlite_state.py index e1c1215000b9..6996d51d2a9b 100644 --- a/src/py/flwr/server/superlink/state/sqlite_state.py +++ b/src/py/flwr/server/superlink/state/sqlite_state.py @@ -19,7 +19,6 @@ import re import sqlite3 import time -from datetime import datetime from logging import DEBUG, ERROR from typing import Any, Dict, List, Optional, Set, Tuple, Union, cast from uuid import UUID, uuid4 @@ -59,7 +58,7 @@ producer_node_id INTEGER, consumer_anonymous BOOLEAN, consumer_node_id INTEGER, - created_at TEXT, + created_at REAL, delivered_at TEXT, pushed_at REAL, ttl REAL, @@ -80,7 +79,7 @@ producer_node_id INTEGER, consumer_anonymous BOOLEAN, consumer_node_id INTEGER, - created_at TEXT, + created_at REAL, delivered_at TEXT, pushed_at REAL, ttl REAL, @@ -195,13 +194,11 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: log(ERROR, errors) return None - # Create task_id and created_at + 
# Create task_id task_id = uuid4() - created_at: datetime = now() # Store TaskIns task_ins.task_id = str(task_id) - task_ins.task.created_at = created_at.isoformat() data = (task_ins_to_dict(task_ins),) columns = ", ".join([f":{key}" for key in data[0]]) query = f"INSERT INTO task_ins VALUES({columns});" @@ -330,11 +327,9 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: # Create task_id task_id = uuid4() - created_at: datetime = now() # Store TaskIns task_res.task_id = str(task_id) - task_res.task.created_at = created_at.isoformat() data = (task_res_to_dict(task_res),) columns = ", ".join([f":{key}" for key in data[0]]) query = f"INSERT INTO task_res VALUES({columns});" diff --git a/src/py/flwr/server/superlink/state/state_test.py b/src/py/flwr/server/superlink/state/state_test.py index 6ab511d3f847..1757cfac4255 100644 --- a/src/py/flwr/server/superlink/state/state_test.py +++ b/src/py/flwr/server/superlink/state/state_test.py @@ -74,7 +74,7 @@ def test_store_task_ins_one(self) -> None: consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id ) - assert task_ins.task.created_at == "" # pylint: disable=no-member + assert task_ins.task.created_at < time.time() # pylint: disable=no-member assert task_ins.task.delivered_at == "" # pylint: disable=no-member # Execute @@ -91,12 +91,9 @@ def test_store_task_ins_one(self) -> None: actual_task = actual_task_ins.task - assert actual_task.created_at != "" assert actual_task.delivered_at != "" - assert datetime.fromisoformat(actual_task.created_at) > datetime( - 2020, 1, 1, tzinfo=timezone.utc - ) + assert actual_task.created_at < actual_task.pushed_at assert datetime.fromisoformat(actual_task.delivered_at) > datetime( 2020, 1, 1, tzinfo=timezone.utc ) @@ -439,6 +436,7 @@ def create_task_ins( task_type="mock", recordset=RecordSet(parameters={}, metrics={}, configs={}), ttl=DEFAULT_TTL, + created_at=time.time(), ), ) task.task.pushed_at = time.time() @@ -463,6 +461,7 @@ def create_task_res( task_type="mock", recordset=RecordSet(parameters={}, metrics={}, configs={}), ttl=DEFAULT_TTL, + created_at=time.time(), ), ) task_res.task.pushed_at = time.time() diff --git a/src/py/flwr/server/utils/validator.py b/src/py/flwr/server/utils/validator.py index d8b287b0f674..c0b0ec85761c 100644 --- a/src/py/flwr/server/utils/validator.py +++ b/src/py/flwr/server/utils/validator.py @@ -32,8 +32,13 @@ def validate_task_ins_or_res(tasks_ins_res: Union[TaskIns, TaskRes]) -> List[str validation_errors.append("`task` does not set field `task`") # Created/delivered/TTL/Pushed - if tasks_ins_res.task.created_at != "": - validation_errors.append("`created_at` must be an empty str") + if ( + tasks_ins_res.task.created_at < 1711497600.0 + ): # unix timestamp of 27 March 2024 00h:00m:00s UTC + validation_errors.append( + "`created_at` must be a float that records the unix timestamp " + "in seconds when the message was created." 
+ ) if tasks_ins_res.task.delivered_at != "": validation_errors.append("`delivered_at` must be an empty str") if tasks_ins_res.task.ttl <= 0: diff --git a/src/py/flwr/server/utils/validator_test.py b/src/py/flwr/server/utils/validator_test.py index c896af998bea..61fe094c23d4 100644 --- a/src/py/flwr/server/utils/validator_test.py +++ b/src/py/flwr/server/utils/validator_test.py @@ -99,6 +99,7 @@ def create_task_ins( task_type="mock", recordset=RecordSet(parameters={}, metrics={}, configs={}), ttl=DEFAULT_TTL, + created_at=time.time(), ), ) @@ -123,6 +124,7 @@ def create_task_res( task_type="mock", recordset=RecordSet(parameters={}, metrics={}, configs={}), ttl=DEFAULT_TTL, + created_at=time.time(), ), ) From 3f282d47d3f4f83438bf82a37616962cfce7df89 Mon Sep 17 00:00:00 2001 From: Javier Date: Thu, 28 Mar 2024 13:23:44 +0000 Subject: [PATCH 44/57] Update default `TTL` logic in message replies (#3180) --- .../message_handler/message_handler_test.py | 4 +- src/py/flwr/common/message.py | 52 ++++++++++++++----- src/py/flwr/common/message_test.py | 52 +++++++++++++++++-- 3 files changed, 89 insertions(+), 19 deletions(-) diff --git a/src/py/flwr/client/message_handler/message_handler_test.py b/src/py/flwr/client/message_handler/message_handler_test.py index 2a510b291c49..5244951c8a48 100644 --- a/src/py/flwr/client/message_handler/message_handler_test.py +++ b/src/py/flwr/client/message_handler/message_handler_test.py @@ -163,7 +163,7 @@ def test_client_without_get_properties() -> None: src_node_id=1123, dst_node_id=0, reply_to_message=message.metadata.message_id, - ttl=DEFAULT_TTL, + ttl=actual_msg.metadata.ttl, # computed based on [message].create_reply() message_type=MessageTypeLegacy.GET_PROPERTIES, ), content=expected_rs, @@ -227,7 +227,7 @@ def test_client_with_get_properties() -> None: src_node_id=1123, dst_node_id=0, reply_to_message=message.metadata.message_id, - ttl=DEFAULT_TTL, + ttl=actual_msg.metadata.ttl, # computed based on [message].create_reply() message_type=MessageTypeLegacy.GET_PROPERTIES, ), content=expected_rs, diff --git a/src/py/flwr/common/message.py b/src/py/flwr/common/message.py index 6e0ab9149828..7707f3c72de1 100644 --- a/src/py/flwr/common/message.py +++ b/src/py/flwr/common/message.py @@ -297,22 +297,33 @@ def _create_reply_metadata(self, ttl: float) -> Metadata: partition_id=self.metadata.partition_id, ) - def create_error_reply( - self, - error: Error, - ttl: float, - ) -> Message: + def create_error_reply(self, error: Error, ttl: float | None = None) -> Message: """Construct a reply message indicating an error happened. Parameters ---------- error : Error The error that was encountered. - ttl : float - Time-to-live for this message in seconds. + ttl : Optional[float] (default: None) + Time-to-live for this message in seconds. If unset, it will be set based + on the remaining time for the received message before it expires. 
This + follows the equation: + + ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at) """ + # If no TTL passed, use default for message creation (will update after + # message creation) + ttl_ = DEFAULT_TTL if ttl is None else ttl # Create reply with error - message = Message(metadata=self._create_reply_metadata(ttl), error=error) + message = Message(metadata=self._create_reply_metadata(ttl_), error=error) + + if ttl is None: + # Set TTL equal to the remaining time for the received message to expire + ttl = self.metadata.ttl - ( + message.metadata.created_at - self.metadata.created_at + ) + message.metadata.ttl = ttl + return message def create_reply(self, content: RecordSet, ttl: float | None = None) -> Message: @@ -327,18 +338,31 @@ def create_reply(self, content: RecordSet, ttl: float | None = None) -> Message: content : RecordSet The content for the reply message. ttl : Optional[float] (default: None) - Time-to-live for this message in seconds. If unset, it will use - the `common.DEFAULT_TTL` value. + Time-to-live for this message in seconds. If unset, it will be set based + on the remaining time for the received message before it expires. This + follows the equation: + + ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at) Returns ------- Message A new `Message` instance representing the reply. """ - if ttl is None: - ttl = DEFAULT_TTL + # If no TTL passed, use default for message creation (will update after + # message creation) + ttl_ = DEFAULT_TTL if ttl is None else ttl - return Message( - metadata=self._create_reply_metadata(ttl), + message = Message( + metadata=self._create_reply_metadata(ttl_), content=content, ) + + if ttl is None: + # Set TTL equal to the remaining time for the received message to expire + ttl = self.metadata.ttl - ( + message.metadata.created_at - self.metadata.created_at + ) + message.metadata.ttl = ttl + + return message diff --git a/src/py/flwr/common/message_test.py b/src/py/flwr/common/message_test.py index cd5a7d72272f..1a5da0517352 100644 --- a/src/py/flwr/common/message_test.py +++ b/src/py/flwr/common/message_test.py @@ -16,7 +16,7 @@ import time from contextlib import ExitStack -from typing import Any, Callable +from typing import Any, Callable, Optional import pytest @@ -73,17 +73,21 @@ def test_message_creation( assert message.metadata.created_at < time.time() -def create_message_with_content() -> Message: +def create_message_with_content(ttl: Optional[float] = None) -> Message: """Create a Message with content.""" maker = RecordMaker(state=2) metadata = maker.metadata() + if ttl: + metadata.ttl = ttl return Message(metadata=metadata, content=RecordSet()) -def create_message_with_error() -> Message: +def create_message_with_error(ttl: Optional[float] = None) -> Message: """Create a Message with error.""" maker = RecordMaker(state=2) metadata = maker.metadata() + if ttl: + metadata.ttl = ttl return Message(metadata=metadata, error=Error(code=1)) @@ -111,3 +115,45 @@ def test_altering_message( message.error = Error(code=123) if message.has_error(): message.content = RecordSet() + + +@pytest.mark.parametrize( + "message_creation_fn,ttl,reply_ttl", + [ + (create_message_with_content, 1e6, None), + (create_message_with_error, 1e6, None), + (create_message_with_content, 1e6, 3600), + (create_message_with_error, 1e6, 3600), + ], +) +def test_create_reply( + message_creation_fn: Callable[ + [float], + Message, + ], + ttl: float, + reply_ttl: Optional[float], +) -> None: + """Test reply creation from message.""" + message: 
Message = message_creation_fn(ttl) + + time.sleep(0.1) + + if message.has_error(): + dummy_error = Error(code=0, reason="it crashed") + reply_message = message.create_error_reply(dummy_error, ttl=reply_ttl) + else: + reply_message = message.create_reply(content=RecordSet(), ttl=reply_ttl) + + # Ensure reply has a higher timestamp + assert message.metadata.created_at < reply_message.metadata.created_at + if reply_ttl: + # Ensure the TTL is the one specify upon reply creation + assert reply_message.metadata.ttl == reply_ttl + else: + # Ensure reply ttl is lower (since it uses remaining time left) + assert message.metadata.ttl > reply_message.metadata.ttl + + assert message.metadata.src_node_id == reply_message.metadata.dst_node_id + assert message.metadata.dst_node_id == reply_message.metadata.src_node_id + assert reply_message.metadata.reply_to_message == message.metadata.message_id From 540adefbe770de52ee8429a0586b1cb7caa3d561 Mon Sep 17 00:00:00 2001 From: "Daniel J. Beutel" Date: Thu, 28 Mar 2024 14:35:16 +0100 Subject: [PATCH 45/57] Handle ClientApp exception (#2846) Co-authored-by: Daniel J. Beutel Co-authored-by: jafermarq --- examples/app-pytorch/server_custom.py | 33 ++++--- src/py/flwr/client/app.py | 59 +++++++------ .../flwr/server/compat/driver_client_proxy.py | 16 ++++ .../server/compat/driver_client_proxy_test.py | 85 ++++++++++++++++++- 4 files changed, 154 insertions(+), 39 deletions(-) diff --git a/examples/app-pytorch/server_custom.py b/examples/app-pytorch/server_custom.py index ba9cdb11d694..67c1bce99c55 100644 --- a/examples/app-pytorch/server_custom.py +++ b/examples/app-pytorch/server_custom.py @@ -103,15 +103,19 @@ def main(driver: Driver, context: Context) -> None: all_replies: List[Message] = [] while True: replies = driver.pull_messages(message_ids=message_ids) - print(f"Got {len(replies)} results") + for res in replies: + print(f"Got 1 {'result' if res.has_content() else 'error'}") all_replies += replies if len(all_replies) == len(message_ids): break + print("Pulling messages...") time.sleep(3) - # Collect correct results + # Filter correct results all_fitres = [ - recordset_to_fitres(msg.content, keep_input=True) for msg in all_replies + recordset_to_fitres(msg.content, keep_input=True) + for msg in all_replies + if msg.has_content() ] print(f"Received {len(all_fitres)} results") @@ -128,16 +132,21 @@ def main(driver: Driver, context: Context) -> None: ) metrics_results.append((fitres.num_examples, fitres.metrics)) - # Aggregate parameters (FedAvg) - parameters_aggregated = ndarrays_to_parameters(aggregate(weights_results)) - parameters = parameters_aggregated + if len(weights_results) > 0: + # Aggregate parameters (FedAvg) + parameters_aggregated = ndarrays_to_parameters(aggregate(weights_results)) + parameters = parameters_aggregated - # Aggregate metrics - metrics_aggregated = weighted_average(metrics_results) - history.add_metrics_distributed_fit( - server_round=server_round, metrics=metrics_aggregated - ) - print("Round ", server_round, " metrics: ", metrics_aggregated) + # Aggregate metrics + metrics_aggregated = weighted_average(metrics_results) + history.add_metrics_distributed_fit( + server_round=server_round, metrics=metrics_aggregated + ) + print("Round ", server_round, " metrics: ", metrics_aggregated) + else: + print( + f"Round {server_round} got {len(weights_results)} results. Skipping aggregation..." 
+ ) # Slow down the start of the next round time.sleep(sleep_time) diff --git a/src/py/flwr/client/app.py b/src/py/flwr/client/app.py index c8287afc0fd0..d4bd8e2e39e9 100644 --- a/src/py/flwr/client/app.py +++ b/src/py/flwr/client/app.py @@ -14,11 +14,10 @@ # ============================================================================== """Flower client app.""" - import argparse import sys import time -from logging import DEBUG, INFO, WARN +from logging import DEBUG, ERROR, INFO, WARN from pathlib import Path from typing import Callable, ContextManager, Optional, Tuple, Type, Union @@ -38,6 +37,7 @@ ) from flwr.common.exit_handlers import register_exit_handlers from flwr.common.logger import log, warn_deprecated_feature, warn_experimental_feature +from flwr.common.message import Error from flwr.common.object_ref import load_app, validate from flwr.common.retry_invoker import RetryInvoker, exponential @@ -482,32 +482,43 @@ def _load_client_app() -> ClientApp: # Retrieve context for this run context = node_state.retrieve_context(run_id=message.metadata.run_id) - # Load ClientApp instance - client_app: ClientApp = load_client_app_fn() + # Create an error reply message that will never be used to prevent + # the used-before-assignment linting error + reply_message = message.create_error_reply( + error=Error(code=0, reason="Unknown") + ) - # Handle task message - out_message = client_app(message=message, context=context) + # Handle app loading and task message + try: + # Load ClientApp instance + client_app: ClientApp = load_client_app_fn() - # Update node state - node_state.update_context( - run_id=message.metadata.run_id, - context=context, - ) + reply_message = client_app(message=message, context=context) + # Update node state + node_state.update_context( + run_id=message.metadata.run_id, + context=context, + ) + except Exception as ex: # pylint: disable=broad-exception-caught + log(ERROR, "ClientApp raised an exception", exc_info=ex) + + # Legacy grpc-bidi + if transport in ["grpc-bidi", None]: + # Raise exception, crash process + raise ex + + # Don't update/change NodeState + + # Create error message + # Reason example: ":<'division by zero'>" + reason = str(type(ex)) + ":<'" + str(ex) + "'>" + reply_message = message.create_error_reply( + error=Error(code=0, reason=reason) + ) # Send - send(out_message) - log( - INFO, - "[RUN %s, ROUND %s]", - out_message.metadata.run_id, - out_message.metadata.group_id, - ) - log( - INFO, - "Sent: %s reply to message %s", - out_message.metadata.message_type, - message.metadata.message_id, - ) + send(reply_message) + log(INFO, "Sent reply") # Unregister node if delete_node is not None: diff --git a/src/py/flwr/server/compat/driver_client_proxy.py b/src/py/flwr/server/compat/driver_client_proxy.py index 7fdc07d620f2..58341c7bb8f3 100644 --- a/src/py/flwr/server/compat/driver_client_proxy.py +++ b/src/py/flwr/server/compat/driver_client_proxy.py @@ -170,8 +170,24 @@ def _send_receive_recordset( ) if len(task_res_list) == 1: task_res = task_res_list[0] + + # This will raise an Exception if task_res carries an `error` + validate_task_res(task_res=task_res) + return serde.recordset_from_proto(task_res.task.recordset) if timeout is not None and time.time() > start_time + timeout: raise RuntimeError("Timeout reached") time.sleep(SLEEP_TIME) + + +def validate_task_res( + task_res: task_pb2.TaskRes, # pylint: disable=E1101 +) -> None: + """Validate if a TaskRes is empty or not.""" + if not task_res.HasField("task"): + raise ValueError("Invalid TaskRes, field `task` 
missing") + if task_res.task.HasField("error"): + raise ValueError("Exception during client-side task execution") + if not task_res.task.HasField("recordset"): + raise ValueError("Invalid TaskRes, both `recordset` and `error` are missing") diff --git a/src/py/flwr/server/compat/driver_client_proxy_test.py b/src/py/flwr/server/compat/driver_client_proxy_test.py index 3494049c1064..57b35fc61a3e 100644 --- a/src/py/flwr/server/compat/driver_client_proxy_test.py +++ b/src/py/flwr/server/compat/driver_client_proxy_test.py @@ -38,9 +38,14 @@ Properties, Status, ) -from flwr.proto import driver_pb2, node_pb2, task_pb2 # pylint: disable=E0611 - -from .driver_client_proxy import DriverClientProxy +from flwr.proto import ( # pylint: disable=E0611 + driver_pb2, + error_pb2, + node_pb2, + recordset_pb2, + task_pb2, +) +from flwr.server.compat.driver_client_proxy import DriverClientProxy, validate_task_res MESSAGE_PARAMETERS = Parameters(tensors=[b"abc"], tensor_type="np") @@ -243,3 +248,77 @@ def test_evaluate(self) -> None: # Assert assert 0.0 == evaluate_res.loss assert 0 == evaluate_res.num_examples + + def test_validate_task_res_valid(self) -> None: + """Test valid TaskRes.""" + metrics_record = recordset_pb2.MetricsRecord( # pylint: disable=E1101 + data={ + "loss": recordset_pb2.MetricsRecordValue( # pylint: disable=E1101 + double=1.0 + ) + } + ) + task_res = task_pb2.TaskRes( # pylint: disable=E1101 + task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", + group_id="", + run_id=0, + task=task_pb2.Task( # pylint: disable=E1101 + recordset=recordset_pb2.RecordSet( # pylint: disable=E1101 + parameters={}, + metrics={"loss": metrics_record}, + configs={}, + ) + ), + ) + + # Execute & assert + try: + validate_task_res(task_res=task_res) + except ValueError: + self.fail() + + def test_validate_task_res_missing_task(self) -> None: + """Test invalid TaskRes (missing task).""" + # Prepare + task_res = task_pb2.TaskRes( # pylint: disable=E1101 + task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", + group_id="", + run_id=0, + ) + + # Execute & assert + with self.assertRaises(ValueError): + validate_task_res(task_res=task_res) + + def test_validate_task_res_missing_recordset(self) -> None: + """Test invalid TaskRes (missing recordset).""" + # Prepare + task_res = task_pb2.TaskRes( # pylint: disable=E1101 + task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", + group_id="", + run_id=0, + task=task_pb2.Task(), # pylint: disable=E1101 + ) + + # Execute & assert + with self.assertRaises(ValueError): + validate_task_res(task_res=task_res) + + def test_validate_task_res_missing_content(self) -> None: + """Test invalid TaskRes (missing content).""" + # Prepare + task_res = task_pb2.TaskRes( # pylint: disable=E1101 + task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", + group_id="", + run_id=0, + task=task_pb2.Task( # pylint: disable=E1101 + error=error_pb2.Error( # pylint: disable=E1101 + code=0, + reason="Some reason", + ) + ), + ) + + # Execute & assert + with self.assertRaises(ValueError): + validate_task_res(task_res=task_res) From 67ca7ab7e29863343d7984b441d7b44f7acc0e70 Mon Sep 17 00:00:00 2001 From: tabdar-khan <71217662+tabdar-khan@users.noreply.github.com> Date: Thu, 28 Mar 2024 17:01:26 +0100 Subject: [PATCH 46/57] Add a pre-commit hook (#3150) --- .pre-commit-config.yaml | 18 +++++++++++++ ...-tutorial-get-started-as-a-contributor.rst | 27 +++++++++++++++++++ pyproject.toml | 1 + 3 files changed, 46 insertions(+) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new 
file mode 100644 index 000000000000..ad6cb69f3052 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,18 @@ +repos: + - repo: local + hooks: + - id: format-code + name: Format Code + entry: ./dev/format.sh + language: script + # Ensures the script runs from the repository root: + pass_filenames: false + stages: [commit] + + - id: run-tests + name: Run Tests + entry: ./dev/test.sh + language: script + # Ensures the script runs from the repository root: + pass_filenames: false + stages: [commit] diff --git a/doc/source/contributor-tutorial-get-started-as-a-contributor.rst b/doc/source/contributor-tutorial-get-started-as-a-contributor.rst index 9136fea96bf6..43f9739987ac 100644 --- a/doc/source/contributor-tutorial-get-started-as-a-contributor.rst +++ b/doc/source/contributor-tutorial-get-started-as-a-contributor.rst @@ -102,6 +102,33 @@ Run Linters and Tests $ ./dev/test.sh +Add a pre-commit hook +~~~~~~~~~~~~~~~~~~~~~ + +Developers may integrate a pre-commit hook into their workflow utilizing the `pre-commit `_ library. The pre-commit hook is configured to execute two primary operations: ``./dev/format.sh`` and ``./dev/test.sh`` scripts. + +There are multiple ways developers can use this: + +1. Install the pre-commit hook to your local git directory by simply running: + + :: + + $ pre-commit install + + - Each ``git commit`` will trigger the execution of formatting and linting/test scripts. + - If in a hurry, bypass the hook using ``--no-verify`` with the ``git commit`` command. + :: + + $ git commit --no-verify -m "Add new feature" + +2. For developers who prefer not to install the hook permanently, it is possible to execute a one-time check prior to committing changes by using the following command: + + :: + + $ pre-commit run --all-files + + This executes the formatting and linting checks/tests on all the files without modifying the default behavior of ``git commit``. 
+ Run Github Actions (CI) locally ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/pyproject.toml b/pyproject.toml index dc8b293bc880..3c211e9cf8d8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -127,6 +127,7 @@ check-wheel-contents = "==0.4.0" GitPython = "==3.1.32" PyGithub = "==2.1.1" licensecheck = "==2024" +pre-commit = "==3.5.0" [tool.isort] line_length = 88 From 531e0e31991aede82389b6342986836b568989f7 Mon Sep 17 00:00:00 2001 From: Javier Date: Thu, 28 Mar 2024 18:05:12 +0000 Subject: [PATCH 47/57] Handle `ClientApp` exception simulation (#3075) --- .../superlink/fleet/vce/backend/raybackend.py | 9 +-- .../server/superlink/fleet/vce/vce_api.py | 66 ++++++++++++------- .../superlink/fleet/vce/vce_api_test.py | 31 +-------- .../simulation/ray_transport/ray_actor.py | 8 ++- 4 files changed, 55 insertions(+), 59 deletions(-) diff --git a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py index 8ef0d54622ae..9bede09edf09 100644 --- a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py +++ b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py @@ -20,7 +20,7 @@ import ray -from flwr.client.client_app import ClientApp, LoadClientAppError +from flwr.client.client_app import ClientApp from flwr.common.context import Context from flwr.common.logger import log from flwr.common.message import Message @@ -151,7 +151,6 @@ async def process_message( ) await future - # Fetch result ( out_mssg, @@ -160,13 +159,15 @@ async def process_message( return out_mssg, updated_context - except LoadClientAppError as load_ex: + except Exception as ex: log( ERROR, "An exception was raised when processing a message by %s", self.__class__.__name__, ) - raise load_ex + # add actor back into pool + await self.pool.add_actor_back_to_pool(future) + raise ex async def terminate(self) -> None: """Terminate all actors in actor pool.""" diff --git a/src/py/flwr/server/superlink/fleet/vce/vce_api.py b/src/py/flwr/server/superlink/fleet/vce/vce_api.py index a693c968d0e8..9736ae0fb57f 100644 --- a/src/py/flwr/server/superlink/fleet/vce/vce_api.py +++ b/src/py/flwr/server/superlink/fleet/vce/vce_api.py @@ -14,9 +14,10 @@ # ============================================================================== """Fleet Simulation Engine API.""" - import asyncio import json +import sys +import time import traceback from logging import DEBUG, ERROR, INFO, WARN from typing import Callable, Dict, List, Optional @@ -24,6 +25,7 @@ from flwr.client.client_app import ClientApp, LoadClientAppError from flwr.client.node_state import NodeState from flwr.common.logger import log +from flwr.common.message import Error from flwr.common.object_ref import load_app from flwr.common.serde import message_from_taskins, message_to_taskres from flwr.proto.task_pb2 import TaskIns # pylint: disable=E0611 @@ -59,6 +61,7 @@ async def worker( """Get TaskIns from queue and pass it to an actor in the pool to execute it.""" state = state_factory.state() while True: + out_mssg = None try: task_ins: TaskIns = await queue.get() node_id = task_ins.task.consumer.node_id @@ -82,24 +85,25 @@ async def worker( task_ins.run_id, context=updated_context ) - # Convert to TaskRes - task_res = message_to_taskres(out_mssg) - # Store TaskRes in state - state.store_task_res(task_res) - except asyncio.CancelledError as e: - log(DEBUG, "Async worker: %s", e) + log(DEBUG, "Terminating async worker: %s", e) break - except LoadClientAppError as app_ex: - log(ERROR, "Async worker: %s", app_ex) - 
log(ERROR, traceback.format_exc()) - raise - + # Exceptions aren't raised but reported as an error message except Exception as ex: # pylint: disable=broad-exception-caught log(ERROR, ex) log(ERROR, traceback.format_exc()) - break + reason = str(type(ex)) + ":<'" + str(ex) + "'>" + error = Error(code=0, reason=reason) + out_mssg = message.create_error_reply(error=error) + + finally: + if out_mssg: + # Convert to TaskRes + task_res = message_to_taskres(out_mssg) + # Store TaskRes in state + task_res.task.pushed_at = time.time() + state.store_task_res(task_res) async def add_taskins_to_queue( @@ -218,7 +222,7 @@ async def run( await backend.terminate() -# pylint: disable=too-many-arguments,unused-argument,too-many-locals +# pylint: disable=too-many-arguments,unused-argument,too-many-locals,too-many-branches def start_vce( backend_name: str, backend_config_json_stream: str, @@ -300,12 +304,14 @@ def backend_fn() -> Backend: """Instantiate a Backend.""" return backend_type(backend_config, work_dir=app_dir) - log(INFO, "client_app_attr = %s", client_app_attr) - # Load ClientApp if needed def _load() -> ClientApp: if client_app_attr: + + if app_dir is not None: + sys.path.insert(0, app_dir) + app: ClientApp = load_app(client_app_attr, LoadClientAppError) if not isinstance(app, ClientApp): @@ -319,13 +325,23 @@ def _load() -> ClientApp: app_fn = _load - asyncio.run( - run( - app_fn, - backend_fn, - nodes_mapping, - state_factory, - node_states, - f_stop, + try: + # Test if ClientApp can be loaded + _ = app_fn() + + # Run main simulation loop + asyncio.run( + run( + app_fn, + backend_fn, + nodes_mapping, + state_factory, + node_states, + f_stop, + ) ) - ) + except LoadClientAppError as loadapp_ex: + f_stop.set() # set termination event + raise loadapp_ex + except Exception as ex: + raise ex diff --git a/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py b/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py index 9e063e33ff81..66c3c21326d5 100644 --- a/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py +++ b/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py @@ -27,6 +27,7 @@ from unittest import IsolatedAsyncioTestCase from uuid import UUID +from flwr.client.client_app import LoadClientAppError from flwr.common import ( DEFAULT_TTL, GetPropertiesIns, @@ -53,7 +54,6 @@ def terminate_simulation(f_stop: asyncio.Event, sleep_duration: int) -> None: def init_state_factory_nodes_mapping( num_nodes: int, num_messages: int, - erroneous_message: Optional[bool] = False, ) -> Tuple[StateFactory, NodeToPartitionMapping, Dict[UUID, float]]: """Instatiate StateFactory, register nodes and pre-insert messages in the state.""" # Register a state and a run_id in it @@ -68,7 +68,6 @@ def init_state_factory_nodes_mapping( nodes_mapping=nodes_mapping, run_id=run_id, num_messages=num_messages, - erroneous_message=erroneous_message, ) return state_factory, nodes_mapping, expected_results @@ -79,7 +78,6 @@ def register_messages_into_state( nodes_mapping: NodeToPartitionMapping, run_id: int, num_messages: int, - erroneous_message: Optional[bool] = False, ) -> Dict[UUID, float]: """Register `num_messages` into the state factory.""" state: InMemoryState = state_factory.state() # type: ignore @@ -105,11 +103,7 @@ def register_messages_into_state( dst_node_id=dst_node_id, # indicate destination node reply_to_message="", ttl=DEFAULT_TTL, - message_type=( - "a bad message" - if erroneous_message - else MessageTypeLegacy.GET_PROPERTIES - ), + message_type=MessageTypeLegacy.GET_PROPERTIES, ), ) # Convert Message to 
TaskIns @@ -200,32 +194,13 @@ def test_erroneous_client_app_attr(self) -> None: state_factory, nodes_mapping, _ = init_state_factory_nodes_mapping( num_nodes=num_nodes, num_messages=num_messages ) - with self.assertRaises(RuntimeError): + with self.assertRaises(LoadClientAppError): start_and_shutdown( client_app_attr="totally_fictitious_app:client", state_factory=state_factory, nodes_mapping=nodes_mapping, ) - def test_erroneous_messages(self) -> None: - """Test handling of error in async worker (consumer). - - We register messages which will trigger an error when handling, triggering an - error. - """ - num_messages = 100 - num_nodes = 59 - - state_factory, nodes_mapping, _ = init_state_factory_nodes_mapping( - num_nodes=num_nodes, num_messages=num_messages, erroneous_message=True - ) - - with self.assertRaises(RuntimeError): - start_and_shutdown( - state_factory=state_factory, - nodes_mapping=nodes_mapping, - ) - def test_erroneous_backend_config(self) -> None: """Backend Config should be a JSON stream.""" with self.assertRaises(JSONDecodeError): diff --git a/src/py/flwr/simulation/ray_transport/ray_actor.py b/src/py/flwr/simulation/ray_transport/ray_actor.py index 08d0576e39f0..9773203628ab 100644 --- a/src/py/flwr/simulation/ray_transport/ray_actor.py +++ b/src/py/flwr/simulation/ray_transport/ray_actor.py @@ -493,13 +493,17 @@ async def submit( self._future_to_actor[future] = actor return future + async def add_actor_back_to_pool(self, future: Any) -> None: + """Ad actor assigned to run future back into the pool.""" + actor = self._future_to_actor.pop(future) + await self.pool.put(actor) + async def fetch_result_and_return_actor_to_pool( self, future: Any ) -> Tuple[Message, Context]: """Pull result given a future and add actor back to pool.""" # Get actor that ran job - actor = self._future_to_actor.pop(future) - await self.pool.put(actor) + await self.add_actor_back_to_pool(future) # Retrieve result for object store # Instead of doing ray.get(future) we await it _, out_mssg, updated_context = await future From 7ab8df048bc5df1ff6229b8e9611889b8dd851ab Mon Sep 17 00:00:00 2001 From: Heng Pan Date: Mon, 1 Apr 2024 13:34:04 +0100 Subject: [PATCH 48/57] Support custom wait function in `RetryInvoker` (#3183) --- src/py/flwr/client/app.py | 2 +- .../client/grpc_client/connection_test.py | 2 +- src/py/flwr/common/retry_invoker.py | 37 ++++++++++++------- src/py/flwr/common/retry_invoker_test.py | 4 +- 4 files changed, 28 insertions(+), 17 deletions(-) diff --git a/src/py/flwr/client/app.py b/src/py/flwr/client/app.py index d4bd8e2e39e9..644d37060d53 100644 --- a/src/py/flwr/client/app.py +++ b/src/py/flwr/client/app.py @@ -397,7 +397,7 @@ def _load_client_app() -> ClientApp: ) retry_invoker = RetryInvoker( - wait_factory=exponential, + wait_gen_factory=exponential, recoverable_exceptions=connection_error_type, max_tries=max_retries, max_time=max_wait_time, diff --git a/src/py/flwr/client/grpc_client/connection_test.py b/src/py/flwr/client/grpc_client/connection_test.py index 061e7d4377a0..ed622f55ff1e 100644 --- a/src/py/flwr/client/grpc_client/connection_test.py +++ b/src/py/flwr/client/grpc_client/connection_test.py @@ -132,7 +132,7 @@ def run_client() -> int: server_address=f"[::]:{port}", insecure=True, retry_invoker=RetryInvoker( - wait_factory=exponential, + wait_gen_factory=exponential, recoverable_exceptions=grpc.RpcError, max_tries=1, max_time=None, diff --git a/src/py/flwr/common/retry_invoker.py b/src/py/flwr/common/retry_invoker.py index 5441e766983a..7cec319e7906 100644 --- 
a/src/py/flwr/common/retry_invoker.py +++ b/src/py/flwr/common/retry_invoker.py @@ -107,7 +107,7 @@ class RetryInvoker: Parameters ---------- - wait_factory: Callable[[], Generator[float, None, None]] + wait_gen_factory: Callable[[], Generator[float, None, None]] A generator yielding successive wait times in seconds. If the generator is finite, the giveup event will be triggered when the generator raises `StopIteration`. @@ -129,12 +129,12 @@ class RetryInvoker: data class object detailing the invocation. on_giveup: Optional[Callable[[RetryState], None]] (default: None) A callable to be executed in the event that `max_tries` or `max_time` is - exceeded, `should_giveup` returns True, or `wait_factory()` generator raises + exceeded, `should_giveup` returns True, or `wait_gen_factory()` generator raises `StopInteration`. The parameter is a data class object detailing the invocation. jitter: Optional[Callable[[float], float]] (default: full_jitter) - A function of the value yielded by `wait_factory()` returning the actual time - to wait. This function helps distribute wait times stochastically to avoid + A function of the value yielded by `wait_gen_factory()` returning the actual + time to wait. This function helps distribute wait times stochastically to avoid timing collisions across concurrent clients. Wait times are jittered by default using the `full_jitter` function. To disable jittering, pass `jitter=None`. @@ -142,6 +142,13 @@ class RetryInvoker: A function accepting an exception instance, returning whether or not to give up prematurely before other give-up conditions are evaluated. If set to None, the strategy is to never give up prematurely. + wait_function: Optional[Callable[[float], None]] (default: None) + A function that defines how to wait between retry attempts. It accepts + one argument, the wait time in seconds, allowing the use of various waiting + mechanisms (e.g., asynchronous waits or event-based synchronization) suitable + for different execution environments. If set to `None`, the `wait_function` + defaults to `time.sleep`, which is ideal for synchronous operations. Custom + functions should manage execution flow to prevent blocking or interference. 
Examples -------- @@ -159,7 +166,7 @@ class RetryInvoker: # pylint: disable-next=too-many-arguments def __init__( self, - wait_factory: Callable[[], Generator[float, None, None]], + wait_gen_factory: Callable[[], Generator[float, None, None]], recoverable_exceptions: Union[Type[Exception], Tuple[Type[Exception], ...]], max_tries: Optional[int], max_time: Optional[float], @@ -169,8 +176,9 @@ def __init__( on_giveup: Optional[Callable[[RetryState], None]] = None, jitter: Optional[Callable[[float], float]] = full_jitter, should_giveup: Optional[Callable[[Exception], bool]] = None, + wait_function: Optional[Callable[[float], None]] = None, ) -> None: - self.wait_factory = wait_factory + self.wait_gen_factory = wait_gen_factory self.recoverable_exceptions = recoverable_exceptions self.max_tries = max_tries self.max_time = max_time @@ -179,6 +187,9 @@ def __init__( self.on_giveup = on_giveup self.jitter = jitter self.should_giveup = should_giveup + if wait_function is None: + wait_function = time.sleep + self.wait_function = wait_function # pylint: disable-next=too-many-locals def invoke( @@ -212,13 +223,13 @@ def invoke( Raises ------ Exception - If the number of tries exceeds `max_tries`, if the total time - exceeds `max_time`, if `wait_factory()` generator raises `StopInteration`, + If the number of tries exceeds `max_tries`, if the total time exceeds + `max_time`, if `wait_gen_factory()` generator raises `StopInteration`, or if the `should_giveup` returns True for a raised exception. Notes ----- - The time between retries is determined by the provided `wait_factory()` + The time between retries is determined by the provided `wait_gen_factory()` generator and can optionally be jittered using the `jitter` function. The recoverable exceptions that trigger a retry, as well as conditions to stop retries, are also determined by the class's initialization parameters. 
@@ -231,13 +242,13 @@ def try_call_event_handler( handler(cast(RetryState, ref_state[0])) try_cnt = 0 - wait_generator = self.wait_factory() - start = time.time() + wait_generator = self.wait_gen_factory() + start = time.monotonic() ref_state: List[Optional[RetryState]] = [None] while True: try_cnt += 1 - elapsed_time = time.time() - start + elapsed_time = time.monotonic() - start state = RetryState( target=target, args=args, @@ -282,7 +293,7 @@ def giveup_check(_exception: Exception) -> bool: try_call_event_handler(self.on_backoff) # Sleep - time.sleep(wait_time) + self.wait_function(state.actual_wait) else: # Trigger success event try_call_event_handler(self.on_success) diff --git a/src/py/flwr/common/retry_invoker_test.py b/src/py/flwr/common/retry_invoker_test.py index e67c0641e2ba..2259ae47ded4 100644 --- a/src/py/flwr/common/retry_invoker_test.py +++ b/src/py/flwr/common/retry_invoker_test.py @@ -35,8 +35,8 @@ def failing_function() -> None: @pytest.fixture(name="mock_time") def fixture_mock_time() -> Generator[MagicMock, None, None]: - """Mock time.time for controlled testing.""" - with patch("time.time") as mock_time: + """Mock time.monotonic for controlled testing.""" + with patch("time.monotonic") as mock_time: yield mock_time From 930c88654e987fc730353a3cbc0403b66e974ab6 Mon Sep 17 00:00:00 2001 From: Javier Date: Mon, 1 Apr 2024 15:17:18 +0100 Subject: [PATCH 49/57] Set deprecated baselines to use pillow 10.2.0 (#3186) --- baselines/flwr_baselines/pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/baselines/flwr_baselines/pyproject.toml b/baselines/flwr_baselines/pyproject.toml index f0b2ac84e66e..add99938d2a3 100644 --- a/baselines/flwr_baselines/pyproject.toml +++ b/baselines/flwr_baselines/pyproject.toml @@ -51,6 +51,7 @@ wget = "^3.2" virtualenv = "^20.24.6" pandas = "^1.5.3" pyhamcrest = "^2.0.4" +pillow = "==10.2.0" [tool.poetry.dev-dependencies] isort = "==5.13.2" From 41b491b59713c1524cbd6da8ce73b6910b036d3f Mon Sep 17 00:00:00 2001 From: Javier Date: Mon, 1 Apr 2024 15:29:22 +0100 Subject: [PATCH 50/57] Add delay to Simulation Engine termination (#3184) --- src/py/flwr/server/superlink/fleet/vce/vce_api.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/py/flwr/server/superlink/fleet/vce/vce_api.py b/src/py/flwr/server/superlink/fleet/vce/vce_api.py index 9736ae0fb57f..5fec10940343 100644 --- a/src/py/flwr/server/superlink/fleet/vce/vce_api.py +++ b/src/py/flwr/server/superlink/fleet/vce/vce_api.py @@ -223,6 +223,7 @@ async def run( # pylint: disable=too-many-arguments,unused-argument,too-many-locals,too-many-branches +# pylint: disable=too-many-statements def start_vce( backend_name: str, backend_config_json_stream: str, @@ -341,6 +342,13 @@ def _load() -> ClientApp: ) ) except LoadClientAppError as loadapp_ex: + f_stop_delay = 10 + log( + ERROR, + "LoadClientAppError exception encountered. 
Terminating simulation in %is", + f_stop_delay, + ) + time.sleep(f_stop_delay) f_stop.set() # set termination event raise loadapp_ex except Exception as ex: From 9842e41615a6ae8fb47c3aac78473bf31d8cb368 Mon Sep 17 00:00:00 2001 From: Heng Pan Date: Mon, 1 Apr 2024 18:03:22 +0100 Subject: [PATCH 51/57] Send ping from SuperNode (#3181) --- .../client/grpc_rere_client/connection.py | 94 +++++++++---- src/py/flwr/client/heartbeat.py | 72 ++++++++++ src/py/flwr/client/heartbeat_test.py | 59 ++++++++ src/py/flwr/client/rest_client/connection.py | 128 ++++++++++++++---- src/py/flwr/common/constant.py | 6 + .../fleet/message_handler/message_handler.py | 3 +- .../superlink/fleet/rest_rere/rest_api.py | 28 ++++ 7 files changed, 337 insertions(+), 53 deletions(-) create mode 100644 src/py/flwr/client/heartbeat.py create mode 100644 src/py/flwr/client/heartbeat_test.py diff --git a/src/py/flwr/client/grpc_rere_client/connection.py b/src/py/flwr/client/grpc_rere_client/connection.py index e6e22998b947..06573ffaafb7 100644 --- a/src/py/flwr/client/grpc_rere_client/connection.py +++ b/src/py/flwr/client/grpc_rere_client/connection.py @@ -15,15 +15,24 @@ """Contextmanager for a gRPC request-response channel to the Flower server.""" +import random +import threading from contextlib import contextmanager from copy import copy from logging import DEBUG, ERROR from pathlib import Path -from typing import Callable, Dict, Iterator, Optional, Tuple, Union, cast +from typing import Callable, Iterator, Optional, Tuple, Union, cast +from flwr.client.heartbeat import start_ping_loop from flwr.client.message_handler.message_handler import validate_out_message from flwr.client.message_handler.task_handler import get_task_ins, validate_task_ins from flwr.common import GRPC_MAX_MESSAGE_LENGTH +from flwr.common.constant import ( + PING_BASE_MULTIPLIER, + PING_CALL_TIMEOUT, + PING_DEFAULT_INTERVAL, + PING_RANDOM_RANGE, +) from flwr.common.grpc import create_channel from flwr.common.logger import log, warn_experimental_feature from flwr.common.message import Message, Metadata @@ -32,6 +41,8 @@ from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, DeleteNodeRequest, + PingRequest, + PingResponse, PullTaskInsRequest, PushTaskResRequest, ) @@ -39,9 +50,6 @@ from flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.proto.task_pb2 import TaskIns # pylint: disable=E0611 -KEY_NODE = "node" -KEY_METADATA = "in_message_metadata" - def on_channel_state_change(channel_connectivity: str) -> None: """Log channel connectivity.""" @@ -49,7 +57,7 @@ def on_channel_state_change(channel_connectivity: str) -> None: @contextmanager -def grpc_request_response( +def grpc_request_response( # pylint: disable=R0914, R0915 server_address: str, insecure: bool, retry_invoker: RetryInvoker, @@ -107,47 +115,81 @@ def grpc_request_response( max_message_length=max_message_length, ) channel.subscribe(on_channel_state_change) - stub = FleetStub(channel) - - # Necessary state to validate messages to be sent - state: Dict[str, Optional[Metadata]] = {KEY_METADATA: None} - # Enable create_node and delete_node to store node - node_store: Dict[str, Optional[Node]] = {KEY_NODE: None} + # Shared variables for inner functions + stub = FleetStub(channel) + metadata: Optional[Metadata] = None + node: Optional[Node] = None + ping_thread: Optional[threading.Thread] = None + ping_stop_event = threading.Event() ########################################################################### - # receive/send functions + # 
ping/create_node/delete_node/receive/send functions ########################################################################### + def ping() -> None: + # Get Node + if node is None: + log(ERROR, "Node instance missing") + return + + # Construct the ping request + req = PingRequest(node=node, ping_interval=PING_DEFAULT_INTERVAL) + + # Call FleetAPI + res: PingResponse = stub.Ping(req, timeout=PING_CALL_TIMEOUT) + + # Check if success + if not res.success: + raise RuntimeError("Ping failed unexpectedly.") + + # Wait + rd = random.uniform(*PING_RANDOM_RANGE) + next_interval: float = PING_DEFAULT_INTERVAL - PING_CALL_TIMEOUT + next_interval *= PING_BASE_MULTIPLIER + rd + if not ping_stop_event.is_set(): + ping_stop_event.wait(next_interval) + def create_node() -> None: """Set create_node.""" + # Call FleetAPI create_node_request = CreateNodeRequest() create_node_response = retry_invoker.invoke( stub.CreateNode, request=create_node_request, ) - node_store[KEY_NODE] = create_node_response.node + + # Remember the node and the ping-loop thread + nonlocal node, ping_thread + node = cast(Node, create_node_response.node) + ping_thread = start_ping_loop(ping, ping_stop_event) def delete_node() -> None: """Set delete_node.""" # Get Node - if node_store[KEY_NODE] is None: + nonlocal node + if node is None: log(ERROR, "Node instance missing") return - node: Node = cast(Node, node_store[KEY_NODE]) + # Stop the ping-loop thread + ping_stop_event.set() + if ping_thread is not None: + ping_thread.join() + + # Call FleetAPI delete_node_request = DeleteNodeRequest(node=node) retry_invoker.invoke(stub.DeleteNode, request=delete_node_request) - del node_store[KEY_NODE] + # Cleanup + node = None def receive() -> Optional[Message]: """Receive next task from server.""" # Get Node - if node_store[KEY_NODE] is None: + if node is None: log(ERROR, "Node instance missing") return None - node: Node = cast(Node, node_store[KEY_NODE]) # Request instructions (task) from server request = PullTaskInsRequest(node=node) @@ -167,7 +209,8 @@ def receive() -> Optional[Message]: in_message = message_from_taskins(task_ins) if task_ins else None # Remember `metadata` of the in message - state[KEY_METADATA] = copy(in_message.metadata) if in_message else None + nonlocal metadata + metadata = copy(in_message.metadata) if in_message else None # Return the message if available return in_message @@ -175,18 +218,18 @@ def receive() -> Optional[Message]: def send(message: Message) -> None: """Send task result back to server.""" # Get Node - if node_store[KEY_NODE] is None: + if node is None: log(ERROR, "Node instance missing") return - # Get incoming message - in_metadata = state[KEY_METADATA] - if in_metadata is None: + # Get the metadata of the incoming message + nonlocal metadata + if metadata is None: log(ERROR, "No current message") return # Validate out message - if not validate_out_message(message, in_metadata): + if not validate_out_message(message, metadata): log(ERROR, "Invalid out message") return @@ -197,7 +240,8 @@ def send(message: Message) -> None: request = PushTaskResRequest(task_res_list=[task_res]) _ = retry_invoker.invoke(stub.PushTaskRes, request) - state[KEY_METADATA] = None + # Cleanup + metadata = None try: # Yield methods diff --git a/src/py/flwr/client/heartbeat.py b/src/py/flwr/client/heartbeat.py new file mode 100644 index 000000000000..0cc979ddfd13 --- /dev/null +++ b/src/py/flwr/client/heartbeat.py @@ -0,0 +1,72 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Heartbeat utility functions.""" + + +import threading +from typing import Callable + +import grpc + +from flwr.common.constant import PING_CALL_TIMEOUT +from flwr.common.retry_invoker import RetryInvoker, RetryState, exponential + + +def _ping_loop(ping_fn: Callable[[], None], stop_event: threading.Event) -> None: + def wait_fn(wait_time: float) -> None: + if not stop_event.is_set(): + stop_event.wait(wait_time) + + def on_backoff(state: RetryState) -> None: + err = state.exception + if not isinstance(err, grpc.RpcError): + return + status_code = err.code() + # If ping call timeout is triggered + if status_code == grpc.StatusCode.DEADLINE_EXCEEDED: + # Avoid long wait time. + if state.actual_wait is None: + return + state.actual_wait = max(state.actual_wait - PING_CALL_TIMEOUT, 0.0) + + def wrapped_ping() -> None: + if not stop_event.is_set(): + ping_fn() + + retrier = RetryInvoker( + exponential, + grpc.RpcError, + max_tries=None, + max_time=None, + on_backoff=on_backoff, + wait_function=wait_fn, + ) + while not stop_event.is_set(): + retrier.invoke(wrapped_ping) + + +def start_ping_loop( + ping_fn: Callable[[], None], stop_event: threading.Event +) -> threading.Thread: + """Start a ping loop in a separate thread. + + This function initializes a new thread that runs a ping loop, allowing for + asynchronous ping operations. The loop can be terminated through the provided stop + event. + """ + thread = threading.Thread(target=_ping_loop, args=(ping_fn, stop_event)) + thread.start() + + return thread diff --git a/src/py/flwr/client/heartbeat_test.py b/src/py/flwr/client/heartbeat_test.py new file mode 100644 index 000000000000..286429e075b1 --- /dev/null +++ b/src/py/flwr/client/heartbeat_test.py @@ -0,0 +1,59 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Unit tests for heartbeat utility functions.""" + + +import threading +import time +import unittest +from unittest.mock import MagicMock + +from .heartbeat import start_ping_loop + + +class TestStartPingLoopWithFailures(unittest.TestCase): + """Test heartbeat utility functions.""" + + def test_ping_loop_terminates(self) -> None: + """Test if the ping loop thread terminates when flagged.""" + # Prepare + ping_fn = MagicMock() + stop_event = threading.Event() + + # Execute + thread = start_ping_loop(ping_fn, stop_event) + time.sleep(1) + stop_event.set() + thread.join(timeout=1) + + # Assert + self.assertTrue(ping_fn.called) + self.assertFalse(thread.is_alive()) + + def test_ping_loop_with_failures_terminates(self) -> None: + """Test if the ping loop thread with failures terminates when flagged.""" + # Prepare + ping_fn = MagicMock(side_effect=RuntimeError()) + stop_event = threading.Event() + + # Execute + thread = start_ping_loop(ping_fn, stop_event) + time.sleep(1) + stop_event.set() + thread.join(timeout=1) + + # Assert + self.assertTrue(ping_fn.called) + self.assertFalse(thread.is_alive()) diff --git a/src/py/flwr/client/rest_client/connection.py b/src/py/flwr/client/rest_client/connection.py index d2cc71ba3b3f..514635103f01 100644 --- a/src/py/flwr/client/rest_client/connection.py +++ b/src/py/flwr/client/rest_client/connection.py @@ -15,16 +15,25 @@ """Contextmanager for a REST request-response channel to the Flower server.""" +import random import sys +import threading from contextlib import contextmanager from copy import copy from logging import ERROR, INFO, WARN -from typing import Callable, Dict, Iterator, Optional, Tuple, Union, cast +from typing import Callable, Iterator, Optional, Tuple, Union +from flwr.client.heartbeat import start_ping_loop from flwr.client.message_handler.message_handler import validate_out_message from flwr.client.message_handler.task_handler import get_task_ins, validate_task_ins from flwr.common import GRPC_MAX_MESSAGE_LENGTH -from flwr.common.constant import MISSING_EXTRA_REST +from flwr.common.constant import ( + MISSING_EXTRA_REST, + PING_BASE_MULTIPLIER, + PING_CALL_TIMEOUT, + PING_DEFAULT_INTERVAL, + PING_RANDOM_RANGE, +) from flwr.common.logger import log from flwr.common.message import Message, Metadata from flwr.common.retry_invoker import RetryInvoker @@ -33,6 +42,8 @@ CreateNodeRequest, CreateNodeResponse, DeleteNodeRequest, + PingRequest, + PingResponse, PullTaskInsRequest, PullTaskInsResponse, PushTaskResRequest, @@ -47,19 +58,15 @@ sys.exit(MISSING_EXTRA_REST) -KEY_NODE = "node" -KEY_METADATA = "in_message_metadata" - - PATH_CREATE_NODE: str = "api/v0/fleet/create-node" PATH_DELETE_NODE: str = "api/v0/fleet/delete-node" PATH_PULL_TASK_INS: str = "api/v0/fleet/pull-task-ins" PATH_PUSH_TASK_RES: str = "api/v0/fleet/push-task-res" +PATH_PING: str = "api/v0/fleet/ping" @contextmanager -# pylint: disable-next=too-many-statements -def http_request_response( +def http_request_response( # pylint: disable=R0914, R0915 server_address: str, insecure: bool, # pylint: disable=unused-argument retry_invoker: RetryInvoker, @@ -127,16 +134,71 @@ def http_request_response( "must be provided as a string path to the client.", ) - # Necessary state to validate messages to be sent - state: Dict[str, Optional[Metadata]] = {KEY_METADATA: None} - - # Enable create_node and delete_node to store node - node_store: Dict[str, Optional[Node]] = {KEY_NODE: None} + # Shared variables for 
inner functions + metadata: Optional[Metadata] = None + node: Optional[Node] = None + ping_thread: Optional[threading.Thread] = None + ping_stop_event = threading.Event() ########################################################################### - # receive/send functions + # ping/create_node/delete_node/receive/send functions ########################################################################### + def ping() -> None: + # Get Node + if node is None: + log(ERROR, "Node instance missing") + return + + # Construct the ping request + req = PingRequest(node=node, ping_interval=PING_DEFAULT_INTERVAL) + req_bytes: bytes = req.SerializeToString() + + # Send the request + res = requests.post( + url=f"{base_url}/{PATH_PING}", + headers={ + "Accept": "application/protobuf", + "Content-Type": "application/protobuf", + }, + data=req_bytes, + verify=verify, + timeout=PING_CALL_TIMEOUT, + ) + + # Check status code and headers + if res.status_code != 200: + return + if "content-type" not in res.headers: + log( + WARN, + "[Node] POST /%s: missing header `Content-Type`", + PATH_PULL_TASK_INS, + ) + return + if res.headers["content-type"] != "application/protobuf": + log( + WARN, + "[Node] POST /%s: header `Content-Type` has wrong value", + PATH_PULL_TASK_INS, + ) + return + + # Deserialize ProtoBuf from bytes + ping_res = PingResponse() + ping_res.ParseFromString(res.content) + + # Check if success + if not ping_res.success: + raise RuntimeError("Ping failed unexpectedly.") + + # Wait + rd = random.uniform(*PING_RANDOM_RANGE) + next_interval: float = PING_DEFAULT_INTERVAL - PING_CALL_TIMEOUT + next_interval *= PING_BASE_MULTIPLIER + rd + if not ping_stop_event.is_set(): + ping_stop_event.wait(next_interval) + def create_node() -> None: """Set create_node.""" create_node_req_proto = CreateNodeRequest() @@ -175,15 +237,25 @@ def create_node() -> None: # Deserialize ProtoBuf from bytes create_node_response_proto = CreateNodeResponse() create_node_response_proto.ParseFromString(res.content) - # pylint: disable-next=no-member - node_store[KEY_NODE] = create_node_response_proto.node + + # Remember the node and the ping-loop thread + nonlocal node, ping_thread + node = create_node_response_proto.node + ping_thread = start_ping_loop(ping, ping_stop_event) def delete_node() -> None: """Set delete_node.""" - if node_store[KEY_NODE] is None: + nonlocal node + if node is None: log(ERROR, "Node instance missing") return - node: Node = cast(Node, node_store[KEY_NODE]) + + # Stop the ping-loop thread + ping_stop_event.set() + if ping_thread is not None: + ping_thread.join() + + # Send DeleteNode request delete_node_req_proto = DeleteNodeRequest(node=node) delete_node_req_req_bytes: bytes = delete_node_req_proto.SerializeToString() res = retry_invoker.invoke( @@ -215,13 +287,15 @@ def delete_node() -> None: PATH_PULL_TASK_INS, ) + # Cleanup + node = None + def receive() -> Optional[Message]: """Receive next task from server.""" # Get Node - if node_store[KEY_NODE] is None: + if node is None: log(ERROR, "Node instance missing") return None - node: Node = cast(Node, node_store[KEY_NODE]) # Request instructions (task) from server pull_task_ins_req_proto = PullTaskInsRequest(node=node) @@ -273,29 +347,29 @@ def receive() -> Optional[Message]: task_ins = None # Return the Message if available + nonlocal metadata message = None - state[KEY_METADATA] = None if task_ins is not None: message = message_from_taskins(task_ins) - state[KEY_METADATA] = copy(message.metadata) + metadata = copy(message.metadata) log(INFO, "[Node] 
POST /%s: success", PATH_PULL_TASK_INS) return message def send(message: Message) -> None: """Send task result back to server.""" # Get Node - if node_store[KEY_NODE] is None: + if node is None: log(ERROR, "Node instance missing") return # Get incoming message - in_metadata = state[KEY_METADATA] - if in_metadata is None: + nonlocal metadata + if metadata is None: log(ERROR, "No current message") return # Validate out message - if not validate_out_message(message, in_metadata): + if not validate_out_message(message, metadata): log(ERROR, "Invalid out message") return @@ -321,7 +395,7 @@ def send(message: Message) -> None: timeout=None, ) - state[KEY_METADATA] = None + metadata = None # Check status code and headers if res.status_code != 200: diff --git a/src/py/flwr/common/constant.py b/src/py/flwr/common/constant.py index 7d30a10f5881..99ba2d1d1c63 100644 --- a/src/py/flwr/common/constant.py +++ b/src/py/flwr/common/constant.py @@ -36,6 +36,12 @@ TRANSPORT_TYPE_VCE, ] +# Constants for ping +PING_DEFAULT_INTERVAL = 30 +PING_CALL_TIMEOUT = 5 +PING_BASE_MULTIPLIER = 0.8 +PING_RANDOM_RANGE = (-0.1, 0.1) + class MessageType: """Message type.""" diff --git a/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py index d4e63a8f2d46..9fa7656198e5 100644 --- a/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py +++ b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py @@ -63,7 +63,8 @@ def ping( state: State, # pylint: disable=unused-argument ) -> PingResponse: """.""" - return PingResponse(success=True) + res = state.acknowledge_ping(request.node.node_id, request.ping_interval) + return PingResponse(success=res) def pull_task_ins(request: PullTaskInsRequest, state: State) -> PullTaskInsResponse: diff --git a/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py b/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py index b022b34c68c8..33d17ef1d579 100644 --- a/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py +++ b/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py @@ -21,6 +21,7 @@ from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, DeleteNodeRequest, + PingRequest, PullTaskInsRequest, PushTaskResRequest, ) @@ -152,11 +153,38 @@ async def push_task_res(request: Request) -> Response: # Check if token is need ) +async def ping(request: Request) -> Response: + """Ping.""" + _check_headers(request.headers) + + # Get the request body as raw bytes + ping_request_bytes: bytes = await request.body() + + # Deserialize ProtoBuf + ping_request_proto = PingRequest() + ping_request_proto.ParseFromString(ping_request_bytes) + + # Get state from app + state: State = app.state.STATE_FACTORY.state() + + # Handle message + ping_response_proto = message_handler.ping(request=ping_request_proto, state=state) + + # Return serialized ProtoBuf + ping_response_bytes = ping_response_proto.SerializeToString() + return Response( + status_code=200, + content=ping_response_bytes, + headers={"Content-Type": "application/protobuf"}, + ) + + routes = [ Route("/api/v0/fleet/create-node", create_node, methods=["POST"]), Route("/api/v0/fleet/delete-node", delete_node, methods=["POST"]), Route("/api/v0/fleet/pull-task-ins", pull_task_ins, methods=["POST"]), Route("/api/v0/fleet/push-task-res", push_task_res, methods=["POST"]), + Route("/api/v0/fleet/ping", ping, methods=["POST"]), ] app: Starlette = Starlette( From 94204242a737368926e0e48342ffe025dc7b3409 Mon Sep 17 
00:00:00 2001 From: "Daniel J. Beutel" Date: Mon, 1 Apr 2024 19:42:54 +0200 Subject: [PATCH 52/57] Remove experimental / add preview feature warnings (#3187) --- src/py/flwr/client/app.py | 4 +--- src/py/flwr/client/client_app.py | 7 +++++++ src/py/flwr/client/grpc_rere_client/connection.py | 4 +--- src/py/flwr/common/logger.py | 8 ++++---- src/py/flwr/server/server_app.py | 3 +++ 5 files changed, 16 insertions(+), 10 deletions(-) diff --git a/src/py/flwr/client/app.py b/src/py/flwr/client/app.py index 644d37060d53..7104ba267f57 100644 --- a/src/py/flwr/client/app.py +++ b/src/py/flwr/client/app.py @@ -36,7 +36,7 @@ TRANSPORT_TYPES, ) from flwr.common.exit_handlers import register_exit_handlers -from flwr.common.logger import log, warn_deprecated_feature, warn_experimental_feature +from flwr.common.logger import log, warn_deprecated_feature from flwr.common.message import Error from flwr.common.object_ref import load_app, validate from flwr.common.retry_invoker import RetryInvoker, exponential @@ -385,8 +385,6 @@ def _load_client_app() -> ClientApp: return ClientApp(client_fn=client_fn) load_client_app_fn = _load_client_app - else: - warn_experimental_feature("`load_client_app_fn`") # At this point, only `load_client_app_fn` should be used # Both `client` and `client_fn` must not be used directly diff --git a/src/py/flwr/client/client_app.py b/src/py/flwr/client/client_app.py index 0b56219807c6..79e7720cbb8e 100644 --- a/src/py/flwr/client/client_app.py +++ b/src/py/flwr/client/client_app.py @@ -23,6 +23,7 @@ from flwr.client.mod.utils import make_ffn from flwr.client.typing import ClientFn, Mod from flwr.common import Context, Message, MessageType +from flwr.common.logger import warn_preview_feature from .typing import ClientAppCallable @@ -123,6 +124,8 @@ def train_decorator(train_fn: ClientAppCallable) -> ClientAppCallable: if self._call: raise _registration_error(MessageType.TRAIN) + warn_preview_feature("ClientApp-register-train-function") + # Register provided function with the ClientApp object # Wrap mods around the wrapped step function self._train = make_ffn(train_fn, self._mods) @@ -151,6 +154,8 @@ def evaluate_decorator(evaluate_fn: ClientAppCallable) -> ClientAppCallable: if self._call: raise _registration_error(MessageType.EVALUATE) + warn_preview_feature("ClientApp-register-evaluate-function") + # Register provided function with the ClientApp object # Wrap mods around the wrapped step function self._evaluate = make_ffn(evaluate_fn, self._mods) @@ -179,6 +184,8 @@ def query_decorator(query_fn: ClientAppCallable) -> ClientAppCallable: if self._call: raise _registration_error(MessageType.QUERY) + warn_preview_feature("ClientApp-register-query-function") + # Register provided function with the ClientApp object # Wrap mods around the wrapped step function self._query = make_ffn(query_fn, self._mods) diff --git a/src/py/flwr/client/grpc_rere_client/connection.py b/src/py/flwr/client/grpc_rere_client/connection.py index 06573ffaafb7..ba8b0d022685 100644 --- a/src/py/flwr/client/grpc_rere_client/connection.py +++ b/src/py/flwr/client/grpc_rere_client/connection.py @@ -34,7 +34,7 @@ PING_RANDOM_RANGE, ) from flwr.common.grpc import create_channel -from flwr.common.logger import log, warn_experimental_feature +from flwr.common.logger import log from flwr.common.message import Message, Metadata from flwr.common.retry_invoker import RetryInvoker from flwr.common.serde import message_from_taskins, message_to_taskres @@ -103,8 +103,6 @@ def grpc_request_response( # pylint: disable=R0914, 
R0915 create_node : Optional[Callable] delete_node : Optional[Callable] """ - warn_experimental_feature("`grpc-rere`") - if isinstance(root_certificates, str): root_certificates = Path(root_certificates).read_bytes() diff --git a/src/py/flwr/common/logger.py b/src/py/flwr/common/logger.py index 2bc41773ed61..258809ce062f 100644 --- a/src/py/flwr/common/logger.py +++ b/src/py/flwr/common/logger.py @@ -164,13 +164,13 @@ def configure( log = logger.log # pylint: disable=invalid-name -def warn_experimental_feature(name: str) -> None: - """Warn the user when they use an experimental feature.""" +def warn_preview_feature(name: str) -> None: + """Warn the user when they use a preview feature.""" log( WARN, - """EXPERIMENTAL FEATURE: %s + """PREVIEW FEATURE: %s - This is an experimental feature. It could change significantly or be removed + This is a preview feature. It could change significantly or be removed entirely in future versions of Flower. """, name, diff --git a/src/py/flwr/server/server_app.py b/src/py/flwr/server/server_app.py index 1b2eab87fdaa..ea2eb3fd1a69 100644 --- a/src/py/flwr/server/server_app.py +++ b/src/py/flwr/server/server_app.py @@ -18,6 +18,7 @@ from typing import Callable, Optional from flwr.common import Context, RecordSet +from flwr.common.logger import warn_preview_feature from flwr.server.strategy import Strategy from .client_manager import ClientManager @@ -120,6 +121,8 @@ def main_decorator(main_fn: ServerAppCallable) -> ServerAppCallable: """, ) + warn_preview_feature("ServerApp-register-main-function") + # Register provided function with the ServerApp object self._main = main_fn From 29ac32f7029bad2427f3527180b57110c4f5b6d4 Mon Sep 17 00:00:00 2001 From: Heng Pan Date: Tue, 2 Apr 2024 09:49:59 +0100 Subject: [PATCH 53/57] Add ping interval to create node request (#3189) --- src/proto/flwr/proto/fleet.proto | 2 +- .../client/grpc_rere_client/connection.py | 2 +- src/py/flwr/client/rest_client/connection.py | 2 +- src/py/flwr/proto/fleet_pb2.py | 52 +++++++++---------- src/py/flwr/proto/fleet_pb2.pyi | 5 ++ 5 files changed, 34 insertions(+), 29 deletions(-) diff --git a/src/proto/flwr/proto/fleet.proto b/src/proto/flwr/proto/fleet.proto index fa65f3ee9fed..0ef0fea5c6d6 100644 --- a/src/proto/flwr/proto/fleet.proto +++ b/src/proto/flwr/proto/fleet.proto @@ -37,7 +37,7 @@ service Fleet { } // CreateNode messages -message CreateNodeRequest {} +message CreateNodeRequest { double ping_interval = 1; } message CreateNodeResponse { Node node = 1; } // DeleteNode messages diff --git a/src/py/flwr/client/grpc_rere_client/connection.py b/src/py/flwr/client/grpc_rere_client/connection.py index ba8b0d022685..25e075f40af7 100644 --- a/src/py/flwr/client/grpc_rere_client/connection.py +++ b/src/py/flwr/client/grpc_rere_client/connection.py @@ -151,7 +151,7 @@ def ping() -> None: def create_node() -> None: """Set create_node.""" # Call FleetAPI - create_node_request = CreateNodeRequest() + create_node_request = CreateNodeRequest(ping_interval=PING_DEFAULT_INTERVAL) create_node_response = retry_invoker.invoke( stub.CreateNode, request=create_node_request, diff --git a/src/py/flwr/client/rest_client/connection.py b/src/py/flwr/client/rest_client/connection.py index 514635103f01..0e6a7ef554e6 100644 --- a/src/py/flwr/client/rest_client/connection.py +++ b/src/py/flwr/client/rest_client/connection.py @@ -201,7 +201,7 @@ def ping() -> None: def create_node() -> None: """Set create_node.""" - create_node_req_proto = CreateNodeRequest() + create_node_req_proto = 
CreateNodeRequest(ping_interval=PING_DEFAULT_INTERVAL) create_node_req_bytes: bytes = create_node_req_proto.SerializeToString() res = retry_invoker.invoke( diff --git a/src/py/flwr/proto/fleet_pb2.py b/src/py/flwr/proto/fleet_pb2.py index 546987f1c807..06d90c5d1a44 100644 --- a/src/py/flwr/proto/fleet_pb2.py +++ b/src/py/flwr/proto/fleet_pb2.py @@ -16,7 +16,7 @@ from flwr.proto import task_pb2 as flwr_dot_proto_dot_task__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16\x66lwr/proto/fleet.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\"\x13\n\x11\x43reateNodeRequest\"4\n\x12\x43reateNodeResponse\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"3\n\x11\x44\x65leteNodeRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"\x14\n\x12\x44\x65leteNodeResponse\"D\n\x0bPingRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x15\n\rping_interval\x18\x02 \x01(\x01\"\x1f\n\x0cPingResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\"F\n\x12PullTaskInsRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"k\n\x13PullTaskInsResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12*\n\rtask_ins_list\x18\x02 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"@\n\x12PushTaskResRequest\x12*\n\rtask_res_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskRes\"\xae\x01\n\x13PushTaskResResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12=\n\x07results\x18\x02 \x03(\x0b\x32,.flwr.proto.PushTaskResResponse.ResultsEntry\x1a.\n\x0cResultsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\r:\x02\x38\x01\"\x1e\n\tReconnect\x12\x11\n\treconnect\x18\x01 \x01(\x04\x32\x86\x03\n\x05\x46leet\x12M\n\nCreateNode\x12\x1d.flwr.proto.CreateNodeRequest\x1a\x1e.flwr.proto.CreateNodeResponse\"\x00\x12M\n\nDeleteNode\x12\x1d.flwr.proto.DeleteNodeRequest\x1a\x1e.flwr.proto.DeleteNodeResponse\"\x00\x12;\n\x04Ping\x12\x17.flwr.proto.PingRequest\x1a\x18.flwr.proto.PingResponse\"\x00\x12P\n\x0bPullTaskIns\x12\x1e.flwr.proto.PullTaskInsRequest\x1a\x1f.flwr.proto.PullTaskInsResponse\"\x00\x12P\n\x0bPushTaskRes\x12\x1e.flwr.proto.PushTaskResRequest\x1a\x1f.flwr.proto.PushTaskResResponse\"\x00\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16\x66lwr/proto/fleet.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\"*\n\x11\x43reateNodeRequest\x12\x15\n\rping_interval\x18\x01 \x01(\x01\"4\n\x12\x43reateNodeResponse\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"3\n\x11\x44\x65leteNodeRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"\x14\n\x12\x44\x65leteNodeResponse\"D\n\x0bPingRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x15\n\rping_interval\x18\x02 \x01(\x01\"\x1f\n\x0cPingResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\"F\n\x12PullTaskInsRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"k\n\x13PullTaskInsResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12*\n\rtask_ins_list\x18\x02 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"@\n\x12PushTaskResRequest\x12*\n\rtask_res_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskRes\"\xae\x01\n\x13PushTaskResResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12=\n\x07results\x18\x02 
\x03(\x0b\x32,.flwr.proto.PushTaskResResponse.ResultsEntry\x1a.\n\x0cResultsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\r:\x02\x38\x01\"\x1e\n\tReconnect\x12\x11\n\treconnect\x18\x01 \x01(\x04\x32\x86\x03\n\x05\x46leet\x12M\n\nCreateNode\x12\x1d.flwr.proto.CreateNodeRequest\x1a\x1e.flwr.proto.CreateNodeResponse\"\x00\x12M\n\nDeleteNode\x12\x1d.flwr.proto.DeleteNodeRequest\x1a\x1e.flwr.proto.DeleteNodeResponse\"\x00\x12;\n\x04Ping\x12\x17.flwr.proto.PingRequest\x1a\x18.flwr.proto.PingResponse\"\x00\x12P\n\x0bPullTaskIns\x12\x1e.flwr.proto.PullTaskInsRequest\x1a\x1f.flwr.proto.PullTaskInsResponse\"\x00\x12P\n\x0bPushTaskRes\x12\x1e.flwr.proto.PushTaskResRequest\x1a\x1f.flwr.proto.PushTaskResResponse\"\x00\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -26,29 +26,29 @@ _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._options = None _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_options = b'8\001' _globals['_CREATENODEREQUEST']._serialized_start=84 - _globals['_CREATENODEREQUEST']._serialized_end=103 - _globals['_CREATENODERESPONSE']._serialized_start=105 - _globals['_CREATENODERESPONSE']._serialized_end=157 - _globals['_DELETENODEREQUEST']._serialized_start=159 - _globals['_DELETENODEREQUEST']._serialized_end=210 - _globals['_DELETENODERESPONSE']._serialized_start=212 - _globals['_DELETENODERESPONSE']._serialized_end=232 - _globals['_PINGREQUEST']._serialized_start=234 - _globals['_PINGREQUEST']._serialized_end=302 - _globals['_PINGRESPONSE']._serialized_start=304 - _globals['_PINGRESPONSE']._serialized_end=335 - _globals['_PULLTASKINSREQUEST']._serialized_start=337 - _globals['_PULLTASKINSREQUEST']._serialized_end=407 - _globals['_PULLTASKINSRESPONSE']._serialized_start=409 - _globals['_PULLTASKINSRESPONSE']._serialized_end=516 - _globals['_PUSHTASKRESREQUEST']._serialized_start=518 - _globals['_PUSHTASKRESREQUEST']._serialized_end=582 - _globals['_PUSHTASKRESRESPONSE']._serialized_start=585 - _globals['_PUSHTASKRESRESPONSE']._serialized_end=759 - _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_start=713 - _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_end=759 - _globals['_RECONNECT']._serialized_start=761 - _globals['_RECONNECT']._serialized_end=791 - _globals['_FLEET']._serialized_start=794 - _globals['_FLEET']._serialized_end=1184 + _globals['_CREATENODEREQUEST']._serialized_end=126 + _globals['_CREATENODERESPONSE']._serialized_start=128 + _globals['_CREATENODERESPONSE']._serialized_end=180 + _globals['_DELETENODEREQUEST']._serialized_start=182 + _globals['_DELETENODEREQUEST']._serialized_end=233 + _globals['_DELETENODERESPONSE']._serialized_start=235 + _globals['_DELETENODERESPONSE']._serialized_end=255 + _globals['_PINGREQUEST']._serialized_start=257 + _globals['_PINGREQUEST']._serialized_end=325 + _globals['_PINGRESPONSE']._serialized_start=327 + _globals['_PINGRESPONSE']._serialized_end=358 + _globals['_PULLTASKINSREQUEST']._serialized_start=360 + _globals['_PULLTASKINSREQUEST']._serialized_end=430 + _globals['_PULLTASKINSRESPONSE']._serialized_start=432 + _globals['_PULLTASKINSRESPONSE']._serialized_end=539 + _globals['_PUSHTASKRESREQUEST']._serialized_start=541 + _globals['_PUSHTASKRESREQUEST']._serialized_end=605 + _globals['_PUSHTASKRESRESPONSE']._serialized_start=608 + _globals['_PUSHTASKRESRESPONSE']._serialized_end=782 + _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_start=736 + _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_end=782 + 
_globals['_RECONNECT']._serialized_start=784 + _globals['_RECONNECT']._serialized_end=814 + _globals['_FLEET']._serialized_start=817 + _globals['_FLEET']._serialized_end=1207 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/fleet_pb2.pyi b/src/py/flwr/proto/fleet_pb2.pyi index e5c5b7366464..5989f45c5c60 100644 --- a/src/py/flwr/proto/fleet_pb2.pyi +++ b/src/py/flwr/proto/fleet_pb2.pyi @@ -16,8 +16,13 @@ DESCRIPTOR: google.protobuf.descriptor.FileDescriptor class CreateNodeRequest(google.protobuf.message.Message): """CreateNode messages""" DESCRIPTOR: google.protobuf.descriptor.Descriptor + PING_INTERVAL_FIELD_NUMBER: builtins.int + ping_interval: builtins.float def __init__(self, + *, + ping_interval: builtins.float = ..., ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["ping_interval",b"ping_interval"]) -> None: ... global___CreateNodeRequest = CreateNodeRequest class CreateNodeResponse(google.protobuf.message.Message): From 01735671818dd31ba1d0318db7b366f0e84f1a01 Mon Sep 17 00:00:00 2001 From: Javier Date: Tue, 2 Apr 2024 10:04:13 +0100 Subject: [PATCH 54/57] Add minimal `ErrorCodes` definition (#3185) Co-authored-by: Daniel J. Beutel --- src/py/flwr/common/constant.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/py/flwr/common/constant.py b/src/py/flwr/common/constant.py index 99ba2d1d1c63..3ee60f6222f9 100644 --- a/src/py/flwr/common/constant.py +++ b/src/py/flwr/common/constant.py @@ -74,3 +74,14 @@ class SType: def __new__(cls) -> SType: """Prevent instantiation.""" raise TypeError(f"{cls.__name__} cannot be instantiated.") + + +class ErrorCode: + """Error codes for Message's Error.""" + + UNKNOWN = 0 + CLIENT_APP_RAISED_EXCEPTION = 1 + + def __new__(cls) -> ErrorCode: + """Prevent instantiation.""" + raise TypeError(f"{cls.__name__} cannot be instantiated.") From 4b5701707fc6a5bc7ab82bb93a8e2e657c159918 Mon Sep 17 00:00:00 2001 From: Heng Pan Date: Tue, 2 Apr 2024 11:10:15 +0100 Subject: [PATCH 55/57] Set `ping_interval` in `State.create_node` (#3190) --- src/py/flwr/common/constant.py | 1 + .../superlink/fleet/message_handler/message_handler.py | 2 +- src/py/flwr/server/superlink/fleet/vce/vce_api.py | 3 ++- src/py/flwr/server/superlink/state/in_memory_state.py | 6 ++---- src/py/flwr/server/superlink/state/sqlite_state.py | 6 ++---- src/py/flwr/server/superlink/state/state.py | 2 +- src/py/flwr/server/superlink/state/state_test.py | 8 ++++---- 7 files changed, 13 insertions(+), 15 deletions(-) diff --git a/src/py/flwr/common/constant.py b/src/py/flwr/common/constant.py index 3ee60f6222f9..dd100ba25d25 100644 --- a/src/py/flwr/common/constant.py +++ b/src/py/flwr/common/constant.py @@ -41,6 +41,7 @@ PING_CALL_TIMEOUT = 5 PING_BASE_MULTIPLIER = 0.8 PING_RANDOM_RANGE = (-0.1, 0.1) +PING_MAX_INTERVAL = 1e300 class MessageType: diff --git a/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py index 9fa7656198e5..39edd606b464 100644 --- a/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py +++ b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py @@ -43,7 +43,7 @@ def create_node( ) -> CreateNodeResponse: """.""" # Create node - node_id = state.create_node() + node_id = state.create_node(ping_interval=request.ping_interval) return CreateNodeResponse(node=Node(node_id=node_id, anonymous=False)) diff --git a/src/py/flwr/server/superlink/fleet/vce/vce_api.py 
b/src/py/flwr/server/superlink/fleet/vce/vce_api.py index 5fec10940343..ea74bf492ab9 100644 --- a/src/py/flwr/server/superlink/fleet/vce/vce_api.py +++ b/src/py/flwr/server/superlink/fleet/vce/vce_api.py @@ -24,6 +24,7 @@ from flwr.client.client_app import ClientApp, LoadClientAppError from flwr.client.node_state import NodeState +from flwr.common.constant import PING_MAX_INTERVAL from flwr.common.logger import log from flwr.common.message import Error from flwr.common.object_ref import load_app @@ -43,7 +44,7 @@ def _register_nodes( nodes_mapping: NodeToPartitionMapping = {} state = state_factory.state() for i in range(num_nodes): - node_id = state.create_node() + node_id = state.create_node(ping_interval=PING_MAX_INTERVAL) nodes_mapping[node_id] = i log(INFO, "Registered %i nodes", len(nodes_mapping)) return nodes_mapping diff --git a/src/py/flwr/server/superlink/state/in_memory_state.py b/src/py/flwr/server/superlink/state/in_memory_state.py index 6fc57707ac36..2ce6dcd4599a 100644 --- a/src/py/flwr/server/superlink/state/in_memory_state.py +++ b/src/py/flwr/server/superlink/state/in_memory_state.py @@ -182,16 +182,14 @@ def num_task_res(self) -> int: """ return len(self.task_res_store) - def create_node(self) -> int: + def create_node(self, ping_interval: float) -> int: """Create, store in state, and return `node_id`.""" # Sample a random int64 as node_id node_id: int = int.from_bytes(os.urandom(8), "little", signed=True) with self.lock: if node_id not in self.node_ids: - # Default ping interval is 30s - # TODO: change 1e9 to 30s # pylint: disable=W0511 - self.node_ids[node_id] = (time.time() + 1e9, 1e9) + self.node_ids[node_id] = (time.time() + ping_interval, ping_interval) return node_id log(ERROR, "Unexpected node registration failure.") return 0 diff --git a/src/py/flwr/server/superlink/state/sqlite_state.py b/src/py/flwr/server/superlink/state/sqlite_state.py index 6996d51d2a9b..b68d19bd96d9 100644 --- a/src/py/flwr/server/superlink/state/sqlite_state.py +++ b/src/py/flwr/server/superlink/state/sqlite_state.py @@ -468,7 +468,7 @@ def delete_tasks(self, task_ids: Set[UUID]) -> None: return None - def create_node(self) -> int: + def create_node(self, ping_interval: float) -> int: """Create, store in state, and return `node_id`.""" # Sample a random int64 as node_id node_id: int = int.from_bytes(os.urandom(8), "little", signed=True) @@ -478,9 +478,7 @@ def create_node(self) -> int: ) try: - # Default ping interval is 30s - # TODO: change 1e9 to 30s # pylint: disable=W0511 - self.query(query, (node_id, time.time() + 1e9, 1e9)) + self.query(query, (node_id, time.time() + ping_interval, ping_interval)) except sqlite3.IntegrityError: log(ERROR, "Unexpected node registration failure.") return 0 diff --git a/src/py/flwr/server/superlink/state/state.py b/src/py/flwr/server/superlink/state/state.py index 313290eb1022..b356cd47befa 100644 --- a/src/py/flwr/server/superlink/state/state.py +++ b/src/py/flwr/server/superlink/state/state.py @@ -132,7 +132,7 @@ def delete_tasks(self, task_ids: Set[UUID]) -> None: """Delete all delivered TaskIns/TaskRes pairs.""" @abc.abstractmethod - def create_node(self) -> int: + def create_node(self, ping_interval: float) -> int: """Create, store in state, and return `node_id`.""" @abc.abstractmethod diff --git a/src/py/flwr/server/superlink/state/state_test.py b/src/py/flwr/server/superlink/state/state_test.py index 1757cfac4255..8e49a380bb16 100644 --- a/src/py/flwr/server/superlink/state/state_test.py +++ b/src/py/flwr/server/superlink/state/state_test.py @@ 
-319,7 +319,7 @@ def test_create_node_and_get_nodes(self) -> None: # Execute for _ in range(10): - node_ids.append(state.create_node()) + node_ids.append(state.create_node(ping_interval=10)) retrieved_node_ids = state.get_nodes(run_id) # Assert @@ -331,7 +331,7 @@ def test_delete_node(self) -> None: # Prepare state: State = self.state_factory() run_id = state.create_run() - node_id = state.create_node() + node_id = state.create_node(ping_interval=10) # Execute state.delete_node(node_id) @@ -346,7 +346,7 @@ def test_get_nodes_invalid_run_id(self) -> None: state: State = self.state_factory() state.create_run() invalid_run_id = 61016 - state.create_node() + state.create_node(ping_interval=10) # Execute retrieved_node_ids = state.get_nodes(invalid_run_id) @@ -399,7 +399,7 @@ def test_acknowledge_ping(self) -> None: # Prepare state: State = self.state_factory() run_id = state.create_run() - node_ids = [state.create_node() for _ in range(100)] + node_ids = [state.create_node(ping_interval=10) for _ in range(100)] for node_id in node_ids[:70]: state.acknowledge_ping(node_id, ping_interval=30) for node_id in node_ids[70:]: From 987b1985ecd4e2a7fefbea501d9879fc47a964cb Mon Sep 17 00:00:00 2001 From: Adam Narozniak <51029327+adam-narozniak@users.noreply.github.com> Date: Tue, 2 Apr 2024 13:56:35 +0200 Subject: [PATCH 56/57] Bumpy up datasets version (#3193) --- datasets/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/datasets/pyproject.toml b/datasets/pyproject.toml index 5800faf3f272..7dfa60138582 100644 --- a/datasets/pyproject.toml +++ b/datasets/pyproject.toml @@ -54,7 +54,7 @@ exclude = [ [tool.poetry.dependencies] python = "^3.8" numpy = "^1.21.0" -datasets = "^2.14.3" +datasets = "^2.14.6" pillow = { version = ">=6.2.1", optional = true } soundfile = { version = ">=0.12.1", optional = true } librosa = { version = ">=0.10.0.post2", optional = true } From f95d641cefabb326b389250e6a431ff92c4ccd60 Mon Sep 17 00:00:00 2001 From: Javier Date: Tue, 2 Apr 2024 13:39:29 +0100 Subject: [PATCH 57/57] Introduce `ClientAppException` (#3191) --- src/py/flwr/client/app.py | 33 ++++++++++++++----- src/py/flwr/client/client_app.py | 9 +++++ src/py/flwr/common/constant.py | 3 +- .../server/superlink/fleet/vce/vce_api.py | 17 +++++++--- .../simulation/ray_transport/ray_actor.py | 24 ++------------ 5 files changed, 50 insertions(+), 36 deletions(-) diff --git a/src/py/flwr/client/app.py b/src/py/flwr/client/app.py index 7104ba267f57..1720405ab867 100644 --- a/src/py/flwr/client/app.py +++ b/src/py/flwr/client/app.py @@ -34,6 +34,7 @@ TRANSPORT_TYPE_GRPC_RERE, TRANSPORT_TYPE_REST, TRANSPORT_TYPES, + ErrorCode, ) from flwr.common.exit_handlers import register_exit_handlers from flwr.common.logger import log, warn_deprecated_feature @@ -483,7 +484,7 @@ def _load_client_app() -> ClientApp: # Create an error reply message that will never be used to prevent # the used-before-assignment linting error reply_message = message.create_error_reply( - error=Error(code=0, reason="Unknown") + error=Error(code=ErrorCode.UNKNOWN, reason="Unknown") ) # Handle app loading and task message @@ -491,27 +492,41 @@ def _load_client_app() -> ClientApp: # Load ClientApp instance client_app: ClientApp = load_client_app_fn() + # Execute ClientApp reply_message = client_app(message=message, context=context) - # Update node state - node_state.update_context( - run_id=message.metadata.run_id, - context=context, - ) except Exception as ex: # pylint: disable=broad-exception-caught - log(ERROR, "ClientApp raised 
an exception", exc_info=ex) # Legacy grpc-bidi if transport in ["grpc-bidi", None]: + log(ERROR, "Client raised an exception.", exc_info=ex) # Raise exception, crash process raise ex # Don't update/change NodeState - # Create error message + e_code = ErrorCode.CLIENT_APP_RAISED_EXCEPTION # Reason example: ":<'division by zero'>" reason = str(type(ex)) + ":<'" + str(ex) + "'>" + exc_entity = "ClientApp" + if isinstance(ex, LoadClientAppError): + reason = ( + "An exception was raised when attempting to load " + "`ClientApp`" + ) + e_code = ErrorCode.LOAD_CLIENT_APP_EXCEPTION + exc_entity = "SuperNode" + + log(ERROR, "%s raised an exception", exc_entity, exc_info=ex) + + # Create error message reply_message = message.create_error_reply( - error=Error(code=0, reason=reason) + error=Error(code=e_code, reason=reason) + ) + else: + # No exception, update node state + node_state.update_context( + run_id=message.metadata.run_id, + context=context, ) # Send diff --git a/src/py/flwr/client/client_app.py b/src/py/flwr/client/client_app.py index 79e7720cbb8e..c9d337700147 100644 --- a/src/py/flwr/client/client_app.py +++ b/src/py/flwr/client/client_app.py @@ -28,6 +28,15 @@ from .typing import ClientAppCallable +class ClientAppException(Exception): + """Exception raised when an exception is raised while executing a ClientApp.""" + + def __init__(self, message: str): + ex_name = self.__class__.__name__ + self.message = f"\nException {ex_name} occurred. Message: " + message + super().__init__(self.message) + + class ClientApp: """Flower ClientApp. diff --git a/src/py/flwr/common/constant.py b/src/py/flwr/common/constant.py index dd100ba25d25..6a4061a72505 100644 --- a/src/py/flwr/common/constant.py +++ b/src/py/flwr/common/constant.py @@ -81,7 +81,8 @@ class ErrorCode: """Error codes for Message's Error.""" UNKNOWN = 0 - CLIENT_APP_RAISED_EXCEPTION = 1 + LOAD_CLIENT_APP_EXCEPTION = 1 + CLIENT_APP_RAISED_EXCEPTION = 2 def __new__(cls) -> ErrorCode: """Prevent instantiation.""" diff --git a/src/py/flwr/server/superlink/fleet/vce/vce_api.py b/src/py/flwr/server/superlink/fleet/vce/vce_api.py index ea74bf492ab9..9c27fca79c12 100644 --- a/src/py/flwr/server/superlink/fleet/vce/vce_api.py +++ b/src/py/flwr/server/superlink/fleet/vce/vce_api.py @@ -22,9 +22,9 @@ from logging import DEBUG, ERROR, INFO, WARN from typing import Callable, Dict, List, Optional -from flwr.client.client_app import ClientApp, LoadClientAppError +from flwr.client.client_app import ClientApp, ClientAppException, LoadClientAppError from flwr.client.node_state import NodeState -from flwr.common.constant import PING_MAX_INTERVAL +from flwr.common.constant import PING_MAX_INTERVAL, ErrorCode from flwr.common.logger import log from flwr.common.message import Error from flwr.common.object_ref import load_app @@ -94,9 +94,18 @@ async def worker( except Exception as ex: # pylint: disable=broad-exception-caught log(ERROR, ex) log(ERROR, traceback.format_exc()) + + if isinstance(ex, ClientAppException): + e_code = ErrorCode.CLIENT_APP_RAISED_EXCEPTION + elif isinstance(ex, LoadClientAppError): + e_code = ErrorCode.LOAD_CLIENT_APP_EXCEPTION + else: + e_code = ErrorCode.UNKNOWN + reason = str(type(ex)) + ":<'" + str(ex) + "'>" - error = Error(code=0, reason=reason) - out_mssg = message.create_error_reply(error=error) + out_mssg = message.create_error_reply( + error=Error(code=e_code, reason=reason) + ) finally: if out_mssg: diff --git a/src/py/flwr/simulation/ray_transport/ray_actor.py b/src/py/flwr/simulation/ray_transport/ray_actor.py index 
9773203628ab..9caf0fc3e6c0 100644 --- a/src/py/flwr/simulation/ray_transport/ray_actor.py +++ b/src/py/flwr/simulation/ray_transport/ray_actor.py @@ -16,7 +16,6 @@ import asyncio import threading -import traceback from abc import ABC from logging import DEBUG, ERROR, WARNING from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union @@ -25,22 +24,13 @@ from ray import ObjectRef from ray.util.actor_pool import ActorPool -from flwr.client.client_app import ClientApp, LoadClientAppError +from flwr.client.client_app import ClientApp, ClientAppException, LoadClientAppError from flwr.common import Context, Message from flwr.common.logger import log ClientAppFn = Callable[[], ClientApp] -class ClientException(Exception): - """Raised when client side logic crashes with an exception.""" - - def __init__(self, message: str): - div = ">" * 7 - self.message = "\n" + div + "A ClientException occurred." + message - super().__init__(self.message) - - class VirtualClientEngineActor(ABC): """Abstract base class for VirtualClientEngine Actors.""" @@ -71,17 +61,7 @@ def run( raise load_ex except Exception as ex: - client_trace = traceback.format_exc() - mssg = ( - "\n\tSomething went wrong when running your client run." - "\n\tClient " - + cid - + " crashed when the " - + self.__class__.__name__ - + " was running its run." - "\n\tException triggered on the client side: " + client_trace, - ) - raise ClientException(str(mssg)) from ex + raise ClientAppException(str(ex)) from ex return cid, out_message, context
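To close the loop on the heartbeat pieces added in this series, here is a small end-to-end sketch of `start_ping_loop`, mirroring the pattern in `heartbeat_test.py` above rather than an official example: it assumes a Flower build that contains the new `flwr.client.heartbeat` module, and `send_ping` is a hypothetical stand-in for the transport-specific `ping()` closures defined in the gRPC and REST connections.

import threading
import time

from flwr.client.heartbeat import start_ping_loop

stop_event = threading.Event()


def send_ping() -> None:
    # Stand-in for the real ping: report liveness, then pace the loop the same
    # way the connection code does, by waiting on the shared stop event.
    print("ping")
    stop_event.wait(1.0)


thread = start_ping_loop(send_ping, stop_event)  # runs the ping loop in a background thread
time.sleep(3)  # let a few pings go out
stop_event.set()  # flag the loop to stop
thread.join()  # returns once the in-flight ping/wait finishes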