From 4a5eee48688e8f44a463e26a8f7a01156cbe79a7 Mon Sep 17 00:00:00 2001
From: Robert Steiner
Date: Wed, 11 Sep 2024 11:15:47 +0200
Subject: [PATCH 1/7] docs(framework) Update node auth docs (#4160)

Signed-off-by: Robert Steiner
---
 doc/source/how-to-authenticate-supernodes.rst | 10 +++++-----
 doc/source/how-to-enable-ssl-connections.rst  | 10 +++++-----
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/doc/source/how-to-authenticate-supernodes.rst b/doc/source/how-to-authenticate-supernodes.rst
index 73987261b29f..9b001531ee33 100644
--- a/doc/source/how-to-authenticate-supernodes.rst
+++ b/doc/source/how-to-authenticate-supernodes.rst
@@ -28,13 +28,13 @@ Use the following terminal command to start a Flower :code:`SuperNode` that has
 .. code-block:: bash

     flower-superlink
-        --ssl-ca-certfile certificates/ca.crt
-        --ssl-certfile certificates/server.pem
+        --ssl-ca-certfile certificates/ca.crt
+        --ssl-certfile certificates/server.pem
         --ssl-keyfile certificates/server.key
         --auth-list-public-keys keys/client_public_keys.csv
         --auth-superlink-private-key keys/server_credentials
         --auth-superlink-public-key keys/server_credentials.pub
-
+
 Let's break down the authentication flags:

 1. The first flag :code:`--auth-list-public-keys` expects a path to a CSV file storing all known node public keys. You need to store all known node public keys that are allowed to participate in a federation in one CSV file (:code:`.csv`).
@@ -56,8 +56,8 @@ Similar to the long-running Flower server (:code:`SuperLink`), you can easily en
 Use the following terminal command to start an authenticated :code:`SuperNode`:

 .. code-block:: bash
-
-    flower-client-app client:app
+
+    flower-supernode
     --root-certificates certificates/ca.crt
     --superlink 127.0.0.1:9092
     --auth-supernode-private-key keys/client_credentials

diff --git a/doc/source/how-to-enable-ssl-connections.rst b/doc/source/how-to-enable-ssl-connections.rst
index 1828f4ed3258..fc8e89914ac2 100644
--- a/doc/source/how-to-enable-ssl-connections.rst
+++ b/doc/source/how-to-enable-ssl-connections.rst
@@ -4,7 +4,7 @@ Enable SSL connections
 This guide describes how to a SSL-enabled secure Flower server (:code:`SuperLink`) can be started and
 how a Flower client (:code:`SuperNode`) can establish a secure connections to it.

-A complete code example demonstrating a secure connection can be found
+A complete code example demonstrating a secure connection can be found
 `here `_.

 The code example comes with a :code:`README.md` file which explains how to start it. Although it is
@@ -42,9 +42,9 @@ Use the following terminal command to start a sever (SuperLink) that uses the pr
 .. code-block:: bash

-    flower-superlink
-        --ssl-ca-certfile certificates/ca.crt
-        --ssl-certfile certificates/server.pem
+    flower-superlink
+        --ssl-ca-certfile certificates/ca.crt
+        --ssl-certfile certificates/server.pem
         --ssl-keyfile certificates/server.key

 When providing certificates, the server expects a tuple of three certificates paths: CA certificate, server certificate and server private key.
@@ -57,7 +57,7 @@ Use the following terminal command to start a client (SuperNode) that uses the p
 .. code-block:: bash

-    flower-client-app client:app
+    flower-supernode
     --root-certificates certificates/ca.crt
     --superlink 127.0.0.1:9092
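
Patch 1 points :code:`--auth-list-public-keys` at a plain CSV file of known node public keys. A minimal sketch of assembling such a file from a directory of :code:`.pub` files; the directory layout, the :code:`node_*.pub` naming, and the one-key-per-row format are assumptions for illustration, not taken from the Flower docs:

.. code-block:: python

    # Sketch: collect node public keys into one CSV for --auth-list-public-keys.
    # The "keys/" directory, the "node_*.pub" naming, and the one-key-per-row
    # layout are hypothetical and only meant to illustrate the idea.
    import csv
    from pathlib import Path

    key_dir = Path("keys")
    rows = [[pub.read_text().strip()] for pub in sorted(key_dir.glob("node_*.pub"))]

    with open(key_dir / "client_public_keys.csv", "w", newline="") as f:
        csv.writer(f).writerows(rows)
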
From 28a41aa2573d04fc27d4c44ec1f3951963750176 Mon Sep 17 00:00:00 2001
From: Javier
Date: Wed, 11 Sep 2024 11:34:18 +0200
Subject: [PATCH 2/7] fix(framework) Adjust framework name in templates docstrings (#4127)

---
 src/py/flwr/cli/new/new.py | 59 +++++++++++++++-----------------------
 1 file changed, 23 insertions(+), 36 deletions(-)

diff --git a/src/py/flwr/cli/new/new.py b/src/py/flwr/cli/new/new.py
index 520f683a47d8..90e4970d5928 100644
--- a/src/py/flwr/cli/new/new.py
+++ b/src/py/flwr/cli/new/new.py
@@ -136,36 +136,23 @@ def new(
         username = prompt_text("Please provide your Flower username")

     if framework is not None:
-        framework_str_upper = str(framework.value)
+        framework_str = str(framework.value)
     else:
-        framework_value = prompt_options(
+        framework_str = prompt_options(
             "Please select ML framework by typing in the number",
             [mlf.value for mlf in MlFramework],
         )
-        selected_value = [
-            name
-            for name, value in vars(MlFramework).items()
-            if value == framework_value
-        ]
-        framework_str_upper = selected_value[0]
-
-    framework_str = framework_str_upper.lower()

     llm_challenge_str = None
-    if framework_str == "flowertune":
+    if framework_str == MlFramework.FLOWERTUNE:
         llm_challenge_value = prompt_options(
             "Please select LLM challenge by typing in the number",
             sorted([challenge.value for challenge in LlmChallengeName]),
         )
-        selected_value = [
-            name
-            for name, value in vars(LlmChallengeName).items()
-            if value == llm_challenge_value
-        ]
-        llm_challenge_str = selected_value[0]
-        llm_challenge_str = llm_challenge_str.lower()
+        llm_challenge_str = llm_challenge_value.lower()

-    is_baseline_project = framework_str == "baseline"
+    if framework_str == MlFramework.BASELINE:
+        framework_str = "baseline"

     print(
         typer.style(
         )
     )

     context = {
-        "framework_str": framework_str_upper,
+        "framework_str": framework_str,
         "import_name": import_name.replace("-", "_"),
         "package_name": package_name,
         "project_name": app_name,
         "username": username,
     }

+    template_name = framework_str.lower()
+
     # List of files to render
     if llm_challenge_str:
         files = {
             ".gitignore": {"template": "app/.gitignore.tpl"},
-            "pyproject.toml": {"template": f"app/pyproject.{framework_str}.toml.tpl"},
-            "README.md": {"template": f"app/README.{framework_str}.md.tpl"},
+            "pyproject.toml": {"template": f"app/pyproject.{template_name}.toml.tpl"},
+            "README.md": {"template": f"app/README.{template_name}.md.tpl"},
             f"{import_name}/__init__.py": {"template": "app/code/__init__.py.tpl"},
             f"{import_name}/server_app.py": {
                 "template": "app/code/flwr_tune/server_app.py.tpl"
             },
         files = {
             ".gitignore": {"template": "app/.gitignore.tpl"},
             "README.md": {"template": "app/README.md.tpl"},
-            "pyproject.toml": {"template": f"app/pyproject.{framework_str}.toml.tpl"},
+            "pyproject.toml": {"template": f"app/pyproject.{template_name}.toml.tpl"},
             f"{import_name}/__init__.py": {"template": "app/code/__init__.py.tpl"},
             f"{import_name}/server_app.py": {
-                "template": f"app/code/server.{framework_str}.py.tpl"
+                "template": f"app/code/server.{template_name}.py.tpl"
             },
             f"{import_name}/client_app.py": {
-                "template": f"app/code/client.{framework_str}.py.tpl"
+                "template": f"app/code/client.{template_name}.py.tpl"
             },
         }

     # Depending on the framework, generate task.py file
     frameworks_with_tasks = [
-        MlFramework.PYTORCH.value.lower(),
-        MlFramework.JAX.value.lower(),
-        MlFramework.HUGGINGFACE.value.lower(),
-        MlFramework.MLX.value.lower(),
-        MlFramework.TENSORFLOW.value.lower(),
+        MlFramework.PYTORCH.value,
+        MlFramework.JAX.value,
+        MlFramework.HUGGINGFACE.value,
+        MlFramework.MLX.value,
+        MlFramework.TENSORFLOW.value,
     ]
     if framework_str in frameworks_with_tasks:
         files[f"{import_name}/task.py"] = {
-            "template": f"app/code/task.{framework_str}.py.tpl"
+            "template": f"app/code/task.{template_name}.py.tpl"
         }

-    if is_baseline_project:
+    if framework_str == "baseline":
         # Include additional files for baseline template
         for file_name in ["model", "dataset", "strategy", "utils", "__init__"]:
             files[f"{import_name}/{file_name}.py"] = {
-                "template": f"app/code/{file_name}.{framework_str}.py.tpl"
+                "template": f"app/code/{file_name}.{template_name}.py.tpl"
             }

         # Replace README.md
-        files["README.md"]["template"] = f"app/README.{framework_str}.md.tpl"
+        files["README.md"]["template"] = f"app/README.{template_name}.md.tpl"

         # Add LICENSE
         files["LICENSE"] = {"template": "app/LICENSE.tpl"}

-        context["framework_str"] = "baseline"
-
     for file_path, value in files.items():
         render_and_create(
             file_path=project_dir / file_path,
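
The templates named in `files` use `$`-style placeholders such as `$project_name` and `$framework_str`, and the `context` dict above supplies their values. A minimal sketch of that substitution using Python's `string.Template`; the example values are invented, and the real rendering in `flwr new` happens inside `render_and_create`, which this diff does not show:

.. code-block:: python

    # Sketch of the placeholder substitution the .tpl files rely on.
    from string import Template

    context = {
        "framework_str": "PyTorch",
        "import_name": "my_flower_app",
        "package_name": "my-flower-app",
        "project_name": "My Flower App",
        "username": "flwrlabs",
    }

    tpl = Template('"""$project_name: A Flower / $framework_str app."""')
    print(tpl.substitute(context))
    # -> """My Flower App: A Flower / PyTorch app."""
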
From c85417acd32f47c269747c86a1dd065be00c17f7 Mon Sep 17 00:00:00 2001
From: Javier
Date: Wed, 11 Sep 2024 11:42:18 +0200
Subject: [PATCH 3/7] refactor(framework) Add buffer time after `ServerApp` thread initialization (#4119)

---
 src/py/flwr/simulation/run_simulation.py | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/src/py/flwr/simulation/run_simulation.py b/src/py/flwr/simulation/run_simulation.py
index 38a6ee7d6c14..be6410dcbd6b 100644
--- a/src/py/flwr/simulation/run_simulation.py
+++ b/src/py/flwr/simulation/run_simulation.py
@@ -216,6 +216,7 @@ def run_simulation_from_cli() -> None:
         app_dir=app_dir,
         run=run,
         enable_tf_gpu_growth=args.enable_tf_gpu_growth,
+        delay_start=args.delay_start,
         verbose_logging=args.verbose,
         server_app_run_config=fused_config,
         is_app=is_app,
@@ -309,7 +310,6 @@ def run_serverapp_th(
     f_stop: threading.Event,
     has_exception: threading.Event,
     enable_tf_gpu_growth: bool,
-    delay_launch: int = 3,
 ) -> threading.Thread:
     """Run SeverApp in a thread."""

@@ -365,7 +365,6 @@ def server_th_with_start_checks(
             server_app,
         ),
     )
-    sleep(delay_launch)
     serverapp_th.start()
     return serverapp_th

@@ -380,6 +379,7 @@ def _main_loop(
     enable_tf_gpu_growth: bool,
     run: Run,
     exit_event: EventType,
+    delay_start: int,
     flwr_dir: Optional[str] = None,
     client_app: Optional[ClientApp] = None,
     client_app_attr: Optional[str] = None,
@@ -419,6 +419,9 @@ def _main_loop(
         enable_tf_gpu_growth=enable_tf_gpu_growth,
     )

+    # Buffer time so the `ServerApp` in separate thread is ready
+    log(DEBUG, "Buffer time delay: %ds", delay_start)
+    sleep(delay_start)
     # Start Simulation Engine
     vce.start_vce(
         num_supernodes=num_supernodes,
@@ -467,6 +470,7 @@ def _run_simulation(
     flwr_dir: Optional[str] = None,
     run: Optional[Run] = None,
     enable_tf_gpu_growth: bool = False,
+    delay_start: int = 5,
     verbose_logging: bool = False,
     is_app: bool = False,
 ) -> None:
@@ -523,6 +527,7 @@ def _run_simulation(
         enable_tf_gpu_growth,
         run,
         exit_event,
+        delay_start,
         flwr_dir,
         client_app,
         client_app_attr,
@@ -610,6 +615,13 @@ def _parse_args_run_simulation() -> argparse.ArgumentParser:
         "Read more about how `tf.config.experimental.set_memory_growth()` works in "
         "the TensorFlow documentation: https://www.tensorflow.org/api/stable.",
     )
+    parser.add_argument(
+        "--delay-start",
+        type=int,
+        default=3,
+        help="Buffer time (in seconds) to delay the start the simulation engine after "
+        "the `ServerApp`, which runs in a separate thread, has been launched.",
+    )
     parser.add_argument(
         "--verbose",
         action="store_true",
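
The buffer exists because the `ServerApp` thread and the simulation engine are started back to back; giving the thread a head start avoids the engine asking for work before the server side is ready. The same pattern in isolation, as a sketch with plain `threading` and no Flower APIs:

.. code-block:: python

    # Sketch of the "start the server thread, then wait a buffer" pattern.
    # serverapp_main and the 3-second delay are stand-ins, not Flower code.
    import threading
    import time


    def serverapp_main() -> None:
        print("ServerApp thread running")
        time.sleep(10)  # pretend to serve rounds


    server_th = threading.Thread(target=serverapp_main, daemon=True)
    server_th.start()

    delay_start = 3  # mirrors the --delay-start default
    time.sleep(delay_start)  # buffer so the thread is ready first

    print("Starting simulation engine")
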
From 5dffa05ee4d4cfddfd8209e034a6dca0b22555df Mon Sep 17 00:00:00 2001
From: Javier
Date: Wed, 11 Sep 2024 11:58:47 +0200
Subject: [PATCH 4/7] refactor(framework) Update `huggingface` template for `flwr new` (#4169)

Co-authored-by: Chong Shen Ng
Co-authored-by: Daniel J. Beutel
---
 .../app/code/client.huggingface.py.tpl        | 48 ++++++++-----------
 .../app/code/server.huggingface.py.tpl        | 21 ++++++--
 .../app/code/task.huggingface.py.tpl          | 29 ++++++-----
 .../app/pyproject.huggingface.toml.tpl        | 10 +++-
 4 files changed, 62 insertions(+), 46 deletions(-)

diff --git a/src/py/flwr/cli/new/templates/app/code/client.huggingface.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.huggingface.py.tpl
index 3041a69e3aaa..840f938b4ecc 100644
--- a/src/py/flwr/cli/new/templates/app/code/client.huggingface.py.tpl
+++ b/src/py/flwr/cli/new/templates/app/code/client.huggingface.py.tpl
@@ -1,18 +1,11 @@
 """$project_name: A Flower / $framework_str app."""

+import torch
 from flwr.client import ClientApp, NumPyClient
 from flwr.common import Context
 from transformers import AutoModelForSequenceClassification

-from $import_name.task import (
-    get_weights,
-    load_data,
-    set_weights,
-    train,
-    test,
-    CHECKPOINT,
-    DEVICE,
-)
+from $import_name.task import get_weights, load_data, set_weights, test, train


 # Flower client
@@ -22,37 +15,34 @@ class FlowerClient(NumPyClient):
         self.trainloader = trainloader
         self.testloader = testloader
         self.local_epochs = local_epochs
-
-    def get_parameters(self, config):
-        return get_weights(self.net)
-
-    def set_parameters(self, parameters):
-        set_weights(self.net, parameters)
+        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+        self.net.to(self.device)

     def fit(self, parameters, config):
-        self.set_parameters(parameters)
-        train(
-            self.net,
-            self.trainloader,
-            epochs=self.local_epochs,
-        )
-        return self.get_parameters(config={}), len(self.trainloader), {}
+        set_weights(self.net, parameters)
+        train(self.net, self.trainloader, epochs=self.local_epochs, device=self.device)
+        return get_weights(self.net), len(self.trainloader), {}

     def evaluate(self, parameters, config):
-        self.set_parameters(parameters)
-        loss, accuracy = test(self.net, self.testloader)
+        set_weights(self.net, parameters)
+        loss, accuracy = test(self.net, self.testloader, self.device)
         return float(loss), len(self.testloader), {"accuracy": accuracy}


 def client_fn(context: Context):
-    # Load model and data
-    net = AutoModelForSequenceClassification.from_pretrained(
-        CHECKPOINT, num_labels=2
-    ).to(DEVICE)
+    # Get this client's dataset partition
     partition_id = context.node_config["partition-id"]
     num_partitions = context.node_config["num-partitions"]
-    trainloader, valloader = load_data(partition_id, num_partitions)
+    model_name = context.run_config["model-name"]
+    trainloader, valloader = load_data(partition_id, num_partitions, model_name)
+
+    # Load model
+    num_labels = context.run_config["num-labels"]
+    net = AutoModelForSequenceClassification.from_pretrained(
+        model_name, num_labels=num_labels
+    )
+
     local_epochs = context.run_config["local-epochs"]

     # Return Client instance
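
The updated client template keeps importing `get_weights` and `set_weights` from the generated `task.py`; those helpers are unchanged by this patch and therefore not part of the diff. A typical implementation for a PyTorch/Transformers model looks roughly like the following sketch (not the template's exact code):

.. code-block:: python

    # Sketch of typical get_weights/set_weights helpers for a PyTorch model;
    # the generated task.py ships its own (unchanged) versions of these.
    from collections import OrderedDict

    import torch


    def get_weights(net):
        """Return model parameters as a list of NumPy ndarrays."""
        return [val.cpu().numpy() for _, val in net.state_dict().items()]


    def set_weights(net, parameters):
        """Load a list of NumPy ndarrays back into the model."""
        params_dict = zip(net.state_dict().keys(), parameters)
        state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
        net.load_state_dict(state_dict, strict=True)
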
diff --git a/src/py/flwr/cli/new/templates/app/code/server.huggingface.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.huggingface.py.tpl
index 5491f6616160..16f94f0a64e9 100644
--- a/src/py/flwr/cli/new/templates/app/code/server.huggingface.py.tpl
+++ b/src/py/flwr/cli/new/templates/app/code/server.huggingface.py.tpl
@@ -1,18 +1,33 @@
 """$project_name: A Flower / $framework_str app."""

-from flwr.common import Context
-from flwr.server.strategy import FedAvg
+from flwr.common import Context, ndarrays_to_parameters
 from flwr.server import ServerApp, ServerAppComponents, ServerConfig
+from flwr.server.strategy import FedAvg
+from transformers import AutoModelForSequenceClassification
+
+from $import_name.task import get_weights


 def server_fn(context: Context):
     # Read from config
     num_rounds = context.run_config["num-server-rounds"]
+    fraction_fit = context.run_config["fraction-fit"]
+
+    # Initialize global model
+    model_name = context.run_config["model-name"]
+    num_labels = context.run_config["num-labels"]
+    net = AutoModelForSequenceClassification.from_pretrained(
+        model_name, num_labels=num_labels
+    )
+
+    weights = get_weights(net)
+    initial_parameters = ndarrays_to_parameters(weights)

     # Define strategy
     strategy = FedAvg(
-        fraction_fit=1.0,
+        fraction_fit=fraction_fit,
         fraction_evaluate=1.0,
+        initial_parameters=initial_parameters,
     )
     config = ServerConfig(num_rounds=num_rounds)

diff --git a/src/py/flwr/cli/new/templates/app/code/task.huggingface.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.huggingface.py.tpl
index ad52e2c3fe21..1c50e85d7103 100644
--- a/src/py/flwr/cli/new/templates/app/code/task.huggingface.py.tpl
+++ b/src/py/flwr/cli/new/templates/app/code/task.huggingface.py.tpl
@@ -4,24 +4,25 @@
 import warnings
 from collections import OrderedDict

 import torch
+import transformers
+from datasets.utils.logging import disable_progress_bar
 from evaluate import load as load_metric
+from flwr_datasets import FederatedDataset
+from flwr_datasets.partitioner import IidPartitioner
 from torch.optim import AdamW
 from torch.utils.data import DataLoader
 from transformers import AutoTokenizer, DataCollatorWithPadding

-from flwr_datasets import FederatedDataset
-from flwr_datasets.partitioner import IidPartitioner
-
-
 warnings.filterwarnings("ignore", category=UserWarning)
-DEVICE = torch.device("cpu")
-CHECKPOINT = "distilbert-base-uncased"  # transformer model checkpoint
+warnings.filterwarnings("ignore", category=FutureWarning)
+disable_progress_bar()
+transformers.logging.set_verbosity_error()

 fds = None  # Cache FederatedDataset


-def load_data(partition_id: int, num_partitions: int):
+def load_data(partition_id: int, num_partitions: int, model_name: str):
     """Load IMDB data (training and eval)"""
     # Only initialize `FederatedDataset` once
     global fds
@@ -35,10 +36,12 @@ def load_data(partition_id: int, num_partitions: int):
     # Divide data: 80% train, 20% test
     partition_train_test = partition.train_test_split(test_size=0.2, seed=42)

-    tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT)
+    tokenizer = AutoTokenizer.from_pretrained(model_name)

     def tokenize_function(examples):
-        return tokenizer(examples["text"], truncation=True)
+        return tokenizer(
+            examples["text"], truncation=True, add_special_tokens=True, max_length=512
+        )

     partition_train_test = partition_train_test.map(tokenize_function, batched=True)
     partition_train_test = partition_train_test.remove_columns("text")
@@ -59,12 +62,12 @@ def load_data(partition_id: int, num_partitions: int):
     return trainloader, testloader


-def train(net, trainloader, epochs):
+def train(net, trainloader, epochs, device):
     optimizer = AdamW(net.parameters(), lr=5e-5)
     net.train()
     for _ in range(epochs):
         for batch in trainloader:
-            batch = {k: v.to(DEVICE) for k, v in batch.items()}
+            batch = {k: v.to(device) for k, v in batch.items()}
             outputs = net(**batch)
             loss = outputs.loss
             loss.backward()
@@ -72,12 +75,12 @@ def train(net, trainloader, epochs):
             optimizer.step()
             optimizer.zero_grad()


-def test(net, testloader):
+def test(net, testloader, device):
     metric = load_metric("accuracy")
     loss = 0
     net.eval()
     for batch in testloader:
-        batch = {k: v.to(DEVICE) for k, v in batch.items()}
+        batch = {k: v.to(device) for k, v in batch.items()}
         with torch.no_grad():
             outputs = net(**batch)
         logits = outputs.logits
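
With `model_name` and `device` now passed in explicitly, the task helpers can also be exercised outside a Flower run. A rough smoke test, assuming the template above is importable as a local module named `task` and that downloading `prajjwal1/bert-tiny` plus an IMDB partition is acceptable:

.. code-block:: python

    # Rough smoke test for the task helpers; "task" is an assumed local module
    # containing the template code above, and the model/dataset are downloaded.
    import torch
    from transformers import AutoModelForSequenceClassification

    from task import load_data, test, train

    model_name = "prajjwal1/bert-tiny"
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    trainloader, testloader = load_data(0, 10, model_name)
    net = AutoModelForSequenceClassification.from_pretrained(
        model_name, num_labels=2
    ).to(device)

    train(net, trainloader, epochs=1, device=device)
    loss, accuracy = test(net, testloader, device)
    print(f"loss={loss:.4f} accuracy={accuracy:.4f}")
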
diff --git a/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl
index 15dc2af87a3f..af1e4d005114 100644
--- a/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl
+++ b/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl
@@ -8,7 +8,7 @@ version = "1.0.0"
 description = ""
 license = "Apache-2.0"
 dependencies = [
-    "flwr[simulation]>=1.10.0",
+    "flwr[simulation]>=1.11.0",
    "flwr-datasets>=0.3.0",
    "torch==2.2.1",
    "transformers>=4.30.0,<5.0",
@@ -29,10 +29,18 @@ clientapp = "$import_name.client_app:app"

 [tool.flwr.app.config]
 num-server-rounds = 3
+fraction-fit = 0.5
 local-epochs = 1
+model-name = "prajjwal1/bert-tiny"  # Set a larger model if you have access to more GPU resources
+num-labels = 2

 [tool.flwr.federations]
 default = "localhost"

 [tool.flwr.federations.localhost]
 options.num-supernodes = 10
+
+[tool.flwr.federations.localhost-gpu]
+options.num-supernodes = 10
+options.backend.client-resources.num-cpus = 4  # each ClientApp assumes to use 4CPUs
+options.backend.client-resources.num-gpus = 0.25  # at most 4 ClientApps will run in a given GPU
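
The keys under `[tool.flwr.app.config]` are what the templates read back via `context.run_config`, and `options.backend.client-resources` caps how many `ClientApp`s the simulation backend can run side by side. A small sketch of that arithmetic, with an assumed machine size chosen only for illustration:

.. code-block:: python

    # How many ClientApps can run at once under the localhost-gpu federation?
    # The machine size (16 CPUs, 1 GPU) is an assumption for illustration.
    total_cpus, total_gpus = 16, 1
    client_cpus, client_gpus = 4, 0.25  # values from the template above

    max_by_cpu = total_cpus // client_cpus      # 4
    max_by_gpu = int(total_gpus / client_gpus)  # 4
    print(min(max_by_cpu, max_by_gpu))          # at most 4 concurrent ClientApps
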
From 06135935720301552dc9f9cec019022817e4d9b4 Mon Sep 17 00:00:00 2001
From: Javier
Date: Wed, 11 Sep 2024 12:47:09 +0200
Subject: [PATCH 5/7] refactor(framework) Handle unsuitable resources for simulation (#4143)

---
 .../superlink/fleet/vce/backend/raybackend.py | 33 ++++++++++++-------
 .../simulation/ray_transport/ray_actor.py     |  4 +--
 2 files changed, 23 insertions(+), 14 deletions(-)

diff --git a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py
index acfb248a6366..2024b8760d95 100644
--- a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py
+++ b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py
@@ -52,16 +52,11 @@ def __init__(

         # Validate client resources
         self.client_resources_key = "client_resources"
-        client_resources = self._validate_client_resources(config=backend_config)
+        self.client_resources = self._validate_client_resources(config=backend_config)

-        # Create actor pool
-        actor_kwargs = self._validate_actor_arguments(config=backend_config)
-
-        self.pool = BasicActorPool(
-            actor_type=ClientAppActor,
-            client_resources=client_resources,
-            actor_kwargs=actor_kwargs,
-        )
+        # Valide actor resources
+        self.actor_kwargs = self._validate_actor_arguments(config=backend_config)
+        self.pool: Optional[BasicActorPool] = None

         self.app_fn: Optional[Callable[[], ClientApp]] = None

@@ -122,14 +117,24 @@ def init_ray(self, backend_config: BackendConfig) -> None:
     @property
     def num_workers(self) -> int:
         """Return number of actors in pool."""
-        return self.pool.num_actors
+        return self.pool.num_actors if self.pool else 0

     def is_worker_idle(self) -> bool:
         """Report whether the pool has idle actors."""
-        return self.pool.is_actor_available()
+        return self.pool.is_actor_available() if self.pool else False

     def build(self, app_fn: Callable[[], ClientApp]) -> None:
         """Build pool of Ray actors that this backend will submit jobs to."""
+        # Create Actor Pool
+        try:
+            self.pool = BasicActorPool(
+                actor_type=ClientAppActor,
+                client_resources=self.client_resources,
+                actor_kwargs=self.actor_kwargs,
+            )
+        except Exception as ex:
+            raise ex
+
         self.pool.add_actors_to_pool(self.pool.actors_capacity)
         # Set ClientApp callable that ray actors will use
         self.app_fn = app_fn
@@ -146,6 +151,9 @@ def process_message(
         """
         partition_id = context.node_config[PARTITION_ID_KEY]

+        if self.pool is None:
+            raise ValueError("The actor pool is empty, unfit to process messages.")
+
         if self.app_fn is None:
             raise ValueError(
                 "Unspecified function to load a `ClientApp`. "
@@ -179,6 +187,7 @@ def process_message(

     def terminate(self) -> None:
         """Terminate all actors in actor pool."""
-        self.pool.terminate_all_actors()
+        if self.pool:
+            self.pool.terminate_all_actors()
         ray.shutdown()
         log(DEBUG, "Terminated %s", self.__class__.__name__)

diff --git a/src/py/flwr/simulation/ray_transport/ray_actor.py b/src/py/flwr/simulation/ray_transport/ray_actor.py
index b1c9d2b9c0c1..698eb78f2aef 100644
--- a/src/py/flwr/simulation/ray_transport/ray_actor.py
+++ b/src/py/flwr/simulation/ray_transport/ray_actor.py
@@ -124,14 +124,14 @@ def pool_size_from_resources(client_resources: Dict[str, Union[int, float]]) ->
             WARNING,
             "The ActorPool is empty. The system (CPUs=%s, GPUs=%s) "
             "does not meet the criteria to host at least one client with resources:"
-            " %s. Lowering the `client_resources` could help.",
+            " %s. Lowering these resources could help.",
             num_cpus,
             num_gpus,
             client_resources,
         )
         raise ValueError(
             "ActorPool is empty. Stopping Simulation. "
-            "Check 'client_resources' passed to `start_simulation`"
+            "Check `num_cpus` and/or `num_gpus` passed to the simulation engine"
         )

     return total_num_actors
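
The refactor moves `BasicActorPool` creation from `__init__` into `build()`, so a backend can be constructed even when the requested resources cannot host a single actor, and the failure surfaces later with a clear message instead of at construction time. The same build-before-use guard in isolation, as a generic Python sketch with no Ray involved:

.. code-block:: python

    # Generic sketch of the build-before-use guard; names are made up and
    # nothing here uses Ray or Flower.
    from typing import Optional


    class Pool:
        def __init__(self, capacity: int) -> None:
            if capacity < 1:
                raise ValueError("Resources cannot host a single worker")
            self.capacity = capacity


    class Backend:
        def __init__(self, capacity: int) -> None:
            self.capacity = capacity
            self.pool: Optional[Pool] = None  # nothing allocated yet

        @property
        def num_workers(self) -> int:
            return self.pool.capacity if self.pool else 0

        def build(self) -> None:
            self.pool = Pool(self.capacity)  # may raise with a clear message

        def process(self) -> None:
            if self.pool is None:
                raise ValueError("Pool not built; call build() first")
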
From 28308157266983412558b556e4d23fabe59515e5 Mon Sep 17 00:00:00 2001
From: Heng Pan
Date: Wed, 11 Sep 2024 11:56:00 +0100
Subject: [PATCH 6/7] fix(framework:skip) Add copyright header to `.proto` files (#4166)

---
 src/proto/flwr/proto/clientappio.proto | 15 +++++++++++++++
 src/proto/flwr/proto/fab.proto         | 15 +++++++++++++++
 2 files changed, 30 insertions(+)

diff --git a/src/proto/flwr/proto/clientappio.proto b/src/proto/flwr/proto/clientappio.proto
index 898cb04c5b5b..0ec73b8e569a 100644
--- a/src/proto/flwr/proto/clientappio.proto
+++ b/src/proto/flwr/proto/clientappio.proto
@@ -1,3 +1,18 @@
+// Copyright 2024 Flower Labs GmbH. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// ==============================================================================
+
 syntax = "proto3";

 package flwr.proto;

diff --git a/src/proto/flwr/proto/fab.proto b/src/proto/flwr/proto/fab.proto
index 3620a95ff009..6f8e6b87808d 100644
--- a/src/proto/flwr/proto/fab.proto
+++ b/src/proto/flwr/proto/fab.proto
@@ -1,3 +1,18 @@
+// Copyright 2024 Flower Labs GmbH. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// ==============================================================================
+
 syntax = "proto3";

 package flwr.proto;

From 570eb0f4916ec6ce122fd6df8cbb9de8af81678b Mon Sep 17 00:00:00 2001
From: Taner Topal
Date: Wed, 11 Sep 2024 14:17:17 +0200
Subject: [PATCH 7/7] ci(*:skip) Add new project to changelog configuration (#4164)

---
 dev/changelog_config.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dev/changelog_config.toml b/dev/changelog_config.toml
index 05527e2b2cb3..82a10d30173b 100644
--- a/dev/changelog_config.toml
+++ b/dev/changelog_config.toml
@@ -3,7 +3,7 @@
 type = ["ci", "docs", "feat", "fix", "refactor", "break"]

-project = ["framework", "baselines", "datasets", "examples", "benchmarks"]
+project = ["framework", "baselines", "datasets", "examples", "benchmarks", "glossary"]

 scope = "skip"
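
The `type` and `project` lists in `dev/changelog_config.toml` describe the `<type>(<project>[:skip]) <summary> (#<pr>)` shape that the subjects in this series follow, for example `docs(framework) Update node auth docs (#4160)` and `ci(*:skip) Add new project to changelog configuration (#4164)`. A quick sketch of checking a subject against those lists; the regex is an approximation for illustration, not the project's actual tooling:

.. code-block:: python

    # Approximate check of commit subjects against the changelog configuration.
    import re

    TYPES = "ci|docs|feat|fix|refactor|break"
    PROJECTS = r"framework|baselines|datasets|examples|benchmarks|glossary|\*"

    PATTERN = re.compile(rf"^({TYPES})\(({PROJECTS})(:skip)?\) .+ \(#\d+\)$")

    for subject in (
        "docs(framework) Update node auth docs (#4160)",
        "ci(*:skip) Add new project to changelog configuration (#4164)",
        "refactor(framework) Handle unsuitable resources for simulation (#4143)",
    ):
        print(bool(PATTERN.match(subject)), subject)
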