diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 34af632814a3..0f95b023ecaa 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -17,3 +17,7 @@ README.md @jafermarq @tanertopal @danieljanes # Changelog /doc/source/ref-changelog.md @jafermarq @tanertopal @danieljanes + +# GitHub Actions and Workflows +/.github/workflows @Robert-Steiner @tanertopal @danieljanes +/.github/actions @Robert-Steiner @tanertopal @danieljanes diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 0077bbab0909..0158aff55d15 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,6 +1,7 @@ @@ -37,29 +38,9 @@ Example: The variable `rnd` was renamed to `server_round` to improve readability - [ ] Implement proposed change - [ ] Write tests - [ ] Update [documentation](https://flower.ai/docs/writing-documentation.html) -- [ ] Update the changelog entry below - [ ] Make CI checks pass - [ ] Ping maintainers on [Slack](https://flower.ai/join-slack/) (channel `#contributions`) - - -### Changelog entry - - - ### Any other comments? ``parameters_to_ndarrays``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:55 +msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:57 +msgid "" +"Strategy initialization: if the strategy relies on the default values for" +" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " +"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " +"create a strategy (by calling ``start_server`` or ``start_simulation`` " +"without passing a strategy instance) should now manually initialize " +"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." 
+msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 +msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 +msgid "``fraction_eval`` --> ``fraction_evaluate``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 +msgid "``min_eval_clients`` --> ``min_evaluate_clients``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 +msgid "``eval_fn`` --> ``evaluate_fn``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:64 +msgid "" +"Rename ``rnd`` to ``server_round``. This impacts multiple methods and " +"functions, for example, ``configure_fit``, ``aggregate_fit``, " +"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 +msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 +msgid "" +"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +msgid "" +"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " +"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 +msgid "Custom strategies" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:73 +msgid "" +"The type of parameter ``failures`` has changed from " +"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " +"BaseException]]`` (in ``aggregate_fit``) and " +"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " +"``aggregate_evaluate``)" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:74 +msgid "" +"The ``Strategy`` method ``evaluate`` now receives the current round of " +"federated learning/evaluation as the first parameter:" +msgstr "" + +#: 
../../source/how-to-upgrade-to-flower-1.0.rst:76 +msgid "" +"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +msgid "" +"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " +"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 +msgid "Optional improvements" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:82 +msgid "" +"Along with the necessary changes above, there are a number of potential " +"improvements that just became possible:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:84 +msgid "" +"Remove \"placeholder\" methods from subclasses of ``Client`` or " +"``NumPyClient``. If you, for example, use server-side evaluation, then " +"empty placeholder implementations of ``evaluate`` are no longer " +"necessary." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:85 +msgid "" +"Configure the round timeout via ``start_simulation``: " +"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " +"round_timeout=600.0), ...)``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 +#: ../../source/how-to-upgrade-to-flower-next.rst:316 +msgid "Further help" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:91 +msgid "" +"Most official `Flower code examples " +"`_ are already updated" +" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " +"API. If there are further questions, `join the Flower Slack " +"`_ and use the channel ``#questions``." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:2 +msgid "Upgrade to Flower Next" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:4 +msgid "" +"Welcome to the migration guide for updating Flower to Flower Next! 
" +"Whether you're a seasoned user or just getting started, this guide will " +"help you smoothly transition your existing setup to take advantage of the" +" latest features and improvements in Flower Next, starting from version " +"1.8." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:9 +msgid "" +"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " +"changes by using the *compatibility layer* in Flower Next. In another " +"guide, we will show how to run Flower Next end-to-end with pure Flower " +"Next APIs." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:13 +msgid "Let's dive in!" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:47 +msgid "" +"Here's how to update an existing installation of Flower to Flower Next " +"with ``pip``:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:53 +msgid "or if you need Flower Next with simulation:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:60 +msgid "" +"Ensure you set the following version constraint in your " +"``requirements.txt``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:70 +msgid "or ``pyproject.toml``:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:81 +msgid "Using Poetry" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:83 +msgid "" +"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " +"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " +"running ``poetry install``)." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:85 +msgid "" +"Ensure you set the following version constraint in your " +"``pyproject.toml``:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:101 +msgid "" +"In Flower Next, the *infrastructure* and *application layers* have been " +"decoupled. Instead of starting a client in code via ``start_client()``, " +"you create a |clientapp_link|_ and start it via the command line. 
Instead" +" of starting a server in code via ``start_server()``, you create a " +"|serverapp_link|_ and start it via the command line. The long-running " +"components of server and client are called SuperLink and SuperNode. The " +"following non-breaking changes that require manual updates and allow you " +"to run your project both in the traditional way and in the Flower Next " +"way:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:108 +msgid "|clientapp_link|_" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:109 +msgid "" +"Wrap your existing client with |clientapp_link|_ instead of launching it " +"via |startclient_link|_. Here's an example:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:131 +msgid "|serverapp_link|_" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:132 +msgid "" +"Wrap your existing strategy with |serverapp_link|_ instead of starting " +"the server via |startserver_link|_. Here's an example:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:153 +msgid "Deployment" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:154 +msgid "" +"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " +"in sequence, |flowernext_clientapp_link|_ (2x) and " +"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" +" `server.py` as Python scripts." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:157 +msgid "" +"Here's an example to start the server without HTTPS (only for " +"prototyping):" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:173 +msgid "" +"Here's another example to start with HTTPS. Use the ``--certificates`` " +"command line argument to pass paths to (CA certificate, server " +"certificate, and server private key)." 
+msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:200 +msgid "Simulation in CLI" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:201 +msgid "" +"Wrap your existing client and strategy with |clientapp_link|_ and " +"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" +" anymore. Here's an example:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:231 +msgid "" +"Run |runsimcli_link|_ in CLI and point to the ``server_app`` / " +"``client_app`` object in the code instead of executing the Python script." +" Here's an example (assuming the ``server_app`` and ``client_app`` " +"objects are in a ``sim.py`` module):" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:248 +msgid "" +"Set default resources for each |clientapp_link|_ using the ``--backend-" +"config`` command line argument instead of setting the " +"``client_resources`` argument in |startsim_link|_. Here's an example:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:274 +msgid "Simulation in a Notebook" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:275 +msgid "" +"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " +"an example:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:318 +msgid "" +"Some official `Flower code examples `_ " +"are already updated to Flower Next so they can serve as a reference for " +"using the Flower Next API. If there are further questions, `join the " +"Flower Slack `_ and use the channel " +"``#questions``. You can also `participate in Flower Discuss " +"`_ where you can find us answering questions," +" or share and learn from others about migrating to Flower Next." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:324 +msgid "Important" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:327 +msgid "" +"As we continuously enhance Flower Next at a rapid pace, we'll be " +"periodically updating this guide. 
Please feel free to share any feedback " +"with us!" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:333 +msgid "Happy migrating! 🚀" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:2 +msgid "Use Built-in Mods" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:4 +msgid "" +"**Note: This tutorial covers experimental features. The functionality and" +" interfaces may change in future versions.**" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:6 +msgid "" +"In this tutorial, we will learn how to utilize built-in mods to augment " +"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " +"allow us to perform operations before and after a task is processed in " +"the ``ClientApp``." +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:9 +msgid "What are Mods?" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:11 +msgid "" +"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " +"or inspect the incoming ``Message`` and the resulting outgoing " +"``Message``. The signature for a ``Mod`` is as follows:" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:18 +msgid "A typical mod function might look something like this:" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:31 +msgid "Using Mods" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:33 +msgid "To use mods in your ``ClientApp``, you can follow these steps:" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:36 +msgid "1. Import the required mods" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:38 +msgid "First, import the built-in mod you intend to use:" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:46 +msgid "2. 
Define your client function" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:48 +msgid "" +"Define your client function (``client_fn``) that will be wrapped by the " +"mod(s):" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:57 +msgid "3. Create the ``ClientApp`` with mods" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:59 +msgid "" +"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " +"argument. The order in which you provide the mods matters:" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:72 +msgid "Order of execution" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:74 +msgid "" +"When the ``ClientApp`` runs, the mods are executed in the order they are " +"provided in the list:" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:76 +msgid "``example_mod_1`` (outermost mod)" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:77 +msgid "``example_mod_2`` (next mod)" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:78 +msgid "" +"Message handler (core function that handles the incoming ``Message`` and " +"returns the outgoing ``Message``)" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:79 +msgid "``example_mod_2`` (on the way back)" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:80 +msgid "``example_mod_1`` (outermost mod on the way back)" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:82 +msgid "" +"Each mod has a chance to inspect and modify the incoming ``Message`` " +"before passing it to the next mod, and likewise with the outgoing " +"``Message`` before returning it up the stack." +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:87 +msgid "" +"By following this guide, you have learned how to effectively use mods to " +"enhance your ``ClientApp``'s functionality. Remember that the order of " +"mods is crucial and affects how the input and output are processed." 
+msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:89 +msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:2 +msgid "Use Differential Privacy" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:3 +msgid "" +"This guide explains how you can utilize differential privacy in the " +"Flower framework. If you are not yet familiar with differential privacy, " +"you can refer to :doc:`explanation-differential-privacy`." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:7 +msgid "" +"Differential Privacy in Flower is in a preview phase. If you plan to use " +"these features in a production environment with sensitive data, feel free" +" contact us to discuss your requirements and to receive guidance on how " +"to best use these features." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:12 +msgid "" +"This approach consists of two seprate phases: clipping of the updates and" +" adding noise to the aggregated model. For the clipping phase, Flower " +"framework has made it possible to decide whether to perform clipping on " +"the server side or the client side." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:15 +msgid "" +"**Server-side Clipping**: This approach has the advantage of the server " +"enforcing uniform clipping across all clients' updates and reducing the " +"communication overhead for clipping values. However, it also has the " +"disadvantage of increasing the computational load on the server due to " +"the need to perform the clipping operation for all clients." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:16 +msgid "" +"**Client-side Clipping**: This approach has the advantage of reducing the" +" computational overhead on the server. However, it also has the " +"disadvantage of lacking centralized control, as the server has less " +"control over the clipping process." 
+msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:21 +msgid "Server-side Clipping" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:22 +msgid "" +"For central DP with server-side clipping, there are two :code:`Strategy` " +"classes that act as wrappers around the actual :code:`Strategy` instance " +"(for example, :code:`FedAvg`). The two wrapper classes are " +":code:`DifferentialPrivacyServerSideFixedClipping` and " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " +"adaptive clipping." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "server side clipping" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:31 +msgid "" +"The code sample below enables the :code:`FedAvg` strategy to use server-" +"side fixed clipping using the " +":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " +"same approach can be used with " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " +"corresponding input parameters." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:52 +msgid "Client-side Clipping" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:53 +msgid "" +"For central DP with client-side clipping, the server sends the clipping " +"value to selected clients on each round. Clients can use existing Flower " +":code:`Mods` to perform the clipping. Two mods are available for fixed " +"and adaptive client-side clipping: :code:`fixedclipping_mod` and " +":code:`adaptiveclipping_mod` with corresponding server-side wrappers " +":code:`DifferentialPrivacyClientSideFixedClipping` and " +":code:`DifferentialPrivacyClientSideAdaptiveClipping`." 
+msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "client side clipping" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:63 +msgid "" +"The code sample below enables the :code:`FedAvg` strategy to use " +"differential privacy with client-side fixed clipping using both the " +":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " +"the client, :code:`fixedclipping_mod`:" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:80 +msgid "" +"In addition to the server-side strategy wrapper, the :code:`ClientApp` " +"needs to configure the matching :code:`fixedclipping_mod` to perform the " +"client-side clipping:" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:97 +msgid "" +"To utilize local differential privacy (DP) and add noise to the client " +"model parameters before transmitting them to the server in Flower, you " +"can use the `LocalDpMod`. The following hyperparameters need to be set: " +"clipping norm value, sensitivity, epsilon, and delta." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "local DP mod" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:104 +msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:122 +msgid "" +"Please note that the order of mods, especially those that modify " +"parameters, is important when using multiple modifiers. Typically, " +"differential privacy (DP) modifiers should be the last to operate on " +"parameters." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:125 +msgid "Local Training using Privacy Engines" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:126 +msgid "" +"For ensuring data instance-level privacy during local model training on " +"the client side, consider leveraging privacy engines such as Opacus and " +"TensorFlow Privacy. 
For examples of using Flower with these engines, " +"please refer to the Flower examples directory (`Opacus " +"`_, `Tensorflow" +" Privacy `_)." +msgstr "" + +#: ../../source/how-to-use-strategies.rst:2 +msgid "Use strategies" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:4 +msgid "" +"Flower allows full customization of the learning process through the " +":code:`Strategy` abstraction. A number of built-in strategies are " +"provided in the core framework." +msgstr "" + +#: ../../source/how-to-use-strategies.rst:6 +msgid "" +"There are three ways to customize the way Flower orchestrates the " +"learning process on the server side:" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:8 +msgid "Use an existing strategy, for example, :code:`FedAvg`" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:9 +#: ../../source/how-to-use-strategies.rst:40 +msgid "Customize an existing strategy with callback functions" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:10 +#: ../../source/how-to-use-strategies.rst:87 +msgid "Implement a novel strategy" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:14 +msgid "Use an existing strategy" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:16 +msgid "" +"Flower comes with a number of popular federated learning strategies " +"built-in. A built-in strategy can be instantiated as follows:" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:25 +msgid "" +"This creates a strategy with all parameters left at their default values " +"and passes it to the :code:`start_server` function. It is usually " +"recommended to adjust a few parameters during instantiation:" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:42 +msgid "" +"Existing strategies provide several ways to customize their behaviour. " +"Callback functions allow strategies to call user-provided code during " +"execution." 
+msgstr "" + +#: ../../source/how-to-use-strategies.rst:45 +msgid "Configuring client fit and client evaluate" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:47 +msgid "" +"The server can pass new configuration values to the client each round by " +"providing a function to :code:`on_fit_config_fn`. The provided function " +"will be called by the strategy and must return a dictionary of " +"configuration key values pairs that will be sent to the client. It must " +"return a dictionary of arbitrary configuration values :code:`client.fit`" +" and :code:`client.evaluate` functions during each round of federated " +"learning." +msgstr "" + +#: ../../source/how-to-use-strategies.rst:75 +msgid "" +"The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " +"values from server to client, and poetentially change these values each " +"round, for example, to adjust the learning rate. The client will receive " +"the dictionary returned by the :code:`on_fit_config_fn` in its own " +":code:`client.fit()` function." +msgstr "" + +#: ../../source/how-to-use-strategies.rst:78 +msgid "" +"Similar to :code:`on_fit_config_fn`, there is also " +":code:`on_evaluate_config_fn` to customize the configuration sent to " +":code:`client.evaluate()`" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:81 +msgid "Configuring server-side evaluation" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:83 +msgid "" +"Server-side evaluation can be enabled by passing an evaluation function " +"to :code:`evaluate_fn`." +msgstr "" + +#: ../../source/how-to-use-strategies.rst:89 +msgid "" +"Writing a fully custom strategy is a bit more involved, but it provides " +"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." 
+msgstr "" + +#: ../../source/index.rst:34 +msgid "Tutorial" +msgstr "" + +#: ../../source/index.rst:44 +msgid "Quickstart tutorials" +msgstr "" + +#: ../../source/index.rst:74 ../../source/index.rst:78 +msgid "How-to guides" +msgstr "" + +#: ../../source/index.rst:99 +msgid "Legacy example guides" +msgstr "" + +#: ../../source/index.rst:108 ../../source/index.rst:112 +msgid "Explanations" +msgstr "" + +#: None:-1 +msgid "API reference" +msgstr "" + +#: ../../source/index.rst:137 +msgid "Reference docs" +msgstr "" + +#: ../../source/index.rst:153 +msgid "Contributor tutorials" +msgstr "" + +#: ../../source/index.rst:160 +msgid "Contributor how-to guides" +msgstr "" + +#: ../../source/index.rst:173 +msgid "Contributor explanations" +msgstr "" + +#: ../../source/index.rst:179 +msgid "Contributor references" +msgstr "" + +#: ../../source/index.rst:-1 +msgid "" +"Check out the documentation of the main Flower Framework enabling easy " +"Python development for Federated Learning." +msgstr "" + +#: ../../source/index.rst:2 +msgid "Flower Framework Documentation" +msgstr "" + +#: ../../source/index.rst:7 +msgid "" +"Welcome to Flower's documentation. `Flower `_ is a " +"friendly federated learning framework." +msgstr "" + +#: ../../source/index.rst:11 +msgid "Join the Flower Community" +msgstr "" + +#: ../../source/index.rst:13 +msgid "" +"The Flower Community is growing quickly - we're a friendly group of " +"researchers, engineers, students, professionals, academics, and other " +"enthusiasts." +msgstr "" + +#: ../../source/index.rst:15 +msgid "Join us on Slack" +msgstr "" + +#: ../../source/index.rst:23 +msgid "Flower Framework" +msgstr "" + +#: ../../source/index.rst:25 +msgid "" +"The user guide is targeted at researchers and developers who want to use " +"Flower to bring existing machine learning workloads into a federated " +"setting. One of Flower's design goals was to make this simple. Read on to" +" learn more." 
+msgstr "" + +#: ../../source/index.rst:30 +msgid "Tutorials" +msgstr "" + +#: ../../source/index.rst:32 +msgid "" +"A learning-oriented series of federated learning tutorials, the best " +"place to start." +msgstr "" + +#: ../../source/index.rst:61 +msgid "" +"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " +":doc:`TensorFlow ` | :doc:`🤗 Transformers" +" ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai " +"` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:`XGBoost ` | " +":doc:`Android ` | :doc:`iOS `" +msgstr "" + +#: ../../source/index.rst:63 +msgid "We also made video tutorials for PyTorch:" +msgstr "" + +#: ../../source/index.rst:68 +msgid "And TensorFlow:" +msgstr "" + +#: ../../source/index.rst:76 +msgid "" +"Problem-oriented how-to guides show step-by-step how to achieve a " +"specific goal." +msgstr "" + +#: ../../source/index.rst:110 +msgid "" +"Understanding-oriented concept guides explain and discuss key topics and " +"underlying ideas behind Flower and collaborative AI." +msgstr "" + +#: ../../source/index.rst:120 +msgid "References" +msgstr "" + +#: ../../source/index.rst:122 +msgid "Information-oriented API reference and other reference material." +msgstr "" + +#: ../../source/index.rst:131::1 +msgid ":py:obj:`flwr `\\" +msgstr "" + +#: ../../source/index.rst:131::1 flwr:1 of +msgid "Flower main package." +msgstr "" + +#: ../../source/index.rst:148 +msgid "Contributor docs" +msgstr "" + +#: ../../source/index.rst:150 +msgid "" +"The Flower community welcomes contributions. The following docs are " +"intended to help along the way." 
+msgstr "" + +#: ../../source/ref-api-cli.rst:2 +msgid "Flower CLI reference" +msgstr "" + +#: ../../source/ref-api-cli.rst:7 +msgid "flower-simulation" +msgstr "" + +#: ../../source/ref-api-cli.rst:17 +msgid "flower-superlink" +msgstr "" + +#: ../../source/ref-api-cli.rst:27 +msgid "flower-client-app" +msgstr "" + +#: ../../source/ref-api-cli.rst:37 +msgid "flower-server-app" +msgstr "" + +#: ../../source/ref-api/flwr.rst:2 +msgid "flwr" +msgstr "" + +#: ../../source/ref-api/flwr.rst:25 ../../source/ref-api/flwr.server.rst:51 +msgid "Modules" +msgstr "" + +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.client `\\" +msgstr "" + +#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of +msgid "Flower client." +msgstr "" + +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.common `\\" +msgstr "" + +#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of +msgid "Common components shared between server and client." +msgstr "" + +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.server `\\" +msgstr "" + +#: ../../source/ref-api/flwr.rst:35::1 +#: ../../source/ref-api/flwr.server.rst:40::1 flwr.server:1 +#: flwr.server.server.Server:1 of +msgid "Flower server." +msgstr "" + +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.simulation `\\" +msgstr "" + +#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of +msgid "Flower simulation." +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:2 +msgid "client" +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:13 +#: ../../source/ref-api/flwr.common.rst:13 +#: ../../source/ref-api/flwr.server.rst:13 +#: ../../source/ref-api/flwr.simulation.rst:13 +msgid "Functions" +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:25::1 +msgid ":py:obj:`run_client_app `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:25::1 +#: flwr.client.supernode.app.run_client_app:1 of +msgid "Run Flower client app." 
+msgstr "" + +#: ../../source/ref-api/flwr.client.rst:25::1 +msgid ":py:obj:`run_supernode `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:25::1 +#: flwr.client.supernode.app.run_supernode:1 of +msgid "Run Flower SuperNode." +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:25::1 +msgid "" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:25::1 +#: flwr.client.app.start_client:1 of +msgid "Start a Flower client node which connects to a Flower server." +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:25::1 +msgid "" +":py:obj:`start_numpy_client `\\ \\(\\*\\," +" server\\_address\\, client\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:25::1 +#: flwr.client.app.start_numpy_client:1 of +msgid "Start a Flower NumPyClient which connects to a gRPC server." +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:27 +#: ../../source/ref-api/flwr.common.rst:32 +#: ../../source/ref-api/flwr.server.rst:28 +#: ../../source/ref-api/flwr.server.strategy.rst:17 +#: ../../source/ref-api/flwr.server.workflow.rst:17 +msgid "Classes" +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:34::1 +msgid ":py:obj:`Client `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:34::1 +#: flwr.client.client.Client:1 of +msgid "Abstract base class for Flower clients." +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:34::1 +msgid "" +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " +"mods\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:34::1 +#: flwr.client.client_app.ClientApp:1 of +msgid "Flower ClientApp." +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:34::1 +msgid ":py:obj:`NumPyClient `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:34::1 +#: flwr.client.numpy_client.NumPyClient:1 of +msgid "Abstract base class for Flower clients using NumPy." 
+msgstr "" + +#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 +#: flwr.server.client_manager.ClientManager:1 +#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 +#: of +msgid "Bases: :py:class:`~abc.ABC`" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:15 +#: ../../source/ref-api/flwr.client.ClientApp.rst:15 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 +#: ../../source/ref-api/flwr.common.Array.rst:15 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Context.rst:15 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 +#: ../../source/ref-api/flwr.common.Error.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 +#: ../../source/ref-api/flwr.common.EventType.rst:15 +#: ../../source/ref-api/flwr.common.FitIns.rst:15 +#: ../../source/ref-api/flwr.common.FitRes.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 +#: ../../source/ref-api/flwr.common.Message.rst:15 +#: ../../source/ref-api/flwr.common.MessageType.rst:15 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 +#: ../../source/ref-api/flwr.common.Metadata.rst:15 +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Parameters.rst:15 +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 +#: ../../source/ref-api/flwr.common.RecordSet.rst:15 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 +#: ../../source/ref-api/flwr.common.Status.rst:15 +#: ../../source/ref-api/flwr.server.ClientManager.rst:15 +#: ../../source/ref-api/flwr.server.Driver.rst:15 +#: 
../../source/ref-api/flwr.server.History.rst:15 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 +#: ../../source/ref-api/flwr.server.Server.rst:15 +#: ../../source/ref-api/flwr.server.ServerApp.rst:15 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 +#: 
../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 +msgid "Methods" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`evaluate `\\ \\(ins\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.evaluate:1 +#: flwr.client.numpy_client.NumPyClient.evaluate:1 of +msgid "Evaluate the provided parameters using the locally held dataset." +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`fit `\\ \\(ins\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.fit:1 of +msgid "Refine the provided parameters using the locally held dataset." +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_context:1 +#: flwr.client.numpy_client.NumPyClient.get_context:1 of +msgid "Get the run context from this client." +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_parameters `\\ \\(ins\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_parameters:1 +#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of +msgid "Return the current local model parameters." +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_properties `\\ \\(ins\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.get_properties:1 of +msgid "Return set of client's properties." 
+msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`set_context `\\ \\(context\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.set_context:1 +#: flwr.client.numpy_client.NumPyClient.set_context:1 of +msgid "Apply a run context to this client." +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.to_client:1 of +msgid "Return client (itself)." +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:46 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 +#: ../../source/ref-api/flwr.common.Array.rst:28 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 +#: ../../source/ref-api/flwr.common.Code.rst:19 +#: ../../source/ref-api/flwr.common.Context.rst:25 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 +#: ../../source/ref-api/flwr.common.Error.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 +#: ../../source/ref-api/flwr.common.EventType.rst:165 +#: ../../source/ref-api/flwr.common.FitIns.rst:25 +#: ../../source/ref-api/flwr.common.FitRes.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 +#: ../../source/ref-api/flwr.common.Message.rst:37 +#: ../../source/ref-api/flwr.common.MessageType.rst:25 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 +#: ../../source/ref-api/flwr.common.Metadata.rst:25 +#: ../../source/ref-api/flwr.common.Parameters.rst:25 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 +#: ../../source/ref-api/flwr.common.RecordSet.rst:25 +#: 
../../source/ref-api/flwr.common.ServerMessage.rst:25 +#: ../../source/ref-api/flwr.common.Status.rst:25 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 +msgid "Attributes" +msgstr "" + +#: flwr.client.client.Client.evaluate:1::1 of +msgid ":py:obj:`context `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Parameters.rst:2 +#: flwr.client.app.start_client flwr.client.app.start_numpy_client +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.parametersrecord.Array flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.ClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.bulyan.Bulyan +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.fedadagrad.FedAdagrad +#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg +#: flwr.server.strategy.fedavg_android.FedAvgAndroid +#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt +#: flwr.server.strategy.fedprox.FedProx +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg +#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow +#: flwr.simulation.app.start_simulation +#: flwr.simulation.run_simulation.run_simulation of +msgid "Parameters" +msgstr "" + +#: flwr.client.client.Client.evaluate:3 of +msgid "" +"The evaluation instructions containing (global) model parameters received" +" from the server and a dictionary of configuration values used to " +"customize the local evaluation process." 
+msgstr "" + +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.simulation.app.start_simulation of +msgid "Returns" +msgstr "" + +#: flwr.client.client.Client.evaluate:8 of +msgid "" +"The evaluation result containing the loss on the local dataset and other " +"details such as the number of local data examples used for evaluation." 
+msgstr "" + +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.simulation.app.start_simulation of +msgid "Return type" +msgstr "" + +#: flwr.client.client.Client.fit:3 of +msgid "" +"The training instructions containing (global) model parameters received " +"from the server and a dictionary of configuration values used to " +"customize the local training process." +msgstr "" + +#: flwr.client.client.Client.fit:8 of +msgid "" +"The training result containing updated parameters and other details such " +"as the number of local training examples used for training." 
+msgstr "" + +#: flwr.client.client.Client.get_parameters:3 of +msgid "" +"The get parameters instructions received from the server containing a " +"dictionary of configuration values." +msgstr "" + +#: flwr.client.client.Client.get_parameters:7 of +msgid "The current local model parameters." +msgstr "" + +#: flwr.client.client.Client.get_properties:3 of +msgid "" +"The get properties instructions received from the server containing a " +"dictionary of configuration values." +msgstr "" + +#: flwr.client.client.Client.get_properties:7 of +msgid "The current client properties." +msgstr "" + +#: ../../source/ref-api/flwr.client.ClientApp.rst:2 +msgid "ClientApp" +msgstr "" + +#: flwr.client.client_app.ClientApp:1 flwr.common.constant.MessageType:1 +#: flwr.common.constant.MessageTypeLegacy:1 flwr.common.context.Context:1 +#: flwr.common.message.Error:1 flwr.common.message.Message:1 +#: flwr.common.message.Metadata:1 flwr.common.record.parametersrecord.Array:1 +#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 +#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 +#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 +#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 +#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 +#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 +#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 +#: flwr.common.typing.Status:1 flwr.server.history.History:1 +#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 +#: flwr.server.server_config.ServerConfig:1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "Bases: :py:class:`object`" +msgstr "" + +#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 +#: flwr.client.client_app.ClientApp:4 +#: 
flwr.client.client_app.ClientApp.evaluate:4 +#: flwr.client.client_app.ClientApp.query:4 +#: flwr.client.client_app.ClientApp.train:4 flwr.server.app.start_server:41 +#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 +#: of +msgid "Examples" +msgstr "" + +#: flwr.client.client_app.ClientApp:5 of +msgid "" +"Assuming a typical `Client` implementation named `FlowerClient`, you can " +"wrap it in a `ClientApp` as follows:" +msgstr "" + +#: flwr.client.client_app.ClientApp:16 of +msgid "" +"If the above code is in a Python module called `client`, it can be " +"started as follows:" +msgstr "" + +#: flwr.client.client_app.ClientApp:21 of +msgid "" +"In this `client:app` example, `client` refers to the Python module " +"`client.py` in which the previous code lives in and `app` refers to the " +"global attribute `app` that points to an object of type `ClientApp`." +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`evaluate `\\ \\(\\)" +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid "Return a decorator that registers the evaluate fn with the client app." +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`query `\\ \\(\\)" +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.query:1 of +msgid "Return a decorator that registers the query fn with the client app." 
+msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`train `\\ \\(\\)" +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.train:1 of +msgid "Return a decorator that registers the train fn with the client app." +msgstr "" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 +msgid "NumPyClient" +msgstr "" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`evaluate `\\ \\(parameters\\, " +"config\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.fit:1 of +msgid "Train the provided parameters using the locally held dataset." +msgstr "" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`get_parameters `\\ " +"\\(config\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`get_properties `\\ " +"\\(config\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.get_properties:1 of +msgid "Return a client's set of properties." +msgstr "" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`set_context `\\ " +"\\(context\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.to_client:1 of +msgid "Convert to object to Client type and return it." 
+msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:1::1 of +msgid ":py:obj:`context `\\" +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:3 +#: flwr.client.numpy_client.NumPyClient.fit:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 +#: flwr.server.strategy.strategy.Strategy.configure_fit:5 +#: flwr.server.strategy.strategy.Strategy.evaluate:8 of +msgid "The current (global) model parameters." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:5 of +msgid "" +"Configuration parameters which allow the server to influence evaluation " +"on the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to influence the number of examples " +"used for evaluation." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +msgid "" +"* **loss** (*float*) -- The evaluation loss of the model on the local " +"dataset. * **num_examples** (*int*) -- The number of examples used for " +"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " +"arbitrary string keys to values of type bool, bytes, float, int, or " +"str. It can be used to communicate arbitrary values back to the server." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +msgid "" +"**loss** (*float*) -- The evaluation loss of the model on the local " +"dataset." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:12 of +msgid "**num_examples** (*int*) -- The number of examples used for evaluation." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:13 +#: flwr.client.numpy_client.NumPyClient.fit:13 of +msgid "" +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. 
It can be " +"used to communicate arbitrary values back to the server." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:19 of +msgid "" +"The previous return type format (int, float, float) and the extended " +"format (int, float, float, Dict[str, Scalar]) have been deprecated and " +"removed since Flower 0.19." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.fit:5 of +msgid "" +"Configuration parameters which allow the server to influence training on " +"the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to set the number of (local) training " +"epochs." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.fit:11 of +msgid "" +"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " +"**num_examples** (*int*) -- The number of examples used for training. * " +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can " +"be used to communicate arbitrary values back to the server." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.fit:11 of +msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.fit:12 of +msgid "**num_examples** (*int*) -- The number of examples used for training." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of +msgid "" +"Configuration parameters requested by the server. This can be used to " +"tell the client which parameters are needed along with some Scalar " +"attributes." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of +msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.get_properties:3 of +msgid "" +"Configuration parameters requested by the server. 
This can be used to " +"tell the client which properties are needed along with some Scalar " +"attributes." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +msgid "" +"**properties** -- A dictionary mapping arbitrary string keys to values of" +" type bool, bytes, float, int, or str. It can be used to communicate " +"arbitrary property values back to the server." +msgstr "" + +#: ../../source/ref-api/flwr.client.run_client_app.rst:2 +msgid "run\\_client\\_app" +msgstr "" + +#: ../../source/ref-api/flwr.client.run_supernode.rst:2 +msgid "run\\_supernode" +msgstr "" + +#: ../../source/ref-api/flwr.client.start_client.rst:2 +msgid "start\\_client" +msgstr "" + +#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +msgid "" +"The IPv4 or IPv6 address of the server. If the Flower server runs on the " +"same machine on port 8080, then `server_address` would be " +"`\"[::]:8080\"`." +msgstr "" + +#: flwr.client.app.start_client:7 of +msgid "A callable that instantiates a Client. (default: None)" +msgstr "" + +#: flwr.client.app.start_client:9 of +msgid "" +"An implementation of the abstract base class `flwr.client.Client` " +"(default: None)" +msgstr "" + +#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of +msgid "" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" server. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower server needs to be started with the same value (see " +"`flwr.server.start_server`), otherwise it will not know about the " +"increased limit and block larger messages." +msgstr "" + +#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of +msgid "" +"The PEM-encoded root certificates as a byte string or a path string. If " +"provided, a secure connection using the certificates will be established " +"to an SSL-enabled Flower server." 
+msgstr "" + +#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +msgid "" +"Starts an insecure gRPC connection when True. Enables HTTPS connection " +"when False, using system certificates if `root_certificates` is None." +msgstr "" + +#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of +msgid "" +"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " +"bidirectional streaming - 'grpc-rere': gRPC, request-response " +"(experimental) - 'rest': HTTP (experimental)" +msgstr "" + +#: flwr.client.app.start_client:31 of +msgid "" +"The maximum number of times the client will try to connect to the server " +"before giving up in case of a connection error. If set to None, there is " +"no limit to the number of tries." +msgstr "" + +#: flwr.client.app.start_client:35 of +msgid "" +"The maximum duration before the client stops trying to connect to the " +"server in case of connection error. If set to None, there is no limit to " +"the total time." +msgstr "" + +#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of +msgid "Starting a gRPC client with an insecure server connection:" +msgstr "" + +#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of +msgid "Starting an SSL-enabled gRPC client using system certificates:" +msgstr "" + +#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of +msgid "Starting an SSL-enabled gRPC client using provided certificates:" +msgstr "" + +#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 +msgid "start\\_numpy\\_client" +msgstr "" + +#: flwr.client.app.start_numpy_client:5 of +msgid "" +"This function is deprecated since 1.7.0. Use " +":code:`flwr.client.start_client` instead and first convert your " +":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " +":code:`to_client()` method." 
+msgstr "" + +#: flwr.client.app.start_numpy_client:13 of +msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:2 +msgid "common" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.record.conversion_utils.array_from_numpy:1 of +msgid "Create Array from NumPy ndarray." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.bytes_to_ndarray:1 of +msgid "Deserialize NumPy ndarray from bytes." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`configure `\\ \\(identifier\\[\\, " +"filename\\, host\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.logger.configure:1 of +msgid "Configure logging to file and/or remote log server." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`event `\\ \\(event\\_type\\[\\, " +"event\\_details\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.telemetry.event:1 of +msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 +#: of +msgid "Log 'msg % args' with the integer severity 'level'." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarray_to_bytes:1 of +msgid "Serialize NumPy ndarray to bytes." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`now `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.date.now:1 of +msgid "Construct a datetime from time.time() with time zone set to UTC." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`ndarrays_to_parameters `\\ " +"\\(ndarrays\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarrays_to_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 +#: of +msgid "Convert NumPy ndarrays to parameters object." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`parameters_to_ndarrays `\\ " +"\\(parameters\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.parameters_to_ndarrays:1 of +msgid "Convert parameters object to NumPy ndarrays." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.parametersrecord.Array:1 of +msgid "Array type." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`ClientMessage `\\ " +"\\(\\[get\\_properties\\_res\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.typing.ClientMessage:1 of +msgid "ClientMessage is a container used to hold one result message." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`Code `\\ \\(value\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.typing.Code:1 of +msgid "Client status codes." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`ConfigsRecord `\\ " +"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +msgid "Configs record." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`Context `\\ \\(state\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.context.Context:1 of +msgid "State of your run." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.typing.DisconnectRes:1 of +msgid "DisconnectRes message from client to server." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`EvaluateIns `\\ \\(parameters\\, " +"config\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.typing.EvaluateIns:1 of +msgid "Evaluate instructions for a client." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " +"num\\_examples\\, metrics\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.typing.EvaluateRes:1 of +msgid "Evaluate response from a client." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`EventType `\\ \\(value\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.telemetry.EventType:1 of +msgid "Types of telemetry events." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.typing.FitIns:1 of +msgid "Fit instructions for a client." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`FitRes `\\ \\(status\\, parameters\\, " +"num\\_examples\\, metrics\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.typing.FitRes:1 of +msgid "Fit response from a client." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.message.Error:1 of +msgid "A dataclass that stores information about an error that occurred." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.typing.GetParametersIns:1 of +msgid "Parameters request for a client." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`GetParametersRes `\\ \\(status\\, " +"parameters\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.typing.GetParametersRes:1 of +msgid "Response when asked to return parameters." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.typing.GetPropertiesIns:1 of +msgid "Properties request for a client." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`GetPropertiesRes `\\ \\(status\\, " +"properties\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.typing.GetPropertiesRes:1 of +msgid "Properties response from a client." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.message.Message:1 of +msgid "State of your application from the viewpoint of the entity using it." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`MessageType `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.constant.MessageType:1 of +msgid "Message type." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.constant.MessageTypeLegacy:1 of +msgid "Legacy message type." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`Metadata `\\ \\(run\\_id\\, " +"message\\_id\\, src\\_node\\_id\\, ...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.message.Metadata:1 of +msgid "A dataclass holding metadata associated with the current message." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +msgid "Metrics record." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`NDArray `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`Parameters `\\ \\(tensors\\, " +"tensor\\_type\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.typing.Parameters:1 of +msgid "Model parameters." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.parametersrecord.ParametersRecord:1 of +msgid "Parameters record." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.typing.ReconnectIns:1 of +msgid "ReconnectIns message from server to client." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`RecordSet `\\ " +"\\(\\[parameters\\_records\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.recordset.RecordSet:1 of +msgid "RecordSet stores groups of parameters, metrics and configs." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`ServerMessage `\\ " +"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.typing.ServerMessage:1 of +msgid "ServerMessage is a container used to hold one instruction message." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`Status `\\ \\(code\\, message\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.typing.Status:1 of +msgid "Client status." +msgstr "" + +#: ../../source/ref-api/flwr.common.Array.rst:2 +msgid "Array" +msgstr "" + +#: flwr.common.record.parametersrecord.Array:3 of +msgid "" +"A dataclass containing serialized data from an array-like or tensor-like " +"object along with some metadata about it." +msgstr "" + +#: flwr.common.record.parametersrecord.Array:6 of +msgid "" +"A string representing the data type of the serialised object (e.g. " +"`np.float32`)" +msgstr "" + +#: flwr.common.record.parametersrecord.Array:8 of +msgid "" +"A list representing the shape of the unserialized array-like object. This" +" is used to deserialize the data (depending on the serialization method) " +"or simply as a metadata field." 
+msgstr "" + +#: flwr.common.record.parametersrecord.Array:12 of +msgid "" +"A string indicating the type of serialisation mechanism used to generate " +"the bytes in `data` from an array-like or tensor-like object." +msgstr "" + +#: flwr.common.record.parametersrecord.Array:15 of +msgid "A buffer of bytes containing the data." +msgstr "" + +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +msgid ":py:obj:`numpy `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#: flwr.common.record.parametersrecord.Array.numpy:1 of +msgid "Return the array as a NumPy array." +msgstr "" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`dtype `\\" +msgstr "" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`shape `\\" +msgstr "" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`stype `\\" +msgstr "" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`data `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 +msgid "ClientMessage" +msgstr "" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`evaluate_res `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`fit_res `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid "" +":py:obj:`get_parameters_res " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid "" +":py:obj:`get_properties_res " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Code.rst:2 +msgid "Code" +msgstr "" + +#: flwr.common.typing.Code:1 of +msgid "Bases: :py:class:`~enum.Enum`" +msgstr "" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`OK `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 
+msgid "" +":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`EVALUATE_NOT_IMPLEMENTED " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 +msgid "ConfigsRecord" +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" +" :py:class:`bytes` | :py:class:`bool` | :py:class:`~typing.List`\\ " +"[:py:class:`int`] | :py:class:`~typing.List`\\ [:py:class:`float`] | " +":py:class:`~typing.List`\\ [:py:class:`str`] | :py:class:`~typing.List`\\" +" [:py:class:`bytes`] | :py:class:`~typing.List`\\ [:py:class:`bool`]]" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1 +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "Remove all items from R." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 +#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "Return number of Bytes stored in this object." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 +#: flwr.common.record.typeddict.TypedDict.get:1 of +msgid "d defaults to None." 
+msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 +#: flwr.common.record.typeddict.TypedDict.pop:1 of +msgid "If key is not found, d is returned if given, otherwise KeyError is raised." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 +#: flwr.common.record.typeddict.TypedDict.update:1 of +msgid "Update R from dict/iterable E and F." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of +msgid "This function counts booleans as occupying 1 Byte." +msgstr "" + +#: ../../source/ref-api/flwr.common.Context.rst:2 +msgid "Context" +msgstr "" + +#: flwr.common.context.Context:3 of +msgid "" +"Holds records added by the entity in a given run and that will stay " +"local. This means that the data it holds will never leave the system it's" +" running from. This can be used as an intermediate storage or scratchpad " +"when executing mods. It can also be used as a memory to access at " +"different points during the lifecycle of this entity (e.g. 
across " +"multiple rounds)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Context.rst:28::1 +msgid ":py:obj:`state `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 +msgid "DisconnectRes" +msgstr "" + +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 +msgid ":py:obj:`reason `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Error.rst:2 +msgid "Error" +msgstr "" + +#: flwr.common.message.Error:3 of +msgid "An identifier for the error." +msgstr "" + +#: flwr.common.message.Error:5 of +msgid "A reason for why the error arose (e.g. an exception stack-trace)" +msgstr "" + +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`code `\\" +msgstr "" + +#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of +msgid "Error code." +msgstr "" + +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`reason `\\" +msgstr "" + +#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of +msgid "Reason reported about the error." +msgstr "" + +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 +msgid "EvaluateIns" +msgstr "" + +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`parameters `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`config `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 +msgid "EvaluateRes" +msgstr "" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`status `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`loss `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`metrics `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:2 +msgid "EventType" +msgstr "" + +#: flwr.common.telemetry.EventType:1 of +msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" +msgstr "" + +#: 
../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`encode `\\ \\(\\[encoding\\, " +"errors\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.encode:1 of +msgid "Encode the string using the codec registered for encoding." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`replace `\\ \\(old\\, new\\[\\, " +"count\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.replace:1 of +msgid "Return a copy with all occurrences of substring old replaced by new." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`split `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of +msgid "" +"Return a list of the substrings in the string, using sep as the separator" +" string." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rsplit `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.join:1 of +msgid "Concatenate any number of strings." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`capitalize `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.capitalize:1 of +msgid "Return a capitalized version of the string." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`casefold `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.casefold:1 of +msgid "Return a version of the string suitable for caseless comparisons." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`title `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.title:1 of +msgid "Return a version of the string where each word is titlecased." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`center `\\ \\(width\\[\\, " +"fillchar\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.center:1 of +msgid "Return a centered string of length width." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`expandtabs `\\ " +"\\(\\[tabsize\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.expandtabs:1 of +msgid "Return a copy where all tab characters are expanded using spaces." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of +msgid "Partition the string into three parts using the given separator." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`ljust `\\ \\(width\\[\\, " +"fillchar\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.ljust:1 of +msgid "Return a left-justified string of length width." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`lower `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lower:1 of +msgid "Return a copy of the string converted to lowercase." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lstrip:1 of +msgid "Return a copy of the string with leading whitespace removed." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rindex `\\ \\(sub\\[\\, " +"start\\[\\, end\\]\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rjust `\\ \\(width\\[\\, " +"fillchar\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rjust:1 of +msgid "Return a right-justified string of length width." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rstrip:1 of +msgid "Return a copy of the string with trailing whitespace removed." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`splitlines `\\ " +"\\(\\[keepends\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.splitlines:1 of +msgid "Return a list of the lines in the string, breaking at line boundaries." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.strip:1 of +msgid "Return a copy of the string with leading and trailing whitespace removed." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`swapcase `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.swapcase:1 of +msgid "" +"Convert uppercase characters to lowercase and lowercase characters to " +"uppercase." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.translate:1 of +msgid "Replace each character in the string using the given translation table." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`upper `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.upper:1 of +msgid "Return a copy of the string converted to uppercase." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`startswith `\\ \\(prefix\\[\\," +" start\\[\\, end\\]\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return True if S starts with the specified prefix, False otherwise." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`endswith `\\ \\(suffix\\[\\, " +"start\\[\\, end\\]\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return True if S ends with the specified suffix, False otherwise." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`removeprefix `\\ " +"\\(prefix\\, \\/\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removeprefix:1 of +msgid "Return a str with the given prefix string removed if present." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`removesuffix `\\ " +"\\(suffix\\, \\/\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removesuffix:1 of +msgid "Return a str with the given suffix string removed if present." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isascii `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isascii:1 of +msgid "Return True if all characters in the string are ASCII, False otherwise." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`islower `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.islower:1 of +msgid "Return True if the string is a lowercase string, False otherwise." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isupper `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isupper:1 of +msgid "Return True if the string is an uppercase string, False otherwise." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`istitle `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.istitle:1 of +msgid "Return True if the string is a title-cased string, False otherwise." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isspace `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isspace:1 of +msgid "Return True if the string is a whitespace string, False otherwise." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isdecimal `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdecimal:1 of +msgid "Return True if the string is a decimal string, False otherwise." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isdigit `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdigit:1 of +msgid "Return True if the string is a digit string, False otherwise." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isnumeric `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isnumeric:1 of +msgid "Return True if the string is a numeric string, False otherwise." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isalpha `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalpha:1 of +msgid "Return True if the string is an alphabetic string, False otherwise." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isalnum `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalnum:1 of +msgid "Return True if the string is an alpha-numeric string, False otherwise." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isidentifier `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isidentifier:1 of +msgid "Return True if the string is a valid Python identifier, False otherwise." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isprintable `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isprintable:1 of +msgid "Return True if the string is printable, False otherwise." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.zfill:1 of +msgid "" +"Pad a numeric string with zeros on the left, to fill a field of the given" +" width." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`format `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return a formatted version of S, using substitutions from args and kwargs." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`format_map `\\ \\(mapping\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return a formatted version of S, using substitutions from mapping." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`maketrans `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.maketrans:1 of +msgid "Return a translation table usable for str.translate()." +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`PING `\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_CLIENT_ENTER `\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_CLIENT_LEAVE `\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_SERVER_ENTER `\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_SERVER_LEAVE `\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_DRIVER_API_ENTER " +"`\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_DRIVER_API_LEAVE " +"`\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_FLEET_API_ENTER " +"`\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_FLEET_API_LEAVE " +"`\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`DRIVER_CONNECT `\\" +msgstr 
"" + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`DRIVER_DISCONNECT `\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_DRIVER_ENTER `\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_DRIVER_LEAVE `\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_CLIENT_APP_LEAVE " +"`\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SERVER_APP_ENTER " +"`\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SERVER_APP_LEAVE " +"`\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERNODE_ENTER " +"`\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERNODE_LEAVE " +"`\\" +msgstr "" + +#: flwr.common.EventType.capitalize:3 of +msgid "" +"More specifically, make the first character have upper case and the rest " +"lower case." +msgstr "" + +#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 +#: flwr.common.EventType.rjust:3 of +msgid "Padding is done using the specified fill character (default is a space)." +msgstr "" + +#: flwr.common.EventType.count:1 of +msgid "" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]. Optional arguments start and end are interpreted as" +" in slice notation." +msgstr "" + +#: flwr.common.EventType.encode:3 of +msgid "encoding" +msgstr "" + +#: flwr.common.EventType.encode:4 of +msgid "The encoding in which to encode the string." +msgstr "" + +#: flwr.common.EventType.encode:9 of +msgid "errors" +msgstr "" + +#: flwr.common.EventType.encode:6 of +msgid "" +"The error handling scheme to use for encoding errors. The default is " +"'strict' meaning that encoding errors raise a UnicodeEncodeError. 
Other " +"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well " +"as any other name registered with codecs.register_error that can handle " +"UnicodeEncodeErrors." +msgstr "" + +#: flwr.common.EventType.endswith:1 of +msgid "" +"Return True if S ends with the specified suffix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. suffix can also be a tuple of strings " +"to try." +msgstr "" + +#: flwr.common.EventType.expandtabs:3 of +msgid "If tabsize is not given, a tab size of 8 characters is assumed." +msgstr "" + +#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of +msgid "" +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." +msgstr "" + +#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of +msgid "Return -1 on failure." +msgstr "" + +#: flwr.common.EventType.format:1 of +msgid "" +"Return a formatted version of S, using substitutions from args and " +"kwargs. The substitutions are identified by braces ('{' and '}')." +msgstr "" + +#: flwr.common.EventType.format_map:1 of +msgid "" +"Return a formatted version of S, using substitutions from mapping. The " +"substitutions are identified by braces ('{' and '}')." +msgstr "" + +#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of +msgid "Raises ValueError when the substring is not found." +msgstr "" + +#: flwr.common.EventType.isalnum:3 of +msgid "" +"A string is alpha-numeric if all characters in the string are alpha-" +"numeric and there is at least one character in the string." +msgstr "" + +#: flwr.common.EventType.isalpha:3 of +msgid "" +"A string is alphabetic if all characters in the string are alphabetic and" +" there is at least one character in the string." 
+msgstr "" + +#: flwr.common.EventType.isascii:3 of +msgid "" +"ASCII characters have code points in the range U+0000-U+007F. Empty " +"string is ASCII too." +msgstr "" + +#: flwr.common.EventType.isdecimal:3 of +msgid "" +"A string is a decimal string if all characters in the string are decimal " +"and there is at least one character in the string." +msgstr "" + +#: flwr.common.EventType.isdigit:3 of +msgid "" +"A string is a digit string if all characters in the string are digits and" +" there is at least one character in the string." +msgstr "" + +#: flwr.common.EventType.isidentifier:3 of +msgid "" +"Call keyword.iskeyword(s) to test whether string s is a reserved " +"identifier, such as \"def\" or \"class\"." +msgstr "" + +#: flwr.common.EventType.islower:3 of +msgid "" +"A string is lowercase if all cased characters in the string are lowercase" +" and there is at least one cased character in the string." +msgstr "" + +#: flwr.common.EventType.isnumeric:3 of +msgid "" +"A string is numeric if all characters in the string are numeric and there" +" is at least one character in the string." +msgstr "" + +#: flwr.common.EventType.isprintable:3 of +msgid "" +"A string is printable if all of its characters are considered printable " +"in repr() or if it is empty." +msgstr "" + +#: flwr.common.EventType.isspace:3 of +msgid "" +"A string is whitespace if all characters in the string are whitespace and" +" there is at least one character in the string." +msgstr "" + +#: flwr.common.EventType.istitle:3 of +msgid "" +"In a title-cased string, upper- and title-case characters may only follow" +" uncased characters and lowercase characters only cased ones." +msgstr "" + +#: flwr.common.EventType.isupper:3 of +msgid "" +"A string is uppercase if all cased characters in the string are uppercase" +" and there is at least one cased character in the string." 
+msgstr "" + +#: flwr.common.EventType.join:3 of +msgid "" +"The string whose method is called is inserted in between each given " +"string. The result is returned as a new string." +msgstr "" + +#: flwr.common.EventType.join:6 of +msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" +msgstr "" + +#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 +#: flwr.common.EventType.strip:3 of +msgid "If chars is given and not None, remove characters in chars instead." +msgstr "" + +#: flwr.common.EventType.maketrans:3 of +msgid "" +"If there is only one argument, it must be a dictionary mapping Unicode " +"ordinals (integers) or characters to Unicode ordinals, strings or None. " +"Character keys will be then converted to ordinals. If there are two " +"arguments, they must be strings of equal length, and in the resulting " +"dictionary, each character in x will be mapped to the character at the " +"same position in y. If there is a third argument, it must be a string, " +"whose characters will be mapped to None in the result." +msgstr "" + +#: flwr.common.EventType.partition:3 of +msgid "" +"This will search for the separator in the string. If the separator is " +"found, returns a 3-tuple containing the part before the separator, the " +"separator itself, and the part after it." +msgstr "" + +#: flwr.common.EventType.partition:7 of +msgid "" +"If the separator is not found, returns a 3-tuple containing the original " +"string and two empty strings." +msgstr "" + +#: flwr.common.EventType.removeprefix:3 of +msgid "" +"If the string starts with the prefix string, return string[len(prefix):]." +" Otherwise, return a copy of the original string." +msgstr "" + +#: flwr.common.EventType.removesuffix:3 of +msgid "" +"If the string ends with the suffix string and that suffix is not empty, " +"return string[:-len(suffix)]. Otherwise, return a copy of the original " +"string." 
+msgstr "" + +#: flwr.common.EventType.replace:5 of +msgid "count" +msgstr "" + +#: flwr.common.EventType.replace:4 of +msgid "" +"Maximum number of occurrences to replace. -1 (the default value) means " +"replace all occurrences." +msgstr "" + +#: flwr.common.EventType.replace:7 of +msgid "" +"If the optional argument count is given, only the first count occurrences" +" are replaced." +msgstr "" + +#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of +msgid "" +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." +msgstr "" + +#: flwr.common.EventType.rpartition:3 of +msgid "" +"This will search for the separator in the string, starting at the end. If" +" the separator is found, returns a 3-tuple containing the part before the" +" separator, the separator itself, and the part after it." +msgstr "" + +#: flwr.common.EventType.rpartition:7 of +msgid "" +"If the separator is not found, returns a 3-tuple containing two empty " +"strings and the original string." +msgstr "" + +#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of +msgid "sep" +msgstr "" + +#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of +msgid "The separator used to split the string." +msgstr "" + +#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of +msgid "" +"When set to None (the default value), will split on any whitespace " +"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" +" empty strings from the result." +msgstr "" + +#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of +msgid "maxsplit" +msgstr "" + +#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of +msgid "" +"Maximum number of splits (starting from the left). -1 (the default value)" +" means no limit." 
+msgstr "" + +#: flwr.common.EventType.rsplit:13 of +msgid "Splitting starts at the end of the string and works to the front." +msgstr "" + +#: flwr.common.EventType.split:13 of +msgid "" +"Note, str.split() is mainly useful for data that has been intentionally " +"delimited. With natural text that includes punctuation, consider using " +"the regular expression module." +msgstr "" + +#: flwr.common.EventType.splitlines:3 of +msgid "" +"Line breaks are not included in the resulting list unless keepends is " +"given and true." +msgstr "" + +#: flwr.common.EventType.startswith:1 of +msgid "" +"Return True if S starts with the specified prefix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. prefix can also be a tuple of strings " +"to try." +msgstr "" + +#: flwr.common.EventType.title:3 of +msgid "" +"More specifically, words start with uppercased characters and all " +"remaining cased characters have lower case." +msgstr "" + +#: flwr.common.EventType.translate:5 of +msgid "table" +msgstr "" + +#: flwr.common.EventType.translate:4 of +msgid "" +"Translation table, which must be a mapping of Unicode ordinals to Unicode" +" ordinals, strings, or None." +msgstr "" + +#: flwr.common.EventType.translate:7 of +msgid "" +"The table must implement lookup/indexing via __getitem__, for instance a " +"dictionary or list. If this operation raises LookupError, the character " +"is left untouched. Characters mapped to None are deleted." +msgstr "" + +#: flwr.common.EventType.zfill:3 of +msgid "The string is never truncated." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.FitIns.rst:2 +msgid "FitIns" +msgstr "" + +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`parameters `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`config `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.FitRes.rst:2 +msgid "FitRes" +msgstr "" + +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`status `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`parameters `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`metrics `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 +msgid "GetParametersIns" +msgstr "" + +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 +msgid ":py:obj:`config `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 +msgid "GetParametersRes" +msgstr "" + +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`status `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`parameters `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 +msgid "GetPropertiesIns" +msgstr "" + +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 +msgid ":py:obj:`config `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 +msgid "GetPropertiesRes" +msgstr "" + +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`status `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`properties `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:2 +msgid "Message" +msgstr "" + +#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 +#: flwr.common.message.Message:3 of +msgid "A 
dataclass including information about the message to be executed." +msgstr "" + +#: flwr.common.message.Message:5 of +msgid "" +"Holds records either sent by another entity (e.g. sent by the server-side" +" logic to a client, or vice-versa) or that will be sent to it." +msgstr "" + +#: flwr.common.message.Message:8 of +msgid "" +"A dataclass that captures information about an error that took place when" +" processing another message." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid "" +":py:obj:`create_error_reply `\\ " +"\\(error\\[\\, ttl\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_error_reply:1 of +msgid "Construct a reply message indicating an error happened." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid "" +":py:obj:`create_reply `\\ " +"\\(content\\[\\, ttl\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_reply:1 of +msgid "Create a reply to this message with specified content and TTL." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_content `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_content:1 of +msgid "Return True if message has content, else False." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_error `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_error:1 of +msgid "Return True if message has an error, else False." +msgstr "" + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`content `\\" +msgstr "" + +#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 +#: of +msgid "The content of this message." 
+msgstr "" + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`error `\\" +msgstr "" + +#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of +msgid "Error captured by this message." +msgstr "" + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`metadata `\\" +msgstr "" + +#: flwr.common.message.Message.create_error_reply:3 of +msgid "The error that was encountered." +msgstr "" + +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of +msgid "" +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. This " +"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - " +"msg.meta.created_at)" +msgstr "" + +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of +msgid "" +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. This " +"follows the equation:" +msgstr "" + +#: flwr.common.message.Message.create_error_reply:9 +#: flwr.common.message.Message.create_reply:13 of +msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" +msgstr "" + +#: flwr.common.message.Message.create_reply:3 of +msgid "" +"The method generates a new `Message` as a reply to this message. It " +"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " +"this message and sets 'reply_to_message' to the ID of this message." +msgstr "" + +#: flwr.common.message.Message.create_reply:7 of +msgid "The content for the reply message." +msgstr "" + +#: flwr.common.message.Message.create_reply:16 of +msgid "A new `Message` instance representing the reply." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:2 +msgid "MessageType" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`EVALUATE `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`QUERY `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`TRAIN `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 +msgid "MessageTypeLegacy" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PARAMETERS `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PROPERTIES `\\" +msgstr "" + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of +msgid "An identifier for the current run." +msgstr "" + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of +msgid "An identifier for the current message." +msgstr "" + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of +msgid "An identifier for the node sending this message." +msgstr "" + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of +msgid "An identifier for the node receiving this message." +msgstr "" + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of +msgid "An identifier for the message this message replies to." +msgstr "" + +#: flwr.common.message.Metadata:13 of +msgid "" +"An identifier for grouping messages. In some settings, this is used as " +"the FL round." +msgstr "" + +#: flwr.common.message.Metadata:16 of +msgid "Time-to-live for this message in seconds." 
+msgstr "" + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of +msgid "A string that encodes the action to be executed on the receiving end." +msgstr "" + +#: flwr.common.message.Metadata:21 of +msgid "" +"An identifier that can be used when loading a particular data partition " +"for a ClientApp. Making use of this identifier is more relevant when " +"conducting simulations." +msgstr "" + +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`created_at `\\" +msgstr "" + +#: flwr.common.Metadata.created_at:1 +#: flwr.common.Metadata.created_at:1::1 of +msgid "Unix timestamp when the message was created." +msgstr "" + +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`dst_node_id `\\" +msgstr "" + +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`group_id `\\" +msgstr "" + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.group_id:1 of +msgid "An identifier for grouping messages." +msgstr "" + +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`message_id `\\" +msgstr "" + +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`message_type `\\" +msgstr "" + +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`partition_id `\\" +msgstr "" + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.partition_id:1 of +msgid "An identifier telling which data partition a ClientApp should use." +msgstr "" + +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`reply_to_message `\\" +msgstr "" + +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`run_id `\\" +msgstr "" + +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`src_node_id `\\" +msgstr "" + +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`ttl `\\" +msgstr "" + +#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 +#: of +msgid "Time-to-live for this message." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 +msgid "MetricsRecord" +msgstr "" + +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " +":py:class:`~typing.List`\\ [:py:class:`int`] | :py:class:`~typing.List`\\" +" [:py:class:`float`]]" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.NDArray.rst:2 +msgid "NDArray" +msgstr "" + +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensors `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensor_type `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 +msgid "ParametersRecord" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord:3 of +msgid "" +"A 
dataclass storing named Arrays in order. This means that it holds " +"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " +"viewed as an equivalent to PyTorch's state_dict, but holding serialised " +"tensors instead." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of +msgid "" +"Note that a small amount of Bytes might also be included in this counting" +" that correspond to metadata of the serialized object (e.g. of NumPy " +"array) needed for deseralization." +msgstr "" + +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 +msgid "ReconnectIns" +msgstr "" + +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 +msgid ":py:obj:`seconds `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.RecordSet.rst:2 +msgid "RecordSet" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`configs_records `\\" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1 +#: flwr.common.RecordSet.configs_records:1::1 of +msgid "Dictionary holding ConfigsRecord instances." 
+msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`metrics_records `\\" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.metrics_records:1 of +msgid "Dictionary holding MetricsRecord instances." +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`parameters_records `\\" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.parameters_records:1 of +msgid "Dictionary holding ParametersRecord instances." +msgstr "" + +#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 +msgid "ServerMessage" +msgstr "" + +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`evaluate_ins `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`fit_ins `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid "" +":py:obj:`get_parameters_ins " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid "" +":py:obj:`get_properties_ins " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Status.rst:2 +msgid "Status" +msgstr "" + +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`code `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`message `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 +msgid "array\\_from\\_numpy" +msgstr "" + +#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 +msgid "bytes\\_to\\_ndarray" +msgstr "" + +#: ../../source/ref-api/flwr.common.configure.rst:2 +msgid "configure" +msgstr "" + +#: ../../source/ref-api/flwr.common.event.rst:2 +msgid "event" +msgstr "" + +#: ../../source/ref-api/flwr.common.log.rst:2 +msgid "log" +msgstr "" + +#: logging.Logger.log:3 of +msgid "" +"To pass exception information, use the keyword argument exc_info with a " +"true value, e.g." 
+msgstr "" + +#: logging.Logger.log:6 of +#, python-format +msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +msgstr "" + +#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 +msgid "ndarray\\_to\\_bytes" +msgstr "" + +#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 +msgid "ndarrays\\_to\\_parameters" +msgstr "" + +#: ../../source/ref-api/flwr.common.now.rst:2 +msgid "now" +msgstr "" + +#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 +msgid "parameters\\_to\\_ndarrays" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:2 +msgid "server" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:26::1 +msgid ":py:obj:`run_driver_api `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:26::1 +#: flwr.server.app.run_driver_api:1 of +msgid "Run Flower server (Driver API)." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:26::1 +msgid ":py:obj:`run_fleet_api `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:26::1 +#: flwr.server.app.run_fleet_api:1 of +msgid "Run Flower server (Fleet API)." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:26::1 +msgid ":py:obj:`run_server_app `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:26::1 +#: flwr.server.run_serverapp.run_server_app:1 of +msgid "Run Flower server app." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:26::1 +msgid ":py:obj:`run_superlink `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:26::1 +#: flwr.server.app.run_superlink:1 of +msgid "Run Flower SuperLink (Driver API and Fleet API)." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:26::1 +msgid "" +":py:obj:`start_server `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:26::1 +#: flwr.server.app.start_server:1 of +msgid "Start a Flower server using the gRPC transport layer." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.rst:40::1 +msgid ":py:obj:`ClientManager `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:40::1 +#: flwr.server.client_manager.ClientManager:1 of +msgid "Abstract base class for managing Flower clients." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:40::1 +msgid ":py:obj:`Driver `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:40::1 +#: flwr.server.driver.driver.Driver:1 of +msgid "Abstract base Driver class for the Driver API." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:40::1 +msgid ":py:obj:`History `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:40::1 +#: flwr.server.history.History:1 of +msgid "History class for training and/or evaluation metrics collection." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:40::1 +msgid "" +":py:obj:`LegacyContext `\\ \\(state\\[\\, " +"config\\, strategy\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:40::1 +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Legacy Context." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:40::1 +msgid "" +":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " +"strategy\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:40::1 +msgid "" +":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " +"strategy\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:40::1 +#: flwr.server.server_app.ServerApp:1 of +msgid "Flower ServerApp." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:40::1 +msgid "" +":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," +" round\\_timeout\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:40::1 +#: flwr.server.server_config.ServerConfig:1 of +msgid "Flower server config." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.rst:40::1 +msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:40::1 +#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Provides a pool of available clients." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:59::1 +msgid ":py:obj:`flwr.server.strategy `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:59::1 +#: flwr.server.strategy:1 of +msgid "Contains the strategy abstraction and different implementations." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:59::1 +msgid ":py:obj:`flwr.server.workflow `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:59::1 +#: flwr.server.workflow:1 of +msgid "Workflows." +msgstr "" + +#: ../../source/ref-api/flwr.server.ClientManager.rst:2 +msgid "ClientManager" +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1 +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.all:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "Return all available clients." +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`num_available `\\ \\(\\)" +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.num_available:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.num_available:1 of +msgid "Return the number of available clients." 
+msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`register `\\ \\(client\\)" +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.register:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.register:1 of +msgid "Register Flower ClientProxy instance." +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid "" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.sample:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.sample:1 of +msgid "Sample a number of Flower ClientProxy instances." +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`unregister `\\ \\(client\\)" +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.unregister:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.unregister:1 of +msgid "Unregister Flower ClientProxy instance." +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid "" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\, timeout\\)" +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.wait_for:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of +msgid "Wait until at least `num_clients` are available." +msgstr "" + +#: flwr.server.client_manager.ClientManager.num_available:3 +#: flwr.server.client_manager.SimpleClientManager.num_available:3 of +msgid "**num_available** -- The number of currently available clients." 
+msgstr "" + +#: flwr.server.client_manager.ClientManager.register:6 +#: flwr.server.client_manager.SimpleClientManager.register:6 of +msgid "" +"**success** -- Indicating if registration was successful. False if " +"ClientProxy is already registered or can not be registered for any " +"reason." +msgstr "" + +#: flwr.server.client_manager.ClientManager.unregister:3 +#: flwr.server.client_manager.SimpleClientManager.unregister:3 of +msgid "This method is idempotent." +msgstr "" + +#: ../../source/ref-api/flwr.server.Driver.rst:2 +msgid "Driver" +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:1::1 of +msgid "" +":py:obj:`create_message `\\ " +"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:1 +#: flwr.server.driver.driver.Driver.create_message:1::1 of +msgid "Create a new message with specified parameters." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:1::1 of +msgid ":py:obj:`get_node_ids `\\ \\(\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:1::1 +#: flwr.server.driver.driver.Driver.get_node_ids:1 of +msgid "Get node IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:1::1 of +msgid "" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:1::1 +#: flwr.server.driver.driver.Driver.pull_messages:1 of +msgid "Pull messages based on message IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:1::1 of +msgid "" +":py:obj:`push_messages `\\ " +"\\(messages\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:1::1 +#: flwr.server.driver.driver.Driver.push_messages:1 of +msgid "Push messages to specified node IDs." 
+msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:1::1 of +msgid "" +":py:obj:`send_and_receive `\\ " +"\\(messages\\, \\*\\[\\, timeout\\]\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:1::1 +#: flwr.server.driver.driver.Driver.send_and_receive:1 of +msgid "Push messages to specified node IDs and pull the reply messages." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:3 of +msgid "" +"This method constructs a new `Message` with given content and metadata. " +"The `run_id` and `src_node_id` will be set automatically." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:6 of +msgid "" +"The content for the new message. This holds records that are to be sent " +"to the destination node." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:9 of +msgid "" +"The type of the message, defining the action to be executed on the " +"receiving end." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:12 of +msgid "The ID of the destination node to which the message is being sent." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:14 of +msgid "" +"The ID of the group to which this message is associated. In some " +"settings, this is used as the FL round." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:17 of +msgid "" +"Time-to-live for the round trip of this message, i.e., the time from " +"sending this message to receiving a reply. It specifies in seconds the " +"duration for which the message and its potential reply are considered " +"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " +"used." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:23 of +msgid "" +"**message** -- A new `Message` instance with the specified content and " +"metadata." 
+msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:3 of +msgid "" +"This method is used to collect messages from the SuperLink that " +"correspond to a set of given message IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:6 of +msgid "An iterable of message IDs for which reply messages are to be retrieved." +msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:9 of +msgid "**messages** -- An iterable of messages received." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:3 of +msgid "" +"This method takes an iterable of messages and sends each message to the " +"node specified in `dst_node_id`." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:6 +#: flwr.server.driver.driver.Driver.send_and_receive:7 of +msgid "An iterable of messages to be sent." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:9 of +msgid "" +"**message_ids** -- An iterable of IDs for the messages that were sent, " +"which can be used to pull replies." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:3 of +msgid "" +"This method sends a list of messages to their destination node IDs and " +"then waits for the replies. It continues to pull replies until either all" +" replies are received or the specified timeout duration is exceeded." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:9 of +msgid "" +"The timeout duration in seconds. If specified, the method will wait for " +"replies for this duration. If `None`, there is no time limit and the " +"method will wait until replies for all messages are received." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:14 of +msgid "**replies** -- An iterable of reply messages received from the SuperLink." 
+msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:18 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:53 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:60 +#: of +msgid "Notes" +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:19 of +msgid "" +"This method uses `push_messages` to send the messages and `pull_messages`" +" to collect the replies. If `timeout` is set, the method may not return " +"replies for all sent messages. A message remains valid until its TTL, " +"which is not affected by `timeout`." +msgstr "" + +#: ../../source/ref-api/flwr.server.History.rst:2 +msgid "History" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1 +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "Add one loss entry (from centralized evaluation)." +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_loss_distributed:1 of +msgid "Add one loss entry (from distributed evaluation)." +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_metrics_centralized " +"`\\ \\(server\\_round\\, " +"metrics\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_centralized:1 of +msgid "Add metrics entries (from centralized evaluation)." 
+msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_metrics_distributed " +"`\\ \\(server\\_round\\, " +"metrics\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed:1 of +msgid "Add metrics entries (from distributed evaluation)." +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_metrics_distributed_fit " +"`\\ \\(server\\_round\\," +" ...\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed_fit:1 of +msgid "Add metrics entries (from distributed fit)." +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 +msgid "LegacyContext" +msgstr "" + +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Bases: :py:class:`~flwr.common.context.Context`" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +msgid ":py:obj:`config `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +msgid ":py:obj:`strategy `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +msgid ":py:obj:`client_manager `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +msgid ":py:obj:`history `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +msgid ":py:obj:`state `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.Server.rst:2 +msgid "Server" +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`client_manager `\\ \\(\\)" +msgstr "" + +#: flwr.server.server.Server.client_manager:1 +#: flwr.server.server.Server.client_manager:1::1 of +msgid "Return ClientManager." 
+msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid "" +":py:obj:`disconnect_all_clients " +"`\\ \\(timeout\\)" +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.disconnect_all_clients:1 of +msgid "Send shutdown signal to all clients." +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid "" +":py:obj:`evaluate_round `\\ " +"\\(server\\_round\\, timeout\\)" +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.evaluate_round:1 of +msgid "Validate current global model on a number of clients." +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit:1 of +msgid "Run federated averaging for a number of rounds." +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid "" +":py:obj:`fit_round `\\ \\(server\\_round\\," +" timeout\\)" +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit_round:1 of +msgid "Perform a single round of federated averaging." +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid "" +":py:obj:`set_max_workers `\\ " +"\\(max\\_workers\\)" +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_max_workers:1 of +msgid "Set the max_workers used by ThreadPoolExecutor." +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_strategy:1 of +msgid "Replace server strategy." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 +msgid "ServerApp" +msgstr "" + +#: flwr.server.server_app.ServerApp:5 of +msgid "Use the `ServerApp` with an existing `Strategy`:" +msgstr "" + +#: flwr.server.server_app.ServerApp:15 of +msgid "Use the `ServerApp` with a custom main function:" +msgstr "" + +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid ":py:obj:`main `\\ \\(\\)" +msgstr "" + +#: flwr.server.server_app.ServerApp.main:1 +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid "Return a decorator that registers the main fn with the server app." +msgstr "" + +#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 +msgid "ServerConfig" +msgstr "" + +#: flwr.server.server_config.ServerConfig:3 of +msgid "" +"All attributes have default values which allows users to configure just " +"the ones they care about." +msgstr "" + +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`num_rounds `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`round_timeout `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 +msgid "SimpleClientManager" +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`num_available `\\" +" \\(\\)" +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`register `\\ " +"\\(client\\)" +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`unregister `\\ " +"\\(client\\)" +msgstr "" + 
+#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\[\\, timeout\\]\\)" +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of +msgid "" +"Blocks until the requested number of clients is available or until a " +"timeout is reached. Current timeout default: 1 day." +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of +msgid "The number of clients to wait for." +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of +msgid "The time in seconds to wait for, defaults to 86400 (24h)." +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of +msgid "**success**" +msgstr "" + +#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 +msgid "run\\_driver\\_api" +msgstr "" + +#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 +msgid "run\\_fleet\\_api" +msgstr "" + +#: ../../source/ref-api/flwr.server.run_server_app.rst:2 +msgid "run\\_server\\_app" +msgstr "" + +#: ../../source/ref-api/flwr.server.run_superlink.rst:2 +msgid "run\\_superlink" +msgstr "" + +#: ../../source/ref-api/flwr.server.start_driver.rst:2 +msgid "start\\_driver" +msgstr "" + +#: ../../source/ref-api/flwr.server.start_server.rst:2 +msgid "start\\_server" +msgstr "" + +#: flwr.server.app.start_server:3 of +msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +msgstr "" + +#: flwr.server.app.start_server:5 of +msgid "" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_server` will create one." +msgstr "" + +#: flwr.server.app.start_server:9 flwr.simulation.app.start_simulation:28 of +msgid "" +"Currently supported values are `num_rounds` (int, default: 1) and " +"`round_timeout` in seconds (float, default: None)." 
+msgstr "" + +#: flwr.server.app.start_server:12 of +msgid "" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`start_server` will use `flwr.server.strategy.FedAvg`." +msgstr "" + +#: flwr.server.app.start_server:16 of +msgid "" +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_server` will use " +"`flwr.server.client_manager.SimpleClientManager`." +msgstr "" + +#: flwr.server.app.start_server:21 of +msgid "" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" clients. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower clients need to be started with the same value (see " +"`flwr.client.start_client`), otherwise clients will not know about the " +"increased limit and block larger messages." +msgstr "" + +#: flwr.server.app.start_server:28 of +msgid "" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order: * CA certificate. * " +"server certificate. * server private key." +msgstr "" + +#: flwr.server.app.start_server:28 of +msgid "" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order:" +msgstr "" + +#: flwr.server.app.start_server:32 of +msgid "CA certificate." +msgstr "" + +#: flwr.server.app.start_server:33 of +msgid "server certificate." +msgstr "" + +#: flwr.server.app.start_server:34 of +msgid "server private key." +msgstr "" + +#: flwr.server.app.start_server:37 of +msgid "**hist** -- Object containing training and evaluation metrics." 
+msgstr "" + +#: flwr.server.app.start_server:42 of +msgid "Starting an insecure server:" +msgstr "" + +#: flwr.server.app.start_server:46 of +msgid "Starting an SSL-enabled server:" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:2 +msgid "strategy" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.bulyan.Bulyan:1 of +msgid "Bulyan strategy." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " +"`\\ " +"\\(...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side adaptive clipping." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " +"`\\ " +"\\(...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: of +msgid "Strategy wrapper for central DP with server-side adaptive clipping." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DifferentialPrivacyClientSideFixedClipping " +"`\\ " +"\\(...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side fixed clipping." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DifferentialPrivacyServerSideFixedClipping " +"`\\ " +"\\(...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: of +msgid "Strategy wrapper for central DP with server-side fixed clipping." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of +msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedAdam `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadam.FedAdam:1 of +msgid "FedAdam - Adaptive Federated Optimization using Adam." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedAvg `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Federated Averaging strategy." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedAvgAndroid `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 of +msgid "Federated Averaging with Momentum strategy." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedmedian.FedMedian:1 of +msgid "Configurable FedMedian strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedOpt `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedopt.FedOpt:1 of +msgid "Federated Optim strategy." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedProx `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedprox.FedProx:1 of +msgid "Federated Optimization strategy." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of +msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +msgid "Configurable FedXgbBagging strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of +msgid "Configurable FedXgbCyclic strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of +msgid "Configurable fault-tolerant FedAvg strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.krum.Krum:1 of +msgid "Krum [Blanchard et al., 2017] strategy." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Configurable QFedAvg strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid ":py:obj:`Strategy `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy:1 of +msgid "Abstract base class for server strategy implementations." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 +msgid "Bulyan" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 +#: flwr.server.strategy.fedmedian.FedMedian:1 +#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 +#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:3 of +msgid "Implementation based on https://arxiv.org/abs/1802.07927." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:5 +#: flwr.server.strategy.fedadagrad.FedAdagrad:5 +#: flwr.server.strategy.fedadam.FedAdam:5 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 +#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 +#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of +msgid "Fraction of clients used during training. Defaults to 1.0." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:7 +#: flwr.server.strategy.fedadagrad.FedAdagrad:7 +#: flwr.server.strategy.fedadam.FedAdam:7 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 +#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 +#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of +msgid "Fraction of clients used during validation. Defaults to 1.0." 
+msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:9 +#: flwr.server.strategy.fedadagrad.FedAdagrad:9 +#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 +#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 +#: flwr.server.strategy.fedprox.FedProx:45 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 +#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of +msgid "Minimum number of clients used during training. Defaults to 2." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:11 +#: flwr.server.strategy.fedadagrad.FedAdagrad:11 +#: flwr.server.strategy.fedadam.FedAdam:11 +#: flwr.server.strategy.fedavg.FedAvg:15 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 +#: flwr.server.strategy.fedavgm.FedAvgM:11 +#: flwr.server.strategy.fedopt.FedOpt:11 +#: flwr.server.strategy.fedprox.FedProx:47 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 +#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of +msgid "Minimum number of clients used during validation. Defaults to 2." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:13 +#: flwr.server.strategy.fedadagrad.FedAdagrad:13 +#: flwr.server.strategy.fedadam.FedAdam:13 +#: flwr.server.strategy.fedavg.FedAvg:17 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 +#: flwr.server.strategy.fedavgm.FedAvgM:13 +#: flwr.server.strategy.fedopt.FedOpt:13 +#: flwr.server.strategy.fedprox.FedProx:49 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 +#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of +msgid "Minimum number of total clients in the system. Defaults to 2." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of +msgid "Number of malicious clients in the system. Defaults to 0." 
+msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:17 +#: flwr.server.strategy.fedadagrad.FedAdagrad:15 +#: flwr.server.strategy.fedadam.FedAdam:15 +#: flwr.server.strategy.fedavg.FedAvg:19 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 +#: flwr.server.strategy.fedavgm.FedAvgM:15 +#: flwr.server.strategy.fedopt.FedOpt:15 +#: flwr.server.strategy.fedprox.FedProx:51 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 +#: flwr.server.strategy.fedyogi.FedYogi:17 +#: flwr.server.strategy.fedyogi.FedYogi:18 +#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of +msgid "Optional function used for validation. Defaults to None." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:19 +#: flwr.server.strategy.fedadagrad.FedAdagrad:17 +#: flwr.server.strategy.fedadam.FedAdam:17 +#: flwr.server.strategy.fedavg.FedAvg:21 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 +#: flwr.server.strategy.fedavgm.FedAvgM:17 +#: flwr.server.strategy.fedopt.FedOpt:17 +#: flwr.server.strategy.fedprox.FedProx:53 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 +#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of +msgid "Function used to configure training. Defaults to None." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:19 +#: flwr.server.strategy.fedadam.FedAdam:19 +#: flwr.server.strategy.fedavg.FedAvg:23 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 +#: flwr.server.strategy.fedavgm.FedAvgM:19 +#: flwr.server.strategy.fedopt.FedOpt:19 +#: flwr.server.strategy.fedprox.FedProx:55 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 +#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of +msgid "Function used to configure validation. Defaults to None." 
+msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:23 +#: flwr.server.strategy.fedadagrad.FedAdagrad:25 +#: flwr.server.strategy.fedadam.FedAdam:21 +#: flwr.server.strategy.fedavg.FedAvg:25 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 +#: flwr.server.strategy.fedavgm.FedAvgM:21 +#: flwr.server.strategy.fedopt.FedOpt:21 +#: flwr.server.strategy.fedprox.FedProx:57 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 +#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of +msgid "Whether or not accept rounds containing failures. Defaults to True." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:25 +#: flwr.server.strategy.fedadagrad.FedAdagrad:27 +#: flwr.server.strategy.fedadam.FedAdam:23 +#: flwr.server.strategy.fedavg.FedAvg:27 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 +#: flwr.server.strategy.fedavgm.FedAvgM:23 +#: flwr.server.strategy.fedopt.FedOpt:23 +#: flwr.server.strategy.fedprox.FedProx:59 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 +#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of +msgid "Initial global model parameters." 
+msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:27 of +msgid "" +"Byzantine resilient aggregation rule that is used as the first step of " +"the Bulyan (e.g., Krum)" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:29 of +msgid "arguments to the first_aggregation rule" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate evaluation losses using weighted average." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate fit results using Bulyan." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of +msgid "Configure the next round of evaluation." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 +#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 +#: 
flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of +msgid "Configure the next round of training." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Evaluate model parameters using an evaluation function." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 +#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Initialize global model parameters." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of +msgid "Use a fraction of available clients for evaluation." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of +msgid "Return the sample size and the required number of available clients." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 +msgid "DPFedAvgAdaptive" +msgstr "" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" +msgstr "" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of +msgid "This class is deprecated and will be removed in a future release." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation losses using the given strategy." 
+msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of +msgid "Configure the next round of evaluation using the specified strategy." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 +#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of +msgid "Evaluate model parameters using an evaluation function from the strategy." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of +msgid "Initialize global model parameters using given strategy." 
+msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.configure_fit:3 +#: flwr.server.strategy.strategy.Strategy.evaluate:6 of +msgid "The current round of federated learning." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 +#: flwr.server.strategy.strategy.Strategy.configure_fit:7 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of +msgid "The client manager which holds all currently connected clients." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of +msgid "" +"**evaluate_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `EvaluateIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated evaluation." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 +msgid "DPFedAvgFixed" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of +msgid "Aggregate training results using unweighted aggregation." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of +msgid "" +"Configure the next round of training incorporating Differential Privacy " +"(DP)." 
+msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of +msgid "" +"Configuration of the next training round includes information related to " +"DP, such as clip norm and noise stddev." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 +#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of +msgid "" +"**fit_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `FitIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated learning." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyClientSideAdaptiveClipping" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 +#: of +msgid "Use `adaptiveclipping_mod` modifier at the client side." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 +#: of +msgid "" +"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " +"happen on the client-side, usually by using the built-in " +"`adaptiveclipping_mod`." 
+msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 +#: of +msgid "The strategy to which DP functionalities will be added by this wrapper." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 +#: of +msgid "The noise multiplier for the Gaussian mechanism for model updates." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 +#: of +msgid "The number of clients that are sampled on each round." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 +#: of +msgid "" +"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " +"recommends to set to 0.1." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 +#: of +msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." 
+msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 +#: of +msgid "" +"The learning rate for the clipping norm adaptation. Defaults to 0.2. " +"Andrew et al. recommends to set to 0.2." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 +#: of +msgid "" +"The stddev of the noise added to the count of updates currently below the" +" estimate. Andrew et al. recommends to set to `expected_num_records/20`" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 +#: of +msgid "Create a strategy:" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 +#: of +msgid "" +"Wrap the strategy with the " +"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 +#: of +msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: 
flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 +#: of +msgid "Aggregate training results and update clip norms." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 +msgid "DifferentialPrivacyClientSideFixedClipping" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 +#: of +msgid "Use `fixedclipping_mod` modifier at the client side." 
+msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 +#: of +msgid "" +"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " +"on the client-side, usually by using the built-in `fixedclipping_mod`." +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 +#: of +msgid "" +"The noise multiplier for the Gaussian mechanism for model updates. A " +"value of 1.0 or higher is recommended for strong privacy." +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 +#: of +msgid "The value of the clipping norm." +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 +#: of +msgid "" +"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " +"wrapper:" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 +#: of +msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 +#: of +msgid "Add noise to the aggregated parameters." +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyServerSideAdaptiveClipping" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 +#: of +msgid "" +"The standard deviation of the noise added to the count of updates below " +"the estimate. Andrew et al. 
recommends to set to " +"`expected_num_records/20`" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 +#: of +msgid "" +"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " +"wrapper" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 +msgid "DifferentialPrivacyServerSideFixedClipping" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 +#: of +msgid "" +"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " +"wrapper" +msgstr "" + +#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 +#: of +msgid "Compute the updates, clip, and pass them for aggregation." +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 +#: of +msgid "Afterward, add noise to the aggregated parameters." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 +msgid "FaultTolerantFedAvg" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 +#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 +#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 +#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using weighted average." 
+msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 +#: ../../source/ref-changelog.md:905 +msgid "FedAdagrad" +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 +#: flwr.server.strategy.fedadam.FedAdam:1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:3 +#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 +#: flwr.server.strategy.fedyogi.FedYogi:3 of +msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:23 +#: flwr.server.strategy.fedadam.FedAdam:25 +#: flwr.server.strategy.fedadam.FedAdam:27 +#: 
flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 +#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 +#: flwr.server.strategy.fedprox.FedProx:61 +#: flwr.server.strategy.fedprox.FedProx:63 +#: flwr.server.strategy.fedyogi.FedYogi:28 +#: flwr.server.strategy.fedyogi.FedYogi:30 of +msgid "Metrics aggregation function, optional." +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:29 +#: flwr.server.strategy.fedadam.FedAdam:29 +#: flwr.server.strategy.fedopt.FedOpt:29 of +msgid "Server-side learning rate. Defaults to 1e-1." +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:31 +#: flwr.server.strategy.fedadam.FedAdam:31 +#: flwr.server.strategy.fedopt.FedOpt:31 of +msgid "Client-side learning rate. Defaults to 1e-1." +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:33 +#: flwr.server.strategy.fedadam.FedAdam:37 +#: flwr.server.strategy.fedopt.FedOpt:37 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 +msgid "FedAdam" +msgstr "" + +#: flwr.server.strategy.fedadam.FedAdam:33 +#: flwr.server.strategy.fedyogi.FedYogi:36 of +msgid "Momentum parameter. Defaults to 0.9." +msgstr "" + +#: flwr.server.strategy.fedadam.FedAdam:35 +#: flwr.server.strategy.fedyogi.FedYogi:38 of +msgid "Second moment parameter. Defaults to 0.99." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 +msgid "FedAvg" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg:3 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of +msgid "Implementation based on https://arxiv.org/abs/1602.05629" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 +#: of +msgid "" +"Fraction of clients used during training. In case `min_fit_clients` is " +"larger than `fraction_fit * available_clients`, `min_fit_clients` will " +"still be sampled. Defaults to 1.0." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 +#: of +msgid "" +"Fraction of clients used during validation. In case " +"`min_evaluate_clients` is larger than `fraction_evaluate * " +"available_clients`, `min_evaluate_clients` will still be sampled. " +"Defaults to 1.0." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg:33 of +msgid "Enable (True) or disable (False) in-place aggregation of model updates." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 +msgid "FedAvgAndroid" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" 
+":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`bytes_to_ndarray " +"`\\ \\(tensor\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of +msgid "Deserialize NumPy array from bytes." +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`ndarray_to_bytes " +"`\\ \\(ndarray\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of +msgid "Serialize NumPy array to bytes." 
+msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`ndarrays_to_parameters " +"`\\ " +"\\(ndarrays\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`parameters_to_ndarrays " +"`\\ " +"\\(parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 +#: of +msgid "Convert parameters object to NumPy weights." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +msgid "FedAvgM" +msgstr "" + +#: flwr.server.strategy.fedavgm.FedAvgM:3 of +msgid "Implementation based on https://arxiv.org/abs/1909.06335" +msgstr "" + +#: flwr.server.strategy.fedavgm.FedAvgM:25 of +msgid "" +"Server-side learning rate used in server-side optimization. Defaults to " +"1.0." +msgstr "" + +#: flwr.server.strategy.fedavgm.FedAvgM:28 of +msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 +msgid "FedMedian" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of +msgid "Aggregate fit results using median." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 +msgid "FedOpt" +msgstr "" + +#: flwr.server.strategy.fedopt.FedOpt:33 of +msgid "Momentum parameter. Defaults to 0.0." +msgstr "" + +#: flwr.server.strategy.fedopt.FedOpt:35 of +msgid "Second moment parameter. Defaults to 0.0." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 +msgid "FedProx" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:3 of +msgid "Implementation based on https://arxiv.org/abs/1812.06127" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:5 of +msgid "" +"The strategy in itself will not be different than FedAvg, the client " +"needs to be adjusted. 
A proximal term needs to be added to the loss " +"function during the training:" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:9 of +msgid "" +"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +"\n" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:12 of +msgid "" +"Where $w^t$ are the global parameters and $w$ are the local weights the " +"function will be optimized with." +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:15 of +msgid "In PyTorch, for example, the loss would go from:" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:21 of +msgid "To:" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:30 of +msgid "" +"With `global_params` being a copy of the parameters before the training " +"takes place." +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:65 of +msgid "" +"The weight of the proximal term used in the optimization. 0.0 makes this " +"strategy equivalent to FedAvg, and the higher the coefficient, the more " +"regularization will be used (that is, the client parameters will need to " +"be closer to the server parameters during training)." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of +msgid "Sends the proximal factor mu to the clients" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 +msgid "FedTrimmedAvg" +msgstr "" + +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of +msgid "Implemented based on: https://arxiv.org/abs/1803.01498" +msgstr "" + +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of +msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using trimmed average." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 +msgid "FedXgbBagging" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: 
flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation metrics using average." +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of +msgid "Aggregate fit results using bagging." +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 +msgid "FedXgbCyclic" +msgstr "" + +#: 
flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\," +" results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 +msgid "FedXgbNnAvg" +msgstr "" + +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of +msgid "" +"This strategy is deprecated, but a copy of it is available in Flower " +"Baselines: " +"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\, " +"results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 +msgid "FedYogi" +msgstr "" + +#: flwr.server.strategy.fedyogi.FedYogi:32 of +msgid "Server-side learning rate. Defaults to 1e-2." +msgstr "" + +#: flwr.server.strategy.fedyogi.FedYogi:34 of +msgid "Client-side learning rate. Defaults to 0.0316." +msgstr "" + +#: flwr.server.strategy.fedyogi.FedYogi:40 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 +msgid "Krum" +msgstr "" + +#: flwr.server.strategy.krum.Krum:3 of +msgid "Implementation based on https://arxiv.org/abs/1703.02757" +msgstr "" + +#: flwr.server.strategy.krum.Krum:17 of +msgid "" +"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" +" that case classical Krum is applied." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of +msgid "Aggregate fit results using Krum." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 +msgid "QFedAvg" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of 
+msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 +msgid "Strategy" +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation results." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of +msgid "Aggregate training results." 
+msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.evaluate:1 of +msgid "Evaluate the current model parameters." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of +msgid "Initialize the (global) model parameters." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of +msgid "" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " +"one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of +msgid "Exceptions that occurred while the server was waiting for client updates." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of +msgid "" +"**aggregation_result** -- The aggregated evaluation result. 
Aggregation " +"typically uses some variant of a weighted average." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of +msgid "" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" +" one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of +msgid "" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the new global model parameters (i.e., it will replace the " +"previous parameters with the ones returned from this method). If `None` " +"is returned (e.g., because there were only failures and no viable " +"results) then the server will no update the previous model parameters, " +"the updates received in this round are discarded, and the global model " +"parameters remain the same." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.evaluate:3 of +msgid "" +"This function can be used to perform centralized (i.e., server-side) " +"evaluation of model parameters." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.evaluate:11 of +msgid "" +"**evaluation_result** -- The evaluation result, usually a Tuple " +"containing loss and a dictionary containing task-specific metrics (e.g., " +"accuracy)." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of +msgid "" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the initial global model parameters." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.rst:2 +msgid "workflow" +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +msgid "" +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of +msgid "Default workflow in Flower." +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +msgid "" +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "The workflow for the SecAgg+ protocol." +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +msgid "" +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "The workflow for the SecAgg protocol." +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 +msgid "DefaultWorkflow" +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 +msgid "SecAggPlusWorkflow" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 +#: of +msgid "" +"The SecAgg+ protocol ensures the secure summation of integer vectors " +"owned by multiple parties, without accessing any individual integer " +"vector. This workflow allows the server to compute the weighted average " +"of model parameters across all clients, ensuring individual contributions" +" remain private. This is achieved by clients sending both, a weighting " +"factor and a weighted version of the locally updated parameters, both of " +"which are masked for privacy. 
Specifically, each client uploads \"[w, w *" +" params]\" with masks, where weighting factor 'w' is the number of " +"examples ('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 +#: of +msgid "" +"The protocol involves four main stages: - 'setup': Send SecAgg+ " +"configuration to clients and collect their public keys. - 'share keys': " +"Broadcast public keys among clients and collect encrypted secret" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 +#: of +msgid "key shares." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:18 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:18 +#: of +msgid "" +"'collect masked vectors': Forward encrypted secret key shares to target " +"clients and collect masked model parameters." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:20 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:20 +#: of +msgid "" +"'unmask': Collect secret key shares to decrypt and aggregate the model " +"parameters." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:22 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:22 +#: of +msgid "" +"Only the aggregated model parameters are exposed and passed to " +"`Strategy.aggregate_fit`, ensuring individual data privacy." 
+msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:25 +#: of +msgid "" +"The number of shares into which each client's private key is split under " +"the SecAgg+ protocol. If specified as a float, it represents the " +"proportion of all selected clients, and the number of shares will be set " +"dynamically in the run time. A private key can be reconstructed from " +"these shares, allowing for the secure aggregation of model updates. Each " +"client sends one share to each of its neighbors while retaining one." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:25 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:32 +#: of +msgid "" +"The minimum number of shares required to reconstruct a client's private " +"key, or, if specified as a float, it represents the proportion of the " +"total number of shares needed for reconstruction. This threshold ensures " +"privacy by allowing for the recovery of contributions from dropped " +"clients during aggregation, without compromising individual client data." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:31 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:38 +#: of +msgid "" +"The maximum value of the weight that can be assigned to any single " +"client's update during the weighted average calculation on the server " +"side, e.g., in the FedAvg algorithm." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:35 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:42 +#: of +msgid "" +"The range within which model parameters are clipped before quantization. " +"This parameter ensures each model parameter is bounded within " +"[-clipping_range, clipping_range], facilitating quantization." 
+msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:39 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:46 +#: of +msgid "" +"The size of the range into which floating-point model parameters are " +"quantized, mapping each parameter to an integer in [0, " +"quantization_range-1]. This facilitates cryptographic operations on the " +"model updates." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:43 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:50 +#: of +msgid "" +"The range of values from which random mask entries are uniformly sampled " +"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " +"Please use 2**n values for `modulus_range` to prevent overflow issues." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:47 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:54 +#: of +msgid "" +"The timeout duration in seconds. If specified, the workflow will wait for" +" replies for this duration each time. If `None`, there is no time limit " +"and the workflow will wait until replies for all messages are received." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 +#: of +msgid "" +"Generally, higher `num_shares` means more robust to dropouts while " +"increasing the computational costs; higher `reconstruction_threshold` " +"means better privacy guarantees but less tolerance to dropouts." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:58 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:64 +#: of +msgid "Too large `max_weight` may compromise the precision of the quantization." 
+msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 +#: of +msgid "`modulus_range` must be 2**n and larger than `quantization_range`." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 +#: of +msgid "" +"When `num_shares` is a float, it is interpreted as the proportion of all " +"selected clients, and hence the number of shares will be determined in " +"the runtime. This allows for dynamic adjustment based on the total number" +" of participating clients." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:69 +#: of +msgid "" +"Similarly, when `reconstruction_threshold` is a float, it is interpreted " +"as the proportion of the number of shares needed for the reconstruction " +"of a private key. This feature enables flexibility in setting the " +"security threshold relative to the number of distributed shares." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:73 +#: of +msgid "" +"`num_shares`, `reconstruction_threshold`, and the quantization parameters" +" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg+" +" protocol." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`collect_masked_vectors_stage " +"`\\" +" \\(driver\\, ...\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "Execute the 'collect masked vectors' stage." 
+msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`setup_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 +#: of +msgid "Execute the 'setup' stage." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`share_keys_stage " +"`\\ " +"\\(driver\\, context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 +#: of +msgid "Execute the 'share keys' stage." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: of +msgid "Execute the 'unmask' stage." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 +msgid "SecAggWorkflow" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "" +"Bases: " +":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of +msgid "" +"The SecAgg protocol ensures the secure summation of integer vectors owned" +" by multiple parties, without accessing any individual integer vector. " +"This workflow allows the server to compute the weighted average of model " +"parameters across all clients, ensuring individual contributions remain " +"private. This is achieved by clients sending both, a weighting factor and" +" a weighted version of the locally updated parameters, both of which are " +"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" +" with masks, where weighting factor 'w' is the number of examples " +"('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 of +msgid "" +"The protocol involves four main stages: - 'setup': Send SecAgg " +"configuration to clients and collect their public keys. - 'share keys': " +"Broadcast public keys among clients and collect encrypted secret" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 of +msgid "" +"Each client's private key is split into N shares under the SecAgg " +"protocol, where N is the number of selected clients." 
+msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:56 of +msgid "" +"Generally, higher `reconstruction_threshold` means better privacy " +"guarantees but less tolerance to dropouts." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 of +msgid "" +"When `reconstruction_threshold` is a float, it is interpreted as the " +"proportion of the number of all selected clients needed for the " +"reconstruction of a private key. This feature enables flexibility in " +"setting the security threshold relative to the number of selected " +"clients." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:64 of +msgid "" +"`reconstruction_threshold`, and the quantization parameters " +"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg " +"protocol." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`collect_masked_vectors_stage " +"`\\ " +"\\(driver\\, ...\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`setup_stage `\\" +" \\(driver\\, context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`share_keys_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:2 +msgid "simulation" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:18::1 +msgid "" 
+":py:obj:`start_simulation `\\ \\(\\*\\," +" client\\_fn\\[\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.app.start_simulation:1 of +msgid "Start a Ray-based Flower simulation server." +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:18::1 +msgid "" +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.run_simulation.run_simulation:1 of +msgid "Run a Flower App using the Simulation Engine." +msgstr "" + +#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 +msgid "run\\_simulation" +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:3 of +msgid "" +"The `ServerApp` to be executed. It will send messages to different " +"`ClientApp` instances running on different (virtual) SuperNodes." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:6 of +msgid "" +"The `ClientApp` to be executed by each of the SuperNodes. It will receive" +" messages sent by the `ServerApp`." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:9 of +msgid "" +"Number of nodes that run a ClientApp. They can be sampled by a Driver in " +"the ServerApp and receive a Message describing what the ClientApp should " +"perform." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:13 of +msgid "A simulation backend that runs `ClientApp`s." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:15 of +msgid "" +"'A dictionary, e.g {\"\": , \"\": } to " +"configure a backend. Values supported in are those included by " +"`flwr.common.typing.ConfigsRecordValues`." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:19 of +msgid "" +"A boolean to indicate whether to enable GPU growth on the main thread. " +"This is desirable if you make use of a TensorFlow model on your " +"`ServerApp` while having your `ClientApp` running on the same GPU. 
" +"Without enabling this, you might encounter an out-of-memory error because" +" TensorFlow, by default, allocates all GPU memory. Read more about how " +"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " +"documentation: https://www.tensorflow.org/api/stable." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:26 of +msgid "" +"When disabled, only INFO, WARNING and ERROR log messages will be shown. If" +" enabled, DEBUG-level logs will be displayed." +msgstr "" + +#: ../../source/ref-api/flwr.simulation.run_simulation_from_cli.rst:2 +msgid "run\\_simulation\\_from\\_cli" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +msgid "start\\_simulation" +msgstr "" + +#: flwr.simulation.app.start_simulation:3 of +msgid "" +"A function creating client instances. The function must take a single " +"`str` argument called `cid`. It should return a single client instance of" +" type Client. Note that the created client instances are ephemeral and " +"will often be destroyed after a single method invocation. Since client " +"instances are not long-lived, they should not attempt to carry state over" +" method invocations. Any state required by the instance (model, dataset, " +"hyperparameters, ...) should be (re-)created in either the call to " +"`client_fn` or the call to any of the client methods (e.g., load " +"evaluation data in the `evaluate` method itself)." +msgstr "" + +#: flwr.simulation.app.start_simulation:13 of +msgid "" +"The total number of clients in this simulation. This must be set if " +"`clients_ids` is not set and vice-versa." +msgstr "" + +#: flwr.simulation.app.start_simulation:16 of +msgid "" +"List `client_id`s for each client. This is only required if `num_clients`" +" is not set. Setting both `num_clients` and `clients_ids` with " +"`len(clients_ids)` not equal to `num_clients` generates an error." 
+msgstr "" + +#: flwr.simulation.app.start_simulation:20 of +msgid "" +"CPU and GPU resources for a single client. Supported keys are `num_cpus` " +"and `num_gpus`. To understand the GPU utilization caused by `num_gpus`, " +"as well as using custom resources, please consult the Ray documentation." +msgstr "" + +#: flwr.simulation.app.start_simulation:25 of +msgid "" +"An implementation of the abstract base class `flwr.server.Server`. If no " +"instance is provided, then `start_server` will create one." +msgstr "" + +#: flwr.simulation.app.start_simulation:31 of +msgid "" +"An implementation of the abstract base class `flwr.server.Strategy`. If " +"no strategy is provided, then `start_server` will use " +"`flwr.server.strategy.FedAvg`." +msgstr "" + +#: flwr.simulation.app.start_simulation:35 of +msgid "" +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_simulation` will use " +"`flwr.server.client_manager.SimpleClientManager`." +msgstr "" + +#: flwr.simulation.app.start_simulation:39 of +msgid "" +"Optional dictionary containing arguments for the call to `ray.init`. If " +"ray_init_args is None (the default), Ray will be initialized with the " +"following default args: { \"ignore_reinit_error\": True, " +"\"include_dashboard\": False } An empty dictionary can be used " +"(ray_init_args={}) to prevent any arguments from being passed to " +"ray.init." +msgstr "" + +#: flwr.simulation.app.start_simulation:39 of +msgid "" +"Optional dictionary containing arguments for the call to `ray.init`. 
If " +"ray_init_args is None (the default), Ray will be initialized with the " +"following default args:" +msgstr "" + +#: flwr.simulation.app.start_simulation:43 of +msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +msgstr "" + +#: flwr.simulation.app.start_simulation:45 of +msgid "" +"An empty dictionary can be used (ray_init_args={}) to prevent any " +"arguments from being passed to ray.init." +msgstr "" + +#: flwr.simulation.app.start_simulation:48 of +msgid "" +"Set to True to prevent `ray.shutdown()` in case " +"`ray.is_initialized()=True`." +msgstr "" + +#: flwr.simulation.app.start_simulation:50 of +msgid "" +"Optionally specify the type of actor to use. The actor object, which " +"persists throughout the simulation, will be the process in charge of " +"executing a ClientApp wrapping input argument `client_fn`." +msgstr "" + +#: flwr.simulation.app.start_simulation:54 of +msgid "" +"If you want to create your own Actor classes, you might need to pass some" +" input argument. You can use this dictionary for such purpose." +msgstr "" + +#: flwr.simulation.app.start_simulation:57 of +msgid "" +"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " +"the VCE to choose in which node the actor is placed. If you are an " +"advanced user needed more control you can use lower-level scheduling " +"strategies to pin actors to specific compute nodes (e.g. via " +"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." +" For all details, please refer to the Ray documentation: " +"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" +msgstr "" + +#: flwr.simulation.app.start_simulation:66 of +msgid "**hist** -- Object containing metrics from training." 
+msgstr "" + +#: ../../source/ref-changelog.md:1 +msgid "Changelog" +msgstr "" + +#: ../../source/ref-changelog.md:3 +msgid "Unreleased" +msgstr "" + +#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:19 +#: ../../source/ref-changelog.md:83 ../../source/ref-changelog.md:176 +#: ../../source/ref-changelog.md:276 ../../source/ref-changelog.md:360 +#: ../../source/ref-changelog.md:424 ../../source/ref-changelog.md:482 +#: ../../source/ref-changelog.md:551 ../../source/ref-changelog.md:680 +#: ../../source/ref-changelog.md:722 ../../source/ref-changelog.md:789 +#: ../../source/ref-changelog.md:855 ../../source/ref-changelog.md:900 +#: ../../source/ref-changelog.md:939 ../../source/ref-changelog.md:972 +#: ../../source/ref-changelog.md:1022 +msgid "What's new?" +msgstr "" + +#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:71 +#: ../../source/ref-changelog.md:146 ../../source/ref-changelog.md:258 +#: ../../source/ref-changelog.md:348 ../../source/ref-changelog.md:412 +#: ../../source/ref-changelog.md:470 ../../source/ref-changelog.md:539 +#: ../../source/ref-changelog.md:601 ../../source/ref-changelog.md:620 +#: ../../source/ref-changelog.md:776 ../../source/ref-changelog.md:847 +#: ../../source/ref-changelog.md:884 ../../source/ref-changelog.md:927 +msgid "Incompatible changes" +msgstr "" + +#: ../../source/ref-changelog.md:9 ../../source/ref-changelog.md:73 +#: ../../source/ref-changelog.md:350 ../../source/ref-changelog.md:414 +#: ../../source/ref-changelog.md:472 ../../source/ref-changelog.md:541 +#: ../../source/ref-changelog.md:603 +msgid "None" +msgstr "" + +#: ../../source/ref-changelog.md:11 +msgid "v1.8.0 (2024-04-03)" +msgstr "" + +#: ../../source/ref-changelog.md:13 ../../source/ref-changelog.md:77 +#: ../../source/ref-changelog.md:170 ../../source/ref-changelog.md:270 +#: ../../source/ref-changelog.md:354 ../../source/ref-changelog.md:418 +#: ../../source/ref-changelog.md:476 ../../source/ref-changelog.md:545 +#: 
../../source/ref-changelog.md:614 +msgid "Thanks to our contributors" +msgstr "" + +#: ../../source/ref-changelog.md:15 ../../source/ref-changelog.md:79 +#: ../../source/ref-changelog.md:172 ../../source/ref-changelog.md:272 +#: ../../source/ref-changelog.md:356 ../../source/ref-changelog.md:420 +#: ../../source/ref-changelog.md:478 +msgid "" +"We would like to give our special thanks to all the contributors who made" +" the new version of Flower possible (in `git shortlog` order):" +msgstr "" + +#: ../../source/ref-changelog.md:17 +msgid "" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata " +"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " +"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " +"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " +"`tabdar-khan` " +msgstr "" + +#: ../../source/ref-changelog.md:21 +msgid "" +"**Introduce Flower Next high-level API (stable)** " +"([#3002](https://github.com/adap/flower/pull/3002), " +"[#2934](https://github.com/adap/flower/pull/2934), " +"[#2958](https://github.com/adap/flower/pull/2958), " +"[#3173](https://github.com/adap/flower/pull/3173), " +"[#3174](https://github.com/adap/flower/pull/3174), " +"[#2923](https://github.com/adap/flower/pull/2923), " +"[#2691](https://github.com/adap/flower/pull/2691), " +"[#3079](https://github.com/adap/flower/pull/3079), " +"[#2961](https://github.com/adap/flower/pull/2961), " +"[#2924](https://github.com/adap/flower/pull/2924), " +"[#3166](https://github.com/adap/flower/pull/3166), " +"[#3031](https://github.com/adap/flower/pull/3031), " +"[#3057](https://github.com/adap/flower/pull/3057), " +"[#3000](https://github.com/adap/flower/pull/3000), " +"[#3113](https://github.com/adap/flower/pull/3113), " +"[#2957](https://github.com/adap/flower/pull/2957), " +"[#3183](https://github.com/adap/flower/pull/3183), " +"[#3180](https://github.com/adap/flower/pull/3180), " 
+"[#3035](https://github.com/adap/flower/pull/3035), " +"[#3189](https://github.com/adap/flower/pull/3189), " +"[#3185](https://github.com/adap/flower/pull/3185), " +"[#3190](https://github.com/adap/flower/pull/3190), " +"[#3191](https://github.com/adap/flower/pull/3191), " +"[#3195](https://github.com/adap/flower/pull/3195), " +"[#3197](https://github.com/adap/flower/pull/3197))" +msgstr "" + +#: ../../source/ref-changelog.md:23 +msgid "" +"The Flower Next high-level API is stable! Flower Next is the future of " +"Flower - all new features (like Flower Mods) will be built on top of it. " +"You can start to migrate your existing projects to Flower Next by using " +"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " +"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." +" Flower Next allows you to run multiple projects concurrently (we call " +"this multi-run) and execute the same project in either simulation " +"environments or deployment environments without having to change a single" +" line of code. The best part? It's fully compatible with existing Flower " +"projects that use `Strategy`, `NumPyClient` & co." +msgstr "" + +#: ../../source/ref-changelog.md:25 +msgid "" +"**Introduce Flower Next low-level API (preview)** " +"([#3062](https://github.com/adap/flower/pull/3062), " +"[#3034](https://github.com/adap/flower/pull/3034), " +"[#3069](https://github.com/adap/flower/pull/3069))" +msgstr "" + +#: ../../source/ref-changelog.md:27 +msgid "" +"In addition to the Flower Next *high-level* API that uses `Strategy`, " +"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " +"new Flower Next *low-level* API. The low-level API allows for granular " +"control of every aspect of the learning process by sending/receiving " +"individual messages to/from client nodes. 
The new `ServerApp` supports " +"registering a custom `main` function that allows writing custom training " +"loops for methods like async FL, cyclic training, or federated analytics." +" The new `ClientApp` supports registering `train`, `evaluate` and `query`" +" functions that can access the raw message received from the `ServerApp`." +" New abstractions like `RecordSet`, `Message` and `Context` further " +"enable sending multiple models, multiple sets of config values and " +"metrics, stateful computations on the client node and implementations of " +"custom SMPC protocols, to name just a few." +msgstr "" + +#: ../../source/ref-changelog.md:29 +msgid "" +"**Introduce Flower Mods (preview)** " +"([#3054](https://github.com/adap/flower/pull/3054), " +"[#2911](https://github.com/adap/flower/pull/2911), " +"[#3083](https://github.com/adap/flower/pull/3083))" +msgstr "" + +#: ../../source/ref-changelog.md:31 +msgid "" +"Flower Modifiers (we call them Mods) can intercept messages and analyze, " +"edit or handle them directly. Mods can be used to develop pluggable " +"modules that work across different projects. Flower 1.8 already includes " +"mods to log the size of a message, the number of parameters sent over the" +" network, differential privacy with fixed clipping and adaptive clipping," +" local differential privacy and secure aggregation protocols SecAgg and " +"SecAgg+. The Flower Mods API is released as a preview, but researchers " +"can already use it to experiment with arbitrary SMPC protocols." 
+msgstr "" + +#: ../../source/ref-changelog.md:33 +msgid "" +"**Fine-tune LLMs with LLM FlowerTune** " +"([#3029](https://github.com/adap/flower/pull/3029), " +"[#3089](https://github.com/adap/flower/pull/3089), " +"[#3092](https://github.com/adap/flower/pull/3092), " +"[#3100](https://github.com/adap/flower/pull/3100), " +"[#3114](https://github.com/adap/flower/pull/3114), " +"[#3162](https://github.com/adap/flower/pull/3162), " +"[#3172](https://github.com/adap/flower/pull/3172))" +msgstr "" + +#: ../../source/ref-changelog.md:35 +msgid "" +"We are introducing LLM FlowerTune, an introductory example that " +"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " +"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" +" different models and/or datasets. Read our blog post [LLM FlowerTune: " +"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" +"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." +msgstr "" + +#: ../../source/ref-changelog.md:37 +msgid "" +"**Introduce built-in Differential Privacy (preview)** " +"([#2798](https://github.com/adap/flower/pull/2798), " +"[#2959](https://github.com/adap/flower/pull/2959), " +"[#3038](https://github.com/adap/flower/pull/3038), " +"[#3147](https://github.com/adap/flower/pull/3147), " +"[#2909](https://github.com/adap/flower/pull/2909), " +"[#2893](https://github.com/adap/flower/pull/2893), " +"[#2892](https://github.com/adap/flower/pull/2892), " +"[#3039](https://github.com/adap/flower/pull/3039), " +"[#3074](https://github.com/adap/flower/pull/3074))" +msgstr "" + +#: ../../source/ref-changelog.md:39 +msgid "" +"Built-in Differential Privacy is here! Flower supports both central and " +"local differential privacy (DP). Central DP can be configured with either" +" fixed or adaptive clipping. The clipping can happen either on the " +"server-side or the client-side. Local DP does both clipping and noising " +"on the client-side. 
A new documentation page [explains Differential " +"Privacy approaches](https://flower.ai/docs/framework/explanation-" +"differential-privacy.html) and a new how-to guide describes [how to use " +"the new Differential Privacy components](https://flower.ai/docs/framework" +"/how-to-use-differential-privacy.html) in Flower." +msgstr "" + +#: ../../source/ref-changelog.md:41 +msgid "" +"**Introduce built-in Secure Aggregation (preview)** " +"([#3120](https://github.com/adap/flower/pull/3120), " +"[#3110](https://github.com/adap/flower/pull/3110), " +"[#3108](https://github.com/adap/flower/pull/3108))" +msgstr "" + +#: ../../source/ref-changelog.md:43 +msgid "" +"Built-in Secure Aggregation is here! Flower now supports different secure" +" aggregation protocols out-of-the-box. The best part? You can add secure " +"aggregation to your Flower projects with only a few lines of code. In " +"this initial release, we include support for SecAgg and SecAgg+, but more" +" protocols will be implemented shortly. We'll also add detailed docs that" +" explain secure aggregation and how to use it in Flower. You can already " +"check out the new code example that shows how to use Flower to easily " +"combine Federated Learning, Differential Privacy and Secure Aggregation " +"in the same project." 
+msgstr "" + +#: ../../source/ref-changelog.md:45 +msgid "" +"**Introduce** `flwr` **CLI (preview)** " +"([#2942](https://github.com/adap/flower/pull/2942), " +"[#3055](https://github.com/adap/flower/pull/3055), " +"[#3111](https://github.com/adap/flower/pull/3111), " +"[#3130](https://github.com/adap/flower/pull/3130), " +"[#3136](https://github.com/adap/flower/pull/3136), " +"[#3094](https://github.com/adap/flower/pull/3094), " +"[#3059](https://github.com/adap/flower/pull/3059), " +"[#3049](https://github.com/adap/flower/pull/3049), " +"[#3142](https://github.com/adap/flower/pull/3142))" +msgstr "" + +#: ../../source/ref-changelog.md:47 +msgid "" +"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" +" and then running them using the Simulation Engine (`flwr run`)." +msgstr "" + +#: ../../source/ref-changelog.md:49 +msgid "" +"**Introduce Flower Next Simulation Engine** " +"([#3024](https://github.com/adap/flower/pull/3024), " +"[#3061](https://github.com/adap/flower/pull/3061), " +"[#2997](https://github.com/adap/flower/pull/2997), " +"[#2783](https://github.com/adap/flower/pull/2783), " +"[#3184](https://github.com/adap/flower/pull/3184), " +"[#3075](https://github.com/adap/flower/pull/3075), " +"[#3047](https://github.com/adap/flower/pull/3047), " +"[#2998](https://github.com/adap/flower/pull/2998), " +"[#3009](https://github.com/adap/flower/pull/3009), " +"[#3008](https://github.com/adap/flower/pull/3008))" +msgstr "" + +#: ../../source/ref-changelog.md:51 +msgid "" +"The Flower Simulation Engine can now run Flower Next projects. For " +"notebook environments, there's also a new `run_simulation` function that " +"can run `ServerApp` and `ClientApp`." 
+msgstr "" + +#: ../../source/ref-changelog.md:53 +msgid "" +"**Handle SuperNode connection errors** " +"([#2969](https://github.com/adap/flower/pull/2969))" +msgstr "" + +#: ../../source/ref-changelog.md:55 +msgid "" +"A SuperNode will now try to reconnect indefinitely to the SuperLink in " +"case of connection errors. The arguments `--max-retries` and `--max-wait-" +"time` can now be passed to the `flower-client-app` command. `--max-" +"retries` will define the number of tentatives the client should make " +"before it gives up trying to reconnect to the SuperLink, and, `--max-" +"wait-time` defines the time before the SuperNode gives up trying to " +"reconnect to the SuperLink." +msgstr "" + +#: ../../source/ref-changelog.md:57 +msgid "" +"**General updates to Flower Baselines** " +"([#2904](https://github.com/adap/flower/pull/2904), " +"[#2482](https://github.com/adap/flower/pull/2482), " +"[#2985](https://github.com/adap/flower/pull/2985), " +"[#2968](https://github.com/adap/flower/pull/2968))" +msgstr "" + +#: ../../source/ref-changelog.md:59 +msgid "" +"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " +"baseline. Several other baselines have been updated as well." 
+msgstr "" + +#: ../../source/ref-changelog.md:61 +msgid "" +"**Improve documentation and translations** " +"([#3050](https://github.com/adap/flower/pull/3050), " +"[#3044](https://github.com/adap/flower/pull/3044), " +"[#3043](https://github.com/adap/flower/pull/3043), " +"[#2986](https://github.com/adap/flower/pull/2986), " +"[#3041](https://github.com/adap/flower/pull/3041), " +"[#3046](https://github.com/adap/flower/pull/3046), " +"[#3042](https://github.com/adap/flower/pull/3042), " +"[#2978](https://github.com/adap/flower/pull/2978), " +"[#2952](https://github.com/adap/flower/pull/2952), " +"[#3167](https://github.com/adap/flower/pull/3167), " +"[#2953](https://github.com/adap/flower/pull/2953), " +"[#3045](https://github.com/adap/flower/pull/3045), " +"[#2654](https://github.com/adap/flower/pull/2654), " +"[#3082](https://github.com/adap/flower/pull/3082), " +"[#2990](https://github.com/adap/flower/pull/2990), " +"[#2989](https://github.com/adap/flower/pull/2989))" +msgstr "" + +#: ../../source/ref-changelog.md:63 +msgid "" +"As usual, we merged many smaller and larger improvements to the " +"documentation. A special thank you goes to [Sebastian van der " +"Voort](https://github.com/svdvoort) for landing a big documentation PR!" 
+msgstr "" + +#: ../../source/ref-changelog.md:65 +msgid "" +"**General updates to Flower Examples** " +"([3134](https://github.com/adap/flower/pull/3134), " +"[2996](https://github.com/adap/flower/pull/2996), " +"[2930](https://github.com/adap/flower/pull/2930), " +"[2967](https://github.com/adap/flower/pull/2967), " +"[2467](https://github.com/adap/flower/pull/2467), " +"[2910](https://github.com/adap/flower/pull/2910), " +"[#2918](https://github.com/adap/flower/pull/2918), " +"[#2773](https://github.com/adap/flower/pull/2773), " +"[#3063](https://github.com/adap/flower/pull/3063), " +"[#3116](https://github.com/adap/flower/pull/3116), " +"[#3117](https://github.com/adap/flower/pull/3117))" +msgstr "" + +#: ../../source/ref-changelog.md:67 +msgid "" +"Two new examples show federated training of a Vision Transformer (ViT) " +"and federated learning in a medical context using the popular MONAI " +"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" +" new Flower Next `ServerApp` and `ClientApp`. Many other examples " +"received considerable updates as well." 
+msgstr "" + +#: ../../source/ref-changelog.md:69 +msgid "" +"**General improvements** " +"([#3171](https://github.com/adap/flower/pull/3171), " +"[3099](https://github.com/adap/flower/pull/3099), " +"[3003](https://github.com/adap/flower/pull/3003), " +"[3145](https://github.com/adap/flower/pull/3145), " +"[3017](https://github.com/adap/flower/pull/3017), " +"[3085](https://github.com/adap/flower/pull/3085), " +"[3012](https://github.com/adap/flower/pull/3012), " +"[3119](https://github.com/adap/flower/pull/3119), " +"[2991](https://github.com/adap/flower/pull/2991), " +"[2970](https://github.com/adap/flower/pull/2970), " +"[2980](https://github.com/adap/flower/pull/2980), " +"[3086](https://github.com/adap/flower/pull/3086), " +"[2932](https://github.com/adap/flower/pull/2932), " +"[2928](https://github.com/adap/flower/pull/2928), " +"[2941](https://github.com/adap/flower/pull/2941), " +"[2933](https://github.com/adap/flower/pull/2933), " +"[3181](https://github.com/adap/flower/pull/3181), " +"[2973](https://github.com/adap/flower/pull/2973), " +"[2992](https://github.com/adap/flower/pull/2992), " +"[2915](https://github.com/adap/flower/pull/2915), " +"[3040](https://github.com/adap/flower/pull/3040), " +"[3022](https://github.com/adap/flower/pull/3022), " +"[3032](https://github.com/adap/flower/pull/3032), " +"[2902](https://github.com/adap/flower/pull/2902), " +"[2931](https://github.com/adap/flower/pull/2931), " +"[3005](https://github.com/adap/flower/pull/3005), " +"[3132](https://github.com/adap/flower/pull/3132), " +"[3115](https://github.com/adap/flower/pull/3115), " +"[2944](https://github.com/adap/flower/pull/2944), " +"[3064](https://github.com/adap/flower/pull/3064), " +"[3106](https://github.com/adap/flower/pull/3106), " +"[2974](https://github.com/adap/flower/pull/2974), " +"[3178](https://github.com/adap/flower/pull/3178), " +"[2993](https://github.com/adap/flower/pull/2993), " +"[3186](https://github.com/adap/flower/pull/3186), " 
+"[3091](https://github.com/adap/flower/pull/3091), " +"[3125](https://github.com/adap/flower/pull/3125), " +"[3093](https://github.com/adap/flower/pull/3093), " +"[3013](https://github.com/adap/flower/pull/3013), " +"[3033](https://github.com/adap/flower/pull/3033), " +"[3133](https://github.com/adap/flower/pull/3133), " +"[3068](https://github.com/adap/flower/pull/3068), " +"[2916](https://github.com/adap/flower/pull/2916), " +"[2975](https://github.com/adap/flower/pull/2975), " +"[2984](https://github.com/adap/flower/pull/2984), " +"[2846](https://github.com/adap/flower/pull/2846), " +"[3077](https://github.com/adap/flower/pull/3077), " +"[3143](https://github.com/adap/flower/pull/3143), " +"[2921](https://github.com/adap/flower/pull/2921), " +"[3101](https://github.com/adap/flower/pull/3101), " +"[2927](https://github.com/adap/flower/pull/2927), " +"[2995](https://github.com/adap/flower/pull/2995), " +"[2972](https://github.com/adap/flower/pull/2972), " +"[2912](https://github.com/adap/flower/pull/2912), " +"[3065](https://github.com/adap/flower/pull/3065), " +"[3028](https://github.com/adap/flower/pull/3028), " +"[2922](https://github.com/adap/flower/pull/2922), " +"[2982](https://github.com/adap/flower/pull/2982), " +"[2914](https://github.com/adap/flower/pull/2914), " +"[3179](https://github.com/adap/flower/pull/3179), " +"[3080](https://github.com/adap/flower/pull/3080), " +"[2994](https://github.com/adap/flower/pull/2994), " +"[3187](https://github.com/adap/flower/pull/3187), " +"[2926](https://github.com/adap/flower/pull/2926), " +"[3018](https://github.com/adap/flower/pull/3018), " +"[3144](https://github.com/adap/flower/pull/3144), " +"[3011](https://github.com/adap/flower/pull/3011), " +"[#3152](https://github.com/adap/flower/pull/3152), " +"[#2836](https://github.com/adap/flower/pull/2836), " +"[#2929](https://github.com/adap/flower/pull/2929), " +"[#2943](https://github.com/adap/flower/pull/2943), " 
+"[#2955](https://github.com/adap/flower/pull/2955), " +"[#2954](https://github.com/adap/flower/pull/2954))" +msgstr "" + +#: ../../source/ref-changelog.md:75 +msgid "v1.7.0 (2024-02-05)" +msgstr "" + +#: ../../source/ref-changelog.md:81 +msgid "" +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " +"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " +"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " +"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " +"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " +"Shaaban`, `Yan Gao`, `Yasar Abbas` " +msgstr "" + +#: ../../source/ref-changelog.md:85 +msgid "" +"**Introduce stateful clients (experimental)** " +"([#2770](https://github.com/adap/flower/pull/2770), " +"[#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), " +"[#2643](https://github.com/adap/flower/pull/2643), " +"[#2769](https://github.com/adap/flower/pull/2769))" +msgstr "" + +#: ../../source/ref-changelog.md:87 +msgid "" +"Subclasses of `Client` and `NumPyClient` can now store local state that " +"remains on the client. Let's start with the highlight first: this new " +"feature is compatible with both simulated clients (via " +"`start_simulation`) and networked clients (via `start_client`). It's also" +" the first preview of new abstractions like `Context` and `RecordSet`. " +"Clients can access state of type `RecordSet` via `state: RecordSet = " +"self.context.state`. Changes to this `RecordSet` are preserved across " +"different rounds of execution to enable stateful computations in a " +"unified way across simulation and deployment." +msgstr "" + +#: ../../source/ref-changelog.md:89 +msgid "" +"**Improve performance** " +"([#2293](https://github.com/adap/flower/pull/2293))" +msgstr "" + +#: ../../source/ref-changelog.md:91 +msgid "" +"Flower is faster than ever. 
All `FedAvg`-derived strategies now use in-" +"place aggregation to reduce memory consumption. The Flower client " +"serialization/deserialization has been rewritten from the ground up, " +"which results in significant speedups, especially when the client-side " +"training time is short." +msgstr "" + +#: ../../source/ref-changelog.md:93 +msgid "" +"**Support Federated Learning with Apple MLX and Flower** " +"([#2693](https://github.com/adap/flower/pull/2693))" +msgstr "" + +#: ../../source/ref-changelog.md:95 +msgid "" +"Flower has official support for federated learning using [Apple " +"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " +"example." +msgstr "" + +#: ../../source/ref-changelog.md:97 +msgid "" +"**Introduce new XGBoost cyclic strategy** " +"([#2666](https://github.com/adap/flower/pull/2666), " +"[#2668](https://github.com/adap/flower/pull/2668))" +msgstr "" + +#: ../../source/ref-changelog.md:99 +msgid "" +"A new strategy called `FedXgbCyclic` supports a client-by-client style of" +" training (often called cyclic). The `xgboost-comprehensive` code example" +" shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower " +"offers best-in-class XGBoost support." +msgstr "" + +#: ../../source/ref-changelog.md:101 +msgid "" +"**Support Python 3.11** " +"([#2394](https://github.com/adap/flower/pull/2394))" +msgstr "" + +#: ../../source/ref-changelog.md:103 +msgid "" +"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " +"ensure better support for users using more recent Python versions." +msgstr "" + +#: ../../source/ref-changelog.md:105 +msgid "" +"**Update gRPC and ProtoBuf dependencies** " +"([#2814](https://github.com/adap/flower/pull/2814))" +msgstr "" + +#: ../../source/ref-changelog.md:107 +msgid "" +"The `grpcio` and `protobuf` dependencies were updated to their latest " +"versions for improved security and performance." 
+msgstr "" + +#: ../../source/ref-changelog.md:109 +msgid "" +"**Introduce Docker image for Flower server** " +"([#2700](https://github.com/adap/flower/pull/2700), " +"[#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), " +"[#2695](https://github.com/adap/flower/pull/2695), " +"[#2747](https://github.com/adap/flower/pull/2747), " +"[#2746](https://github.com/adap/flower/pull/2746), " +"[#2680](https://github.com/adap/flower/pull/2680), " +"[#2682](https://github.com/adap/flower/pull/2682), " +"[#2701](https://github.com/adap/flower/pull/2701))" +msgstr "" + +#: ../../source/ref-changelog.md:111 +msgid "" +"The Flower server can now be run using an official Docker image. A new " +"how-to guide explains [how to run Flower using " +"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html). An official Flower client Docker image will follow." +msgstr "" + +#: ../../source/ref-changelog.md:113 +msgid "" +"**Introduce** `flower-via-docker-compose` **example** " +"([#2626](https://github.com/adap/flower/pull/2626))" +msgstr "" + +#: ../../source/ref-changelog.md:115 +msgid "" +"**Introduce** `quickstart-sklearn-tabular` **example** " +"([#2719](https://github.com/adap/flower/pull/2719))" +msgstr "" + +#: ../../source/ref-changelog.md:117 +msgid "" +"**Introduce** `custom-metrics` **example** " +"([#1958](https://github.com/adap/flower/pull/1958))" +msgstr "" + +#: ../../source/ref-changelog.md:119 +msgid "" +"**Update code examples to use Flower Datasets** " +"([#2450](https://github.com/adap/flower/pull/2450), " +"[#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), " +"[#2712](https://github.com/adap/flower/pull/2712))" +msgstr "" + +#: ../../source/ref-changelog.md:121 +msgid "" +"Several code examples were updated to use [Flower " +"Datasets](https://flower.ai/docs/datasets/)." 
+msgstr "" + +#: ../../source/ref-changelog.md:123 +msgid "" +"**General updates to Flower Examples** " +"([#2381](https://github.com/adap/flower/pull/2381), " +"[#2805](https://github.com/adap/flower/pull/2805), " +"[#2782](https://github.com/adap/flower/pull/2782), " +"[#2806](https://github.com/adap/flower/pull/2806), " +"[#2829](https://github.com/adap/flower/pull/2829), " +"[#2825](https://github.com/adap/flower/pull/2825), " +"[#2816](https://github.com/adap/flower/pull/2816), " +"[#2726](https://github.com/adap/flower/pull/2726), " +"[#2659](https://github.com/adap/flower/pull/2659), " +"[#2655](https://github.com/adap/flower/pull/2655))" +msgstr "" + +#: ../../source/ref-changelog.md:125 +msgid "Many Flower code examples received substantial updates." +msgstr "" + +#: ../../source/ref-changelog.md:127 ../../source/ref-changelog.md:220 +msgid "**Update Flower Baselines**" +msgstr "" + +#: ../../source/ref-changelog.md:129 +msgid "" +"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " +"[#2771](https://github.com/adap/flower/pull/2771))" +msgstr "" + +#: ../../source/ref-changelog.md:130 +msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" +msgstr "" + +#: ../../source/ref-changelog.md:131 +msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" +msgstr "" + +#: ../../source/ref-changelog.md:132 +msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" +msgstr "" + +#: ../../source/ref-changelog.md:133 +msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +msgstr "" + +#: ../../source/ref-changelog.md:134 +msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" +msgstr "" + +#: ../../source/ref-changelog.md:136 +msgid "" +"**Improve documentation** " +"([#2674](https://github.com/adap/flower/pull/2674), " +"[#2480](https://github.com/adap/flower/pull/2480), " +"[#2826](https://github.com/adap/flower/pull/2826), " +"[#2727](https://github.com/adap/flower/pull/2727), " 
+"[#2761](https://github.com/adap/flower/pull/2761), " +"[#2900](https://github.com/adap/flower/pull/2900))" +msgstr "" + +#: ../../source/ref-changelog.md:138 +msgid "" +"**Improved testing and development infrastructure** " +"([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" +msgstr "" + +#: ../../source/ref-changelog.md:140 +msgid "" +"The Flower testing and development infrastructure has received " +"substantial updates. 
This makes Flower 1.7 the most tested release ever." +msgstr "" + +#: ../../source/ref-changelog.md:142 +msgid "" +"**Update dependencies** " +"([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), " +"[#2739](https://github.com/adap/flower/pull/2739), " +"[#2837](https://github.com/adap/flower/pull/2837), " +"[#2788](https://github.com/adap/flower/pull/2788), " +"[#2811](https://github.com/adap/flower/pull/2811), " +"[#2774](https://github.com/adap/flower/pull/2774), " +"[#2790](https://github.com/adap/flower/pull/2790), " +"[#2751](https://github.com/adap/flower/pull/2751), " +"[#2850](https://github.com/adap/flower/pull/2850), " +"[#2812](https://github.com/adap/flower/pull/2812), " +"[#2872](https://github.com/adap/flower/pull/2872), " +"[#2736](https://github.com/adap/flower/pull/2736), " +"[#2756](https://github.com/adap/flower/pull/2756), " +"[#2857](https://github.com/adap/flower/pull/2857), " +"[#2757](https://github.com/adap/flower/pull/2757), " +"[#2810](https://github.com/adap/flower/pull/2810), " +"[#2740](https://github.com/adap/flower/pull/2740), " +"[#2789](https://github.com/adap/flower/pull/2789))" +msgstr "" + +#: ../../source/ref-changelog.md:144 +msgid "" +"**General improvements** " +"([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " 
+"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" +msgstr "" + +#: ../../source/ref-changelog.md:148 +msgid "" +"**Deprecate** `start_numpy_client` " +"([#2563](https://github.com/adap/flower/pull/2563), " +"[#2718](https://github.com/adap/flower/pull/2718))" +msgstr "" + +#: ../../source/ref-changelog.md:150 +msgid "" +"Until now, clients of type `NumPyClient` needed to be started via " +"`start_numpy_client`. In our efforts to consolidate framework APIs, we " +"have introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need " +"to first call the `.to_client()` method and then pass returned `Client` " +"object to `start_client`. 
The examples and the documentation have been " +"updated accordingly." +msgstr "" + +#: ../../source/ref-changelog.md:152 +msgid "" +"**Deprecate legacy DP wrappers** " +"([#2749](https://github.com/adap/flower/pull/2749))" +msgstr "" + +#: ../../source/ref-changelog.md:154 +msgid "" +"Legacy DP wrapper classes are deprecated, but still functional. This is " +"in preparation for an all-new pluggable version of differential privacy " +"support in Flower." +msgstr "" + +#: ../../source/ref-changelog.md:156 +msgid "" +"**Make optional arg** `--callable` **in** `flower-client` **a required " +"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" +msgstr "" + +#: ../../source/ref-changelog.md:158 +msgid "" +"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " +"([#2890](https://github.com/adap/flower/pull/2890))" +msgstr "" + +#: ../../source/ref-changelog.md:160 +msgid "" +"**Drop experimental** `Task` **fields** " +"([#2866](https://github.com/adap/flower/pull/2866), " +"[#2865](https://github.com/adap/flower/pull/2865))" +msgstr "" + +#: ../../source/ref-changelog.md:162 +msgid "" +"Experimental fields `sa`, `legacy_server_message` and " +"`legacy_client_message` were removed from `Task` message. The removed " +"fields are superseded by the new `RecordSet` abstraction." +msgstr "" + +#: ../../source/ref-changelog.md:164 +msgid "" +"**Retire MXNet examples** " +"([#2724](https://github.com/adap/flower/pull/2724))" +msgstr "" + +#: ../../source/ref-changelog.md:166 +msgid "" +"The development of the MXNet fremework has ended and the project is now " +"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " +"examples won't receive updates." +msgstr "" + +#: ../../source/ref-changelog.md:168 +msgid "v1.6.0 (2023-11-28)" +msgstr "" + +#: ../../source/ref-changelog.md:174 +msgid "" +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " +msgstr "" + +#: ../../source/ref-changelog.md:178 +msgid "" +"**Add experimental support for Python 3.12** " +"([#2565](https://github.com/adap/flower/pull/2565))" +msgstr "" + +#: ../../source/ref-changelog.md:180 +msgid "" +"**Add new XGBoost examples** " +"([#2612](https://github.com/adap/flower/pull/2612), " +"[#2554](https://github.com/adap/flower/pull/2554), " +"[#2617](https://github.com/adap/flower/pull/2617), " +"[#2618](https://github.com/adap/flower/pull/2618), " +"[#2619](https://github.com/adap/flower/pull/2619), " +"[#2567](https://github.com/adap/flower/pull/2567))" +msgstr "" + +#: ../../source/ref-changelog.md:182 +msgid "" +"We have added a new `xgboost-quickstart` example alongside a new " +"`xgboost-comprehensive` example that goes more in-depth." +msgstr "" + +#: ../../source/ref-changelog.md:184 +msgid "" +"**Add Vertical FL example** " +"([#2598](https://github.com/adap/flower/pull/2598))" +msgstr "" + +#: ../../source/ref-changelog.md:186 +msgid "" +"We had many questions about Vertical Federated Learning using Flower, so " +"we decided to add an simple example for it on the [Titanic " +"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " +"tutorial (in the README)." 
+msgstr "" + +#: ../../source/ref-changelog.md:188 +msgid "" +"**Support custom** `ClientManager` **in** `start_driver()` " +"([#2292](https://github.com/adap/flower/pull/2292))" +msgstr "" + +#: ../../source/ref-changelog.md:190 +msgid "" +"**Update REST API to support create and delete nodes** " +"([#2283](https://github.com/adap/flower/pull/2283))" +msgstr "" + +#: ../../source/ref-changelog.md:192 +msgid "" +"**Update the Android SDK** " +"([#2187](https://github.com/adap/flower/pull/2187))" +msgstr "" + +#: ../../source/ref-changelog.md:194 +msgid "Add gRPC request-response capability to the Android SDK." +msgstr "" + +#: ../../source/ref-changelog.md:196 +msgid "" +"**Update the C++ SDK** " +"([#2537](https://github.com/adap/flower/pull/2537), " +"[#2528](https://github.com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" +msgstr "" + +#: ../../source/ref-changelog.md:198 +msgid "Add gRPC request-response capability to the C++ SDK." +msgstr "" + +#: ../../source/ref-changelog.md:200 +msgid "" +"**Make HTTPS the new default** " +"([#2591](https://github.com/adap/flower/pull/2591), " +"[#2636](https://github.com/adap/flower/pull/2636))" +msgstr "" + +#: ../../source/ref-changelog.md:202 +msgid "" +"Flower is moving to HTTPS by default. The new `flower-server` requires " +"passing `--certificates`, but users can enable `--insecure` to use HTTP " +"for prototyping. The same applies to `flower-client`, which can either " +"use user-provided credentials or gRPC-bundled certificates to connect to " +"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " +"enable insecure HTTP connections." +msgstr "" + +#: ../../source/ref-changelog.md:204 +msgid "" +"For backward compatibility, `start_client()` and `start_numpy_client()` " +"will still start in insecure mode by default. 
In a future release, " +"insecure connections will require user opt-in by passing `insecure=True`." +msgstr "" + +#: ../../source/ref-changelog.md:206 +msgid "" +"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" +msgstr "" + +#: ../../source/ref-changelog.md:208 +msgid "" +"Using the `client_fn`, Flower clients can interchangeably run as " +"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" +" `start_simulation`) without requiring changes to how the client class is" +" defined and instantiated. The `to_client()` function is introduced to " +"convert a `NumPyClient` to a `Client`." +msgstr "" + +#: ../../source/ref-changelog.md:210 +msgid "" +"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" +msgstr "" + +#: ../../source/ref-changelog.md:212 +msgid "" +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" +msgstr "" + +#: ../../source/ref-changelog.md:214 +msgid "" +"**Add new** `XGB Bagging` **strategy** " +"([#2611](https://github.com/adap/flower/pull/2611))" +msgstr "" + +#: ../../source/ref-changelog.md:216 ../../source/ref-changelog.md:218 +msgid "" +"**Introduce `WorkloadState`** " +"([#2564](https://github.com/adap/flower/pull/2564), " +"[#2632](https://github.com/adap/flower/pull/2632))" +msgstr "" + +#: ../../source/ref-changelog.md:222 +msgid "" +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" +msgstr "" + +#: ../../source/ref-changelog.md:224 +msgid "" +"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400))" +msgstr "" + +#: ../../source/ref-changelog.md:226 
+msgid "" +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" +msgstr "" + +#: ../../source/ref-changelog.md:228 +msgid "" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" +msgstr "" + +#: ../../source/ref-changelog.md:230 +msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +msgstr "" + +#: ../../source/ref-changelog.md:232 +msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +msgstr "" + +#: ../../source/ref-changelog.md:234 +msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" +msgstr "" + +#: ../../source/ref-changelog.md:236 +msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +msgstr "" + +#: ../../source/ref-changelog.md:238 +msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +msgstr "" + +#: ../../source/ref-changelog.md:240 +msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +msgstr "" + +#: ../../source/ref-changelog.md:242 +msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +msgstr "" + +#: ../../source/ref-changelog.md:244 +msgid "" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" +msgstr "" + +#: ../../source/ref-changelog.md:246 +msgid "" +"**General updates to Flower Examples** " +"([#2384](https://github.com/adap/flower/pull/2384), " +"[#2425](https://github.com/adap/flower/pull/2425), " +"[#2526](https://github.com/adap/flower/pull/2526), " +"[#2302](https://github.com/adap/flower/pull/2302), " +"[#2545](https://github.com/adap/flower/pull/2545))" +msgstr "" + +#: ../../source/ref-changelog.md:248 +msgid "" +"**General updates to Flower Baselines** " +"([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " 
+"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435), " +"[#2462](https://github.com/adap/flower/pull/2462), " +"[#2463](https://github.com/adap/flower/pull/2463), " +"[#2461](https://github.com/adap/flower/pull/2461), " +"[#2469](https://github.com/adap/flower/pull/2469), " +"[#2466](https://github.com/adap/flower/pull/2466), " +"[#2471](https://github.com/adap/flower/pull/2471), " +"[#2472](https://github.com/adap/flower/pull/2472), " +"[#2470](https://github.com/adap/flower/pull/2470))" +msgstr "" + +#: ../../source/ref-changelog.md:250 +msgid "" +"**General updates to the simulation engine** " +"([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448), " +"[#2294](https://github.com/adap/flower/pull/2294))" +msgstr "" + +#: ../../source/ref-changelog.md:252 +msgid "" +"**General updates to Flower SDKs** " +"([#2288](https://github.com/adap/flower/pull/2288), " +"[#2429](https://github.com/adap/flower/pull/2429), " +"[#2555](https://github.com/adap/flower/pull/2555), " +"[#2543](https://github.com/adap/flower/pull/2543), " +"[#2544](https://github.com/adap/flower/pull/2544), " +"[#2597](https://github.com/adap/flower/pull/2597), " +"[#2623](https://github.com/adap/flower/pull/2623))" +msgstr "" + +#: ../../source/ref-changelog.md:254 +msgid "" +"**General improvements** " +"([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " 
+"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" +msgstr "" + +#: ../../source/ref-changelog.md:256 ../../source/ref-changelog.md:346 +#: ../../source/ref-changelog.md:410 ../../source/ref-changelog.md:464 +#: ../../source/ref-changelog.md:531 +msgid "Flower received many improvements under the hood, too many to list here." +msgstr "" + +#: ../../source/ref-changelog.md:260 +msgid "" +"**Remove support for Python 3.7** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" +msgstr "" + +#: ../../source/ref-changelog.md:262 +msgid "" +"Python 3.7 support was deprecated in Flower 1.5, and this release removes" +" support. Flower now requires Python 3.8." 
+msgstr "" + +#: ../../source/ref-changelog.md:264 +msgid "" +"**Remove experimental argument** `rest` **from** `start_client` " +"([#2324](https://github.com/adap/flower/pull/2324))" +msgstr "" + +#: ../../source/ref-changelog.md:266 +msgid "" +"The (still experimental) argument `rest` was removed from `start_client` " +"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " +"experimental REST API instead." +msgstr "" + +#: ../../source/ref-changelog.md:268 +msgid "v1.5.0 (2023-08-31)" +msgstr "" + +#: ../../source/ref-changelog.md:274 +msgid "" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +msgstr "" + +#: ../../source/ref-changelog.md:278 +msgid "" +"**Introduce new simulation engine** " +"([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" +msgstr "" + +#: ../../source/ref-changelog.md:280 +msgid "" +"The new simulation engine has been rewritten from the ground up, yet it " +"remains fully backwards compatible. It offers much improved stability and" +" memory handling, especially when working with GPUs. Simulations " +"transparently adapt to different settings to scale simulation in CPU-" +"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." 
+msgstr "" + +#: ../../source/ref-changelog.md:282 +msgid "" +"Comprehensive documentation includes a new [how-to run " +"simulations](https://flower.ai/docs/framework/how-to-run-" +"simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." +msgstr "" + +#: ../../source/ref-changelog.md:284 +msgid "" +"**Restructure Flower Docs** " +"([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " 
+"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" +msgstr "" + +#: ../../source/ref-changelog.md:286 +msgid "" +"Much effort went into a completely restructured Flower docs experience. " +"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " +"divided into Flower Framework, Flower Baselines, Flower Android SDK, " +"Flower iOS SDK, and code example projects." +msgstr "" + +#: ../../source/ref-changelog.md:288 +msgid "" +"**Introduce Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" +msgstr "" + +#: ../../source/ref-changelog.md:290 +msgid "" +"This is the first preview release of the Flower Swift SDK. Flower support" +" on iOS is improving, and alongside the Swift SDK and code example, there" +" is now also an iOS quickstart tutorial." +msgstr "" + +#: ../../source/ref-changelog.md:292 +msgid "" +"**Introduce Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" +msgstr "" + +#: ../../source/ref-changelog.md:294 +msgid "" +"This is the first preview release of the Flower Kotlin SDK. Flower " +"support on Android is improving, and alongside the Kotlin SDK and code " +"example, there is now also an Android quickstart tutorial." 
+msgstr "" + +#: ../../source/ref-changelog.md:296 +msgid "" +"**Introduce new end-to-end testing infrastructure** " +"([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" +msgstr "" + +#: ../../source/ref-changelog.md:298 +msgid "" +"A new testing infrastructure ensures that new changes stay compatible " +"with existing framework integrations or strategies." +msgstr "" + +#: ../../source/ref-changelog.md:300 +msgid "**Deprecate Python 3.7**" +msgstr "" + +#: ../../source/ref-changelog.md:302 +msgid "" +"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" +" Python 3.7 is now deprecated and will be removed in an upcoming release." +msgstr "" + +#: ../../source/ref-changelog.md:304 +msgid "" +"**Add new** `FedTrimmedAvg` **strategy** " +"([#1769](https://github.com/adap/flower/pull/1769), " +"[#1853](https://github.com/adap/flower/pull/1853))" +msgstr "" + +#: ../../source/ref-changelog.md:306 +msgid "" +"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)." 
+msgstr "" + +#: ../../source/ref-changelog.md:308 +msgid "" +"**Introduce start_driver** " +"([#1697](https://github.com/adap/flower/pull/1697))" +msgstr "" + +#: ../../source/ref-changelog.md:310 +msgid "" +"In addition to `start_server` and using the raw Driver API, there is a " +"new `start_driver` function that allows for running `start_server` " +"scripts as a Flower driver with only a single-line code change. Check out" +" the `mt-pytorch` code example to see a working example using " +"`start_driver`." +msgstr "" + +#: ../../source/ref-changelog.md:312 +msgid "" +"**Add parameter aggregation to** `mt-pytorch` **code example** " +"([#1785](https://github.com/adap/flower/pull/1785))" +msgstr "" + +#: ../../source/ref-changelog.md:314 +msgid "" +"The `mt-pytorch` example shows how to aggregate parameters when writing a" +" driver script. The included `driver.py` and `server.py` have been " +"aligned to demonstrate both the low-level way and the high-level way of " +"building server-side logic." +msgstr "" + +#: ../../source/ref-changelog.md:316 +msgid "" +"**Migrate experimental REST API to Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" +msgstr "" + +#: ../../source/ref-changelog.md:318 +msgid "" +"The (experimental) REST API used to be implemented in " +"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" +" use [Starlette](https://www.starlette.io/) directly." +msgstr "" + +#: ../../source/ref-changelog.md:320 +msgid "" +"Please note: The REST request-response API is still experimental and will" +" likely change significantly over time." 
+msgstr "" + +#: ../../source/ref-changelog.md:322 +msgid "" +"**Introduce experimental gRPC request-response API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901))" +msgstr "" + +#: ../../source/ref-changelog.md:324 +msgid "" +"In addition to the existing gRPC API (based on bidirectional streaming) " +"and the experimental REST API, there is now a new gRPC API that uses a " +"request-response model to communicate with client nodes." +msgstr "" + +#: ../../source/ref-changelog.md:326 +msgid "" +"Please note: The gRPC request-response API is still experimental and will" +" likely change significantly over time." +msgstr "" + +#: ../../source/ref-changelog.md:328 +msgid "" +"**Replace the experimental** `start_client(rest=True)` **with the new** " +"`start_client(transport=\"rest\")` " +"([#1880](https://github.com/adap/flower/pull/1880))" +msgstr "" + +#: ../../source/ref-changelog.md:330 +msgid "" +"The (experimental) `start_client` argument `rest` was deprecated in " +"favour of a new argument `transport`. `start_client(transport=\"rest\")` " +"will yield the same behaviour as `start_client(rest=True)` did before. " +"All code should migrate to the new argument `transport`. The deprecated " +"argument `rest` will be removed in a future release." +msgstr "" + +#: ../../source/ref-changelog.md:332 +msgid "" +"**Add a new gRPC option** " +"([#2197](https://github.com/adap/flower/pull/2197))" +msgstr "" + +#: ../../source/ref-changelog.md:334 +msgid "" +"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" +" option set to 0 by default. This prevents the clients from sending " +"keepalive pings when there is no outstanding stream." 
+msgstr "" + +#: ../../source/ref-changelog.md:336 +msgid "" +"**Improve example notebooks** " +"([#2005](https://github.com/adap/flower/pull/2005))" +msgstr "" + +#: ../../source/ref-changelog.md:338 +msgid "There's a new 30min Federated Learning PyTorch tutorial!" +msgstr "" + +#: ../../source/ref-changelog.md:340 +msgid "" +"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" +msgstr "" + +#: ../../source/ref-changelog.md:342 +msgid "" +"Many examples have received significant updates, including simplified " +"advanced-tensorflow and advanced-pytorch examples, improved macOS " +"compatibility of TensorFlow examples, and code examples for simulation. A" +" major upgrade is that all code examples now have a `requirements.txt` " +"(in addition to `pyproject.toml`)." 
+msgstr "" + +#: ../../source/ref-changelog.md:344 +msgid "" +"**General improvements** " +"([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" +msgstr "" + +#: ../../source/ref-changelog.md:352 +msgid "v1.4.0 (2023-04-21)" +msgstr "" + +#: ../../source/ref-changelog.md:358 +msgid "" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +msgstr "" + +#: ../../source/ref-changelog.md:362 +msgid "" +"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " +"example)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" +msgstr "" + +#: ../../source/ref-changelog.md:364 +msgid "" +"XGBoost is a tree-based ensemble machine learning algorithm that uses " +"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" +" " +"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." 
+msgstr "" + +#: ../../source/ref-changelog.md:366 +msgid "" +"**Introduce iOS SDK (preview)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" +msgstr "" + +#: ../../source/ref-changelog.md:368 +msgid "" +"This is a major update for anyone wanting to implement Federated Learning" +" on iOS mobile devices. We now have a swift iOS SDK present under " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" that will facilitate greatly the app creating process. To showcase its " +"use, the [iOS " +"example](https://github.com/adap/flower/tree/main/examples/ios) has also " +"been updated!" +msgstr "" + +#: ../../source/ref-changelog.md:370 +msgid "" +"**Introduce new \"What is Federated Learning?\" tutorial** " +"([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" +msgstr "" + +#: ../../source/ref-changelog.md:372 +msgid "" +"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" +"what-is-federated-learning.html) in our documentation explains the basics" +" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" +" Learning to start their journey with Flower. Forward it to anyone who's " +"interested in Federated Learning!" +msgstr "" + +#: ../../source/ref-changelog.md:374 +msgid "" +"**Introduce new Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" +msgstr "" + +#: ../../source/ref-changelog.md:376 +msgid "" +"This new baseline replicates the MNIST+CNN task from the paper [Federated" +" Optimization in Heterogeneous Networks (Li et al., " +"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," +" which aims at making convergence more robust in heterogeneous settings." 
+msgstr "" + +#: ../../source/ref-changelog.md:378 +msgid "" +"**Introduce new Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" +msgstr "" + +#: ../../source/ref-changelog.md:380 +msgid "" +"This new baseline replicates an experiment evaluating the performance of " +"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " +"Benchmark for Federated Settings (Caldas et al., " +"2018)](https://arxiv.org/abs/1812.01097)." +msgstr "" + +#: ../../source/ref-changelog.md:382 +msgid "" +"**Introduce (experimental) REST API** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" +msgstr "" + +#: ../../source/ref-changelog.md:384 +msgid "" +"A new REST API has been introduced as an alternative to the gRPC-based " +"communication stack. In this initial version, the REST API only supports " +"anonymous clients." +msgstr "" + +#: ../../source/ref-changelog.md:386 +msgid "" +"Please note: The REST API is still experimental and will likely change " +"significantly over time." 
+msgstr "" + +#: ../../source/ref-changelog.md:388 +msgid "" +"**Improve the (experimental) Driver API** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" +msgstr "" + +#: ../../source/ref-changelog.md:390 +msgid "" +"The Driver API is still an experimental feature, but this release " +"introduces some major upgrades. One of the main improvements is the " +"introduction of an SQLite database to store server state on disk (instead" +" of in-memory). Another improvement is that tasks (instructions or " +"results) that have been delivered will now be deleted. This greatly " +"improves the memory efficiency of a long-running Flower server." +msgstr "" + +#: ../../source/ref-changelog.md:392 +msgid "" +"**Fix spilling issues related to Ray during simulations** " +"([#1698](https://github.com/adap/flower/pull/1698))" +msgstr "" + +#: ../../source/ref-changelog.md:394 +msgid "" +"While running long simulations, `ray` was sometimes spilling huge amounts" +" of data that would make the training unable to continue. This is now " +"fixed! 🎉" +msgstr "" + +#: ../../source/ref-changelog.md:396 +msgid "" +"**Add new example using** `TabNet` **and Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" +msgstr "" + +#: ../../source/ref-changelog.md:398 +msgid "" +"TabNet is a powerful and flexible framework for training machine learning" +" models on tabular data. We now have a federated example using Flower: " +"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" +"/quickstart-tabnet)." 
+msgstr "" + +#: ../../source/ref-changelog.md:400 +msgid "" +"**Add new how-to guide for monitoring simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" +msgstr "" + +#: ../../source/ref-changelog.md:402 +msgid "" +"We now have a documentation guide to help users monitor their performance" +" during simulations." +msgstr "" + +#: ../../source/ref-changelog.md:404 +msgid "" +"**Add training metrics to** `History` **object during simulations** " +"([#1696](https://github.com/adap/flower/pull/1696))" +msgstr "" + +#: ../../source/ref-changelog.md:406 +msgid "" +"The `fit_metrics_aggregation_fn` can be used to aggregate training " +"metrics, but previous releases did not save the results in the `History` " +"object. This is now the case!" +msgstr "" + +#: ../../source/ref-changelog.md:408 +msgid "" +"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " 
+"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " +"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" +msgstr "" + +#: ../../source/ref-changelog.md:416 +msgid "v1.3.0 (2023-02-06)" +msgstr "" + +#: ../../source/ref-changelog.md:422 +msgid "" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. 
Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +msgstr "" + +#: ../../source/ref-changelog.md:426 +msgid "" +"**Add support for** `workload_id` **and** `group_id` **in Driver API** " +"([#1595](https://github.com/adap/flower/pull/1595))" +msgstr "" + +#: ../../source/ref-changelog.md:428 +msgid "" +"The (experimental) Driver API now supports a `workload_id` that can be " +"used to identify which workload a task belongs to. It also supports a new" +" `group_id` that can be used, for example, to indicate the current " +"training round. Both the `workload_id` and `group_id` enable client nodes" +" to decide whether they want to handle a task or not." +msgstr "" + +#: ../../source/ref-changelog.md:430 +msgid "" +"**Make Driver API and Fleet API address configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" +msgstr "" + +#: ../../source/ref-changelog.md:432 +msgid "" +"The (experimental) long-running Flower server (Driver API and Fleet API) " +"can now configure the server address of both Driver API (via `--driver-" +"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" +msgstr "" + +#: ../../source/ref-changelog.md:434 +msgid "" +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" +msgstr "" + +#: ../../source/ref-changelog.md:436 +msgid "Both IPv4 and IPv6 addresses are supported." +msgstr "" + +#: ../../source/ref-changelog.md:438 +msgid "" +"**Add new example of Federated Learning using fastai and Flower** " +"([#1598](https://github.com/adap/flower/pull/1598))" +msgstr "" + +#: ../../source/ref-changelog.md:440 +msgid "" +"A new code example (`quickstart-fastai`) demonstrates federated learning " +"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." 
+msgstr "" + +#: ../../source/ref-changelog.md:442 +msgid "" +"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" +" versions of Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" +msgstr "" + +#: ../../source/ref-changelog.md:444 +msgid "" +"The Android code example has received a substantial update: the project " +"is compatible with Flower 1.0 (and later), the UI received a full " +"refresh, and the project is updated to be compatible with newer Android " +"tooling." +msgstr "" + +#: ../../source/ref-changelog.md:446 +msgid "" +"**Add new `FedProx` strategy** " +"([#1619](https://github.com/adap/flower/pull/1619))" +msgstr "" + +#: ../../source/ref-changelog.md:448 +msgid "" +"This " +"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" is almost identical to " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" but helps users replicate what is described in this " +"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " +"parameter called `proximal_mu` to regularize the local models with " +"respect to the global models." +msgstr "" + +#: ../../source/ref-changelog.md:450 +msgid "" +"**Add new metrics to telemetry events** " +"([#1640](https://github.com/adap/flower/pull/1640))" +msgstr "" + +#: ../../source/ref-changelog.md:452 +msgid "" +"An updated event structure allows, for example, the clustering of events " +"within the same workload." 
+msgstr "" + +#: ../../source/ref-changelog.md:454 +msgid "" +"**Add new custom strategy tutorial section** " +"[#1623](https://github.com/adap/flower/pull/1623)" +msgstr "" + +#: ../../source/ref-changelog.md:456 +msgid "" +"The Flower tutorial now has a new section that covers implementing a " +"custom strategy from scratch: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +msgstr "" + +#: ../../source/ref-changelog.md:458 +msgid "" +"**Add new custom serialization tutorial section** " +"([#1622](https://github.com/adap/flower/pull/1622))" +msgstr "" + +#: ../../source/ref-changelog.md:460 +msgid "" +"The Flower tutorial now has a new section that covers custom " +"serialization: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" +msgstr "" + +#: ../../source/ref-changelog.md:462 +msgid "" +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " 
+"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github.com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" +msgstr "" + +#: ../../source/ref-changelog.md:466 +msgid "" +"**Updated documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" +msgstr "" + +#: ../../source/ref-changelog.md:468 ../../source/ref-changelog.md:535 +msgid "" +"As usual, the documentation has improved quite a bit. It is another step " +"in our effort to make the Flower documentation the best documentation of " +"any project. Stay tuned and as always, feel free to provide feedback!" +msgstr "" + +#: ../../source/ref-changelog.md:474 +msgid "v1.2.0 (2023-01-13)" +msgstr "" + +#: ../../source/ref-changelog.md:480 +msgid "" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." 
+" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +msgstr "" + +#: ../../source/ref-changelog.md:484 +msgid "" +"**Introduce new Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" +msgstr "" + +#: ../../source/ref-changelog.md:486 +msgid "" +"Over the coming weeks, we will be releasing a number of new reference " +"implementations useful especially to FL newcomers. They will typically " +"revisit well known papers from the literature, and be suitable for " +"integration in your own application or for experimentation, in order to " +"deepen your knowledge of FL in general. Today's release is the first in " +"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" +msgstr "" + +#: ../../source/ref-changelog.md:488 +msgid "" +"**Improve GPU support in simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" +msgstr "" + +#: ../../source/ref-changelog.md:490 +msgid "" +"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" +" to improve GPU support. The update includes some of the hard-earned " +"lessons from scaling simulations in GPU cluster environments. New " +"defaults make running GPU-based simulations substantially more robust." +msgstr "" + +#: ../../source/ref-changelog.md:492 +msgid "" +"**Improve GPU support in Jupyter Notebook tutorials** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" +msgstr "" + +#: ../../source/ref-changelog.md:494 +msgid "" +"Some users reported that Jupyter Notebooks have not always been easy to " +"use on GPU instances. We listened and made improvements to all of our " +"Jupyter notebooks! 
Check out the updated notebooks here:" +msgstr "" + +#: ../../source/ref-changelog.md:496 +msgid "" +"[An Introduction to Federated Learning](https://flower.ai/docs/framework" +"/tutorial-get-started-with-flower-pytorch.html)" +msgstr "" + +#: ../../source/ref-changelog.md:497 +msgid "" +"[Strategies in Federated Learning](https://flower.ai/docs/framework" +"/tutorial-use-a-federated-learning-strategy-pytorch.html)" +msgstr "" + +#: ../../source/ref-changelog.md:498 +msgid "" +"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" +"-strategy-from-scratch-pytorch.html)" +msgstr "" + +#: ../../source/ref-changelog.md:499 +msgid "" +"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" +"customize-the-client-pytorch.html)" +msgstr "" + +#: ../../source/ref-changelog.md:501 +msgid "" +"**Introduce optional telemetry** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" +msgstr "" + +#: ../../source/ref-changelog.md:503 +msgid "" +"After a [request for " +"feedback](https://github.com/adap/flower/issues/1534) from the community," +" the Flower open-source project introduces optional collection of " +"*anonymous* usage metrics to make well-informed decisions to improve " +"Flower. Doing this enables the Flower team to understand how Flower is " +"used and what challenges users might face." +msgstr "" + +#: ../../source/ref-changelog.md:505 +msgid "" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users who do not want to share anonymous usage metrics. " +"[Read more.](https://flower.ai/docs/telemetry.html)." 
+msgstr "" + +#: ../../source/ref-changelog.md:507 +msgid "" +"**Introduce (experimental) Driver API** " +"([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" +msgstr "" + +#: ../../source/ref-changelog.md:509 +msgid "" +"Flower now has a new (experimental) Driver API which will enable fully " +"programmable, async, and multi-tenant Federated Learning and Federated " +"Analytics applications. Phew, that's a lot! Going forward, the Driver API" +" will be the abstraction that many upcoming features will be built on - " +"and you can start building those things now, too." +msgstr "" + +#: ../../source/ref-changelog.md:511 +msgid "" +"The Driver API also enables a new execution mode in which the server runs" +" indefinitely. Multiple individual workloads can run concurrently and " +"start and stop their execution independent of the server. This is " +"especially useful for users who want to deploy Flower in production." +msgstr "" + +#: ../../source/ref-changelog.md:513 +msgid "" +"To learn more, check out the `mt-pytorch` code example. We look forward " +"to you feedback!" +msgstr "" + +#: ../../source/ref-changelog.md:515 +msgid "" +"Please note: *The Driver API is still experimental and will likely change" +" significantly over time.*" +msgstr "" + +#: ../../source/ref-changelog.md:517 +msgid "" +"**Add new Federated Analytics with Pandas example** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" +msgstr "" + +#: ../../source/ref-changelog.md:519 +msgid "" +"A new code example (`quickstart-pandas`) demonstrates federated analytics" +" with Pandas and Flower. 
You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." +msgstr "" + +#: ../../source/ref-changelog.md:521 +msgid "" +"**Add new strategies: Krum and MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" +msgstr "" + +#: ../../source/ref-changelog.md:523 +msgid "" +"Edoardo, a computer science student at the Sapienza University of Rome, " +"contributed a new `Krum` strategy that enables users to easily use Krum " +"and MultiKrum in their workloads." +msgstr "" + +#: ../../source/ref-changelog.md:525 +msgid "" +"**Update C++ example to be compatible with Flower v1.2.0** " +"([#1495](https://github.com/adap/flower/pull/1495))" +msgstr "" + +#: ../../source/ref-changelog.md:527 +msgid "" +"The C++ code example has received a substantial update to make it " +"compatible with the latest version of Flower." +msgstr "" + +#: ../../source/ref-changelog.md:529 +msgid "" +"**General improvements** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" +msgstr "" + +#: ../../source/ref-changelog.md:533 +msgid "" +"**Updated documentation** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " 
+"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" +msgstr "" + +#: ../../source/ref-changelog.md:537 +msgid "" +"One highlight is the new [first time contributor " +"guide](https://flower.ai/docs/first-time-contributors.html): if you've " +"never contributed on GitHub before, this is the perfect place to start!" +msgstr "" + +#: ../../source/ref-changelog.md:543 +msgid "v1.1.0 (2022-10-31)" +msgstr "" + +#: ../../source/ref-changelog.md:547 +msgid "" +"We would like to give our **special thanks** to all the contributors who " +"made the new version of Flower possible (in `git shortlog` order):" +msgstr "" + +#: ../../source/ref-changelog.md:549 +msgid "" +"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" +msgstr "" + +#: ../../source/ref-changelog.md:553 +msgid "" +"**Introduce Differential Privacy wrappers (preview)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" +msgstr "" + +#: ../../source/ref-changelog.md:555 +msgid "" +"The first (experimental) preview of pluggable Differential Privacy " +"wrappers enables easy configuration and usage of differential privacy " +"(DP). The pluggable DP wrappers enable framework-agnostic **and** " +"strategy-agnostic usage of both client-side DP and server-side DP. Head " +"over to the Flower docs, a new explainer goes into more detail." 
+msgstr "" + +#: ../../source/ref-changelog.md:557 +msgid "" +"**New iOS CoreML code example** " +"([#1289](https://github.com/adap/flower/pull/1289))" +msgstr "" + +#: ../../source/ref-changelog.md:559 +msgid "" +"Flower goes iOS! A massive new code example shows how Flower clients can " +"be built for iOS. The code example contains both Flower iOS SDK " +"components that can be used for many tasks, and one task example running " +"on CoreML." +msgstr "" + +#: ../../source/ref-changelog.md:561 +msgid "" +"**New FedMedian strategy** " +"([#1461](https://github.com/adap/flower/pull/1461))" +msgstr "" + +#: ../../source/ref-changelog.md:563 +msgid "" +"The new `FedMedian` strategy implements Federated Median (FedMedian) by " +"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." +msgstr "" + +#: ../../source/ref-changelog.md:565 +msgid "" +"**Log** `Client` **exceptions in Virtual Client Engine** " +"([#1493](https://github.com/adap/flower/pull/1493))" +msgstr "" + +#: ../../source/ref-changelog.md:567 +msgid "" +"All `Client` exceptions happening in the VCE are now logged by default " +"and not just exposed to the configured `Strategy` (via the `failures` " +"argument)." +msgstr "" + +#: ../../source/ref-changelog.md:569 +msgid "" +"**Improve Virtual Client Engine internals** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" +msgstr "" + +#: ../../source/ref-changelog.md:571 +msgid "" +"Some internals of the Virtual Client Engine have been revamped. The VCE " +"now uses Ray 2.0 under the hood, the value type of the `client_resources`" +" dictionary changed to `float` to allow fractions of resources to be " +"allocated." 
+msgstr "" + +#: ../../source/ref-changelog.md:573 +msgid "" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" +msgstr "" + +#: ../../source/ref-changelog.md:575 +msgid "" +"The Virtual Client Engine now has full support for optional `Client` (and" +" `NumPyClient`) methods." +msgstr "" + +#: ../../source/ref-changelog.md:577 +msgid "" +"**Provide type information to packages using** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" +msgstr "" + +#: ../../source/ref-changelog.md:579 +msgid "" +"The package `flwr` is now bundled with a `py.typed` file indicating that " +"the package is typed. This enables typing support for projects or " +"packages that use `flwr` by enabling them to improve their code using " +"static type checkers like `mypy`." +msgstr "" + +#: ../../source/ref-changelog.md:581 +msgid "" +"**Updated code example** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" +msgstr "" + +#: ../../source/ref-changelog.md:583 +msgid "" +"The code examples covering scikit-learn and PyTorch Lightning have been " +"updated to work with the latest version of Flower." 
+msgstr "" + +#: ../../source/ref-changelog.md:585 +msgid "" +"**Updated documentation** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" +msgstr "" + +#: ../../source/ref-changelog.md:587 +msgid "" +"There have been so many documentation updates that it doesn't even make " +"sense to list them individually." +msgstr "" + +#: ../../source/ref-changelog.md:589 +msgid "" +"**Restructured documentation** " +"([#1387](https://github.com/adap/flower/pull/1387))" +msgstr "" + +#: ../../source/ref-changelog.md:591 +msgid "" +"The documentation has been restructured to make it easier to navigate. " +"This is just the first step in a larger effort to make the Flower " +"documentation the best documentation of any project ever. Stay tuned!" +msgstr "" + +#: ../../source/ref-changelog.md:593 +msgid "" +"**Open in Colab button** " +"([#1389](https://github.com/adap/flower/pull/1389))" +msgstr "" + +#: ../../source/ref-changelog.md:595 +msgid "" +"The four parts of the Flower Federated Learning Tutorial now come with a " +"new `Open in Colab` button. 
No need to install anything on your local " +"machine, you can now use and learn about Flower in your browser, it's " +"only a single click away." +msgstr "" + +#: ../../source/ref-changelog.md:597 +msgid "" +"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" +msgstr "" + +#: ../../source/ref-changelog.md:599 +msgid "" +"The Flower Federated Learning Tutorial has two brand-new parts covering " +"custom strategies (still WIP) and the distinction between `Client` and " +"`NumPyClient`. The existing parts one and two have also been improved " +"(many small changes and fixes)." +msgstr "" + +#: ../../source/ref-changelog.md:605 +msgid "v1.0.0 (2022-07-28)" +msgstr "" + +#: ../../source/ref-changelog.md:607 +msgid "Highlights" +msgstr "" + +#: ../../source/ref-changelog.md:609 +msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" +msgstr "" + +#: ../../source/ref-changelog.md:610 +msgid "All `Client`/`NumPyClient` methods are now optional" +msgstr "" + +#: ../../source/ref-changelog.md:611 +msgid "Configurable `get_parameters`" +msgstr "" + +#: ../../source/ref-changelog.md:612 +msgid "" +"Tons of small API cleanups resulting in a more coherent developer " +"experience" +msgstr "" + +#: ../../source/ref-changelog.md:616 +msgid "" +"We would like to give our **special thanks** to all the contributors who " +"made Flower 1.0 possible (in reverse [GitHub " +"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +msgstr "" + +#: ../../source/ref-changelog.md:618 +msgid "" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), 
[@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." 
+msgstr "" + +#: ../../source/ref-changelog.md:622 +msgid "" +"**All arguments must be passed as keyword arguments** " +"([#1338](https://github.com/adap/flower/pull/1338))" +msgstr "" + +#: ../../source/ref-changelog.md:624 +msgid "" +"Pass all arguments as keyword arguments, positional arguments are not " +"longer supported. Code that uses positional arguments (e.g., " +"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " +"for each positional argument (e.g., " +"`start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." +msgstr "" + +#: ../../source/ref-changelog.md:626 +msgid "" +"**Introduce configuration object** `ServerConfig` **in** `start_server` " +"**and** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" +msgstr "" + +#: ../../source/ref-changelog.md:628 +msgid "" +"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " +"600.0}`, `start_server` and `start_simulation` now expect a configuration" +" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" +" arguments that as the previous config dict, but it makes writing type-" +"safe code easier and the default parameters values more transparent." 
+msgstr "" + +#: ../../source/ref-changelog.md:630 +msgid "" +"**Rename built-in strategy parameters for clarity** " +"([#1334](https://github.com/adap/flower/pull/1334))" +msgstr "" + +#: ../../source/ref-changelog.md:632 +msgid "" +"The following built-in strategy parameters were renamed to improve " +"readability and consistency with other API's:" +msgstr "" + +#: ../../source/ref-changelog.md:634 +msgid "`fraction_eval` --> `fraction_evaluate`" +msgstr "" + +#: ../../source/ref-changelog.md:635 +msgid "`min_eval_clients` --> `min_evaluate_clients`" +msgstr "" + +#: ../../source/ref-changelog.md:636 +msgid "`eval_fn` --> `evaluate_fn`" +msgstr "" + +#: ../../source/ref-changelog.md:638 +msgid "" +"**Update default arguments of built-in strategies** " +"([#1278](https://github.com/adap/flower/pull/1278))" +msgstr "" + +#: ../../source/ref-changelog.md:640 +msgid "" +"All built-in strategies now use `fraction_fit=1.0` and " +"`fraction_evaluate=1.0`, which means they select *all* currently " +"available clients for training and evaluation. Projects that relied on " +"the previous default values can get the previous behaviour by " +"initializing the strategy in the following way:" +msgstr "" + +#: ../../source/ref-changelog.md:642 +msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +msgstr "" + +#: ../../source/ref-changelog.md:644 +msgid "" +"**Add** `server_round` **to** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" +msgstr "" + +#: ../../source/ref-changelog.md:646 +msgid "" +"The `Strategy` method `evaluate` now receives the current round of " +"federated learning/evaluation as the first parameter." 
+msgstr "" + +#: ../../source/ref-changelog.md:648 +msgid "" +"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" +msgstr "" + +#: ../../source/ref-changelog.md:650 +msgid "" +"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " +"three parameters: (1) The current round of federated learning/evaluation " +"(`server_round`), (2) the model parameters to evaluate (`parameters`), " +"and (3) a config dictionary (`config`)." +msgstr "" + +#: ../../source/ref-changelog.md:652 +msgid "" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" +msgstr "" + +#: ../../source/ref-changelog.md:654 +msgid "" +"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " +"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " +"current round of federated learning/evaluation as their first parameter. " +"To improve reaability and avoid confusion with *random*, this parameter " +"has been renamed from `rnd` to `server_round`." +msgstr "" + +#: ../../source/ref-changelog.md:656 +msgid "" +"**Move** `flwr.dataset` **to** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" +msgstr "" + +#: ../../source/ref-changelog.md:658 +msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." +msgstr "" + +#: ../../source/ref-changelog.md:660 +msgid "" +"**Remove experimental strategies** " +"([#1280](https://github.com/adap/flower/pull/1280))" +msgstr "" + +#: ../../source/ref-changelog.md:662 +msgid "" +"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " +"`FedFSv1`)." 
+msgstr "" + +#: ../../source/ref-changelog.md:664 +msgid "" +"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" +msgstr "" + +#: ../../source/ref-changelog.md:666 +msgid "" +"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " +"capture what this type is all about." +msgstr "" + +#: ../../source/ref-changelog.md:668 +msgid "" +"**Remove antiquated** `force_final_distributed_eval` **from** " +"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" +msgstr "" + +#: ../../source/ref-changelog.md:670 +msgid "" +"The `start_server` parameter `force_final_distributed_eval` has long been" +" a historic artefact, in this release it is finally gone for good." +msgstr "" + +#: ../../source/ref-changelog.md:672 +msgid "" +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" +msgstr "" + +#: ../../source/ref-changelog.md:674 +msgid "" +"The `get_parameters` method now accepts a configuration dictionary, just " +"like `get_properties`, `fit`, and `evaluate`." +msgstr "" + +#: ../../source/ref-changelog.md:676 +msgid "" +"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " +"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +msgstr "" + +#: ../../source/ref-changelog.md:678 +msgid "" +"The `start_simulation` function now accepts a configuration dictionary " +"`config` instead of the `num_rounds` integer. This improves the " +"consistency between `start_simulation` and `start_server` and makes " +"transitioning between the two easier." 
+msgstr "" + +#: ../../source/ref-changelog.md:682 +msgid "" +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" +msgstr "" + +#: ../../source/ref-changelog.md:684 +msgid "" +"The previous Flower release introduced experimental support for Python " +"3.10, this release declares Python 3.10 support as stable." +msgstr "" + +#: ../../source/ref-changelog.md:686 +msgid "" +"**Make all** `Client` **and** `NumPyClient` **methods optional** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" +msgstr "" + +#: ../../source/ref-changelog.md:688 +msgid "" +"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " +"`fit`, and `evaluate` are all optional. This enables writing clients that" +" implement, for example, only `fit`, but no other method. No need to " +"implement `evaluate` when using centralized evaluation!" +msgstr "" + +#: ../../source/ref-changelog.md:690 +msgid "" +"**Enable passing a** `Server` **instance to** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" +msgstr "" + +#: ../../source/ref-changelog.md:692 +msgid "" +"Similar to `start_server`, `start_simulation` now accepts a full `Server`" +" instance. This enables users to heavily customize the execution of " +"eperiments and opens the door to running, for example, async FL using the" +" Virtual Client Engine." 
+msgstr "" + +#: ../../source/ref-changelog.md:694 +msgid "" +"**Update code examples** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" +msgstr "" + +#: ../../source/ref-changelog.md:696 +msgid "" +"Many code examples received small or even large maintenance updates, " +"among them are" +msgstr "" + +#: ../../source/ref-changelog.md:698 +msgid "`scikit-learn`" +msgstr "" + +#: ../../source/ref-changelog.md:699 +msgid "`simulation_pytorch`" +msgstr "" + +#: ../../source/ref-changelog.md:700 +msgid "`quickstart_pytorch`" +msgstr "" + +#: ../../source/ref-changelog.md:701 +msgid "`quickstart_simulation`" +msgstr "" + +#: ../../source/ref-changelog.md:702 +msgid "`quickstart_tensorflow`" +msgstr "" + +#: ../../source/ref-changelog.md:703 +msgid "`advanced_tensorflow`" +msgstr "" + +#: ../../source/ref-changelog.md:705 +msgid "" +"**Remove the obsolete simulation example** " +"([#1328](https://github.com/adap/flower/pull/1328))" +msgstr "" + +#: ../../source/ref-changelog.md:707 +msgid "" +"Removes the obsolete `simulation` example and renames " +"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " +"naming of `simulation_pytorch`" +msgstr "" + +#: ../../source/ref-changelog.md:709 +msgid "" +"**Update documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" +msgstr "" + +#: ../../source/ref-changelog.md:711 +msgid "" +"One 
substantial documentation update fixes multiple smaller rendering " +"issues, makes titles more succinct to improve navigation, removes a " +"deprecated library, updates documentation dependencies, includes the " +"`flwr.common` module in the API reference, includes support for markdown-" +"based documentation, migrates the changelog from `.rst` to `.md`, and " +"fixes a number of smaller details!" +msgstr "" + +#: ../../source/ref-changelog.md:713 ../../source/ref-changelog.md:768 +#: ../../source/ref-changelog.md:837 ../../source/ref-changelog.md:876 +msgid "**Minor updates**" +msgstr "" + +#: ../../source/ref-changelog.md:715 +msgid "" +"Add round number to fit and evaluate log messages " +"([#1266](https://github.com/adap/flower/pull/1266))" +msgstr "" + +#: ../../source/ref-changelog.md:716 +msgid "" +"Add secure gRPC connection to the `advanced_tensorflow` code example " +"([#847](https://github.com/adap/flower/pull/847))" +msgstr "" + +#: ../../source/ref-changelog.md:717 +msgid "" +"Update developer tooling " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" +msgstr "" + +#: ../../source/ref-changelog.md:718 +msgid "" +"Rename ProtoBuf messages to improve consistency " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" +msgstr "" + +#: ../../source/ref-changelog.md:720 +msgid "v0.19.0 (2022-05-18)" +msgstr "" + +#: ../../source/ref-changelog.md:724 +msgid "" +"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" +msgstr "" + +#: ../../source/ref-changelog.md:726 +msgid "" +"The first preview release of Flower 
Baselines has arrived! We're " +"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " +"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " +"With this first preview release we're also inviting the community to " +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." +msgstr "" + +#: ../../source/ref-changelog.md:728 +msgid "" +"**C++ client SDK (preview) and code example** " +"([#1111](https://github.com/adap/flower/pull/1111))" +msgstr "" + +#: ../../source/ref-changelog.md:730 +msgid "" +"Preview support for Flower clients written in C++. The C++ preview " +"includes a Flower client SDK and a quickstart code example that " +"demonstrates a simple C++ client using the SDK." +msgstr "" + +#: ../../source/ref-changelog.md:732 +msgid "" +"**Add experimental support for Python 3.10 and Python 3.11** " +"([#1135](https://github.com/adap/flower/pull/1135))" +msgstr "" + +#: ../../source/ref-changelog.md:734 +msgid "" +"Python 3.10 is the latest stable release of Python and Python 3.11 is due" +" to be released in October. This Flower release adds experimental support" +" for both Python versions." +msgstr "" + +#: ../../source/ref-changelog.md:736 +msgid "" +"**Aggregate custom metrics through user-provided functions** " +"([#1144](https://github.com/adap/flower/pull/1144))" +msgstr "" + +#: ../../source/ref-changelog.md:738 +msgid "" +"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" +" customize the strategy. Built-in strategies support two new arguments, " +"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " +"allow passing custom metric aggregation functions." 
+msgstr "" + +#: ../../source/ref-changelog.md:740 +msgid "" +"**User-configurable round timeout** " +"([#1162](https://github.com/adap/flower/pull/1162))" +msgstr "" + +#: ../../source/ref-changelog.md:742 +msgid "" +"A new configuration value allows the round timeout to be set for " +"`start_server` and `start_simulation`. If the `config` dictionary " +"contains a `round_timeout` key (with a `float` value in seconds), the " +"server will wait *at least* `round_timeout` seconds before it closes the " +"connection." +msgstr "" + +#: ../../source/ref-changelog.md:744 +msgid "" +"**Enable both federated evaluation and centralized evaluation to be used " +"at the same time in all built-in strategies** " +"([#1091](https://github.com/adap/flower/pull/1091))" +msgstr "" + +#: ../../source/ref-changelog.md:746 +msgid "" +"Built-in strategies can now perform both federated evaluation (i.e., " +"client-side) and centralized evaluation (i.e., server-side) in the same " +"round. Federated evaluation can be disabled by setting `fraction_eval` to" +" `0.0`." 
+msgstr "" + +#: ../../source/ref-changelog.md:748 +msgid "" +"**Two new Jupyter Notebook tutorials** " +"([#1141](https://github.com/adap/flower/pull/1141))" +msgstr "" + +#: ../../source/ref-changelog.md:750 +msgid "" +"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " +"basic and intermediate Flower features:" +msgstr "" + +#: ../../source/ref-changelog.md:752 +msgid "" +"*An Introduction to Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" +msgstr "" + +#: ../../source/ref-changelog.md:754 +msgid "" +"*Using Strategies in Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" +msgstr "" + +#: ../../source/ref-changelog.md:756 +msgid "" +"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " +"([#1076](https://github.com/adap/flower/pull/1076))" +msgstr "" + +#: ../../source/ref-changelog.md:758 +msgid "" +"The new `FedAvgM` strategy implements Federated Averaging with Server " +"Momentum \\[Hsu et al., 2019\\]." +msgstr "" + +#: ../../source/ref-changelog.md:760 +msgid "" +"**New advanced PyTorch code example** " +"([#1007](https://github.com/adap/flower/pull/1007))" +msgstr "" + +#: ../../source/ref-changelog.md:762 +msgid "" +"A new code example (`advanced_pytorch`) demonstrates advanced Flower " +"concepts with PyTorch." +msgstr "" + +#: ../../source/ref-changelog.md:764 +msgid "" +"**New JAX code example** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" +msgstr "" + +#: ../../source/ref-changelog.md:766 +msgid "" +"A new code example (`jax_from_centralized_to_federated`) shows federated " +"learning with JAX and Flower." 
+msgstr "" + +#: ../../source/ref-changelog.md:770 +msgid "" +"New option to keep Ray running if Ray was already initialized in " +"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +msgstr "" + +#: ../../source/ref-changelog.md:771 +msgid "" +"Add support for custom `ClientManager` as a `start_simulation` parameter " +"([#1171](https://github.com/adap/flower/pull/1171))" +msgstr "" + +#: ../../source/ref-changelog.md:772 +msgid "" +"New documentation for [implementing " +"strategies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" +msgstr "" + +#: ../../source/ref-changelog.md:773 +msgid "" +"New mobile-friendly documentation theme " +"([#1174](https://github.com/adap/flower/pull/1174))" +msgstr "" + +#: ../../source/ref-changelog.md:774 +msgid "" +"Limit version range for (optional) `ray` dependency to include only " +"compatible releases (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" +msgstr "" + +#: ../../source/ref-changelog.md:778 +msgid "" +"**Remove deprecated support for Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" +msgstr "" + +#: ../../source/ref-changelog.md:779 +msgid "" +"**Remove deprecated KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" +msgstr "" + +#: ../../source/ref-changelog.md:780 +msgid "" +"**Remove deprecated no-op extra installs** " +"([#973](https://github.com/adap/flower/pull/973))" +msgstr "" + +#: ../../source/ref-changelog.md:781 +msgid "" +"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " +"([#869](https://github.com/adap/flower/pull/869))" +msgstr "" + +#: ../../source/ref-changelog.md:782 +msgid "" +"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" +msgstr "" + +#: ../../source/ref-changelog.md:783 +msgid "" 
+"**Remove deprecated DefaultStrategy strategy** " +"([#1142](https://github.com/adap/flower/pull/1142))" +msgstr "" + +#: ../../source/ref-changelog.md:784 +msgid "" +"**Remove deprecated support for eval_fn accuracy return value** " +"([#1142](https://github.com/adap/flower/pull/1142))" +msgstr "" + +#: ../../source/ref-changelog.md:785 +msgid "" +"**Remove deprecated support for passing initial parameters as NumPy " +"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +msgstr "" + +#: ../../source/ref-changelog.md:787 +msgid "v0.18.0 (2022-02-28)" +msgstr "" + +#: ../../source/ref-changelog.md:791 +msgid "" +"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " +"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" +msgstr "" + +#: ../../source/ref-changelog.md:793 +msgid "" +"Simulations (using the Virtual Client Engine through `start_simulation`) " +"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " +"installing Flower with the `simulation` extra (`pip install " +"flwr[simulation]`)." +msgstr "" + +#: ../../source/ref-changelog.md:795 +msgid "" +"**New Jupyter Notebook code example** " +"([#833](https://github.com/adap/flower/pull/833))" +msgstr "" + +#: ../../source/ref-changelog.md:797 +msgid "" +"A new code example (`quickstart_simulation`) demonstrates Flower " +"simulations using the Virtual Client Engine through Jupyter Notebook " +"(incl. Google Colab)." +msgstr "" + +#: ../../source/ref-changelog.md:799 +msgid "" +"**Client properties (feature preview)** " +"([#795](https://github.com/adap/flower/pull/795))" +msgstr "" + +#: ../../source/ref-changelog.md:801 +msgid "" +"Clients can implement a new method `get_properties` to enable server-side" +" strategies to query client properties." 
+msgstr "" + +#: ../../source/ref-changelog.md:803 +msgid "" +"**Experimental Android support with TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" +msgstr "" + +#: ../../source/ref-changelog.md:805 +msgid "" +"Android support has finally arrived in `main`! Flower is both client-" +"agnostic and framework-agnostic by design. One can integrate arbitrary " +"client platforms and with this release, using Flower on Android has " +"become a lot easier." +msgstr "" + +#: ../../source/ref-changelog.md:807 +msgid "" +"The example uses TFLite on the client side, along with a new " +"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " +"still experimental, but they are a first step towards a fully-fledged " +"Android SDK and a unified `FedAvg` implementation that integrated the new" +" functionality from `FedAvgAndroid`." +msgstr "" + +#: ../../source/ref-changelog.md:809 +msgid "" +"**Make gRPC keepalive time user-configurable and decrease default " +"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +msgstr "" + +#: ../../source/ref-changelog.md:811 +msgid "" +"The default gRPC keepalive time has been reduced to increase the " +"compatibility of Flower with more cloud environments (for example, " +"Microsoft Azure). Users can configure the keepalive time to customize the" +" gRPC stack based on specific requirements." +msgstr "" + +#: ../../source/ref-changelog.md:813 +msgid "" +"**New differential privacy example using Opacus and PyTorch** " +"([#805](https://github.com/adap/flower/pull/805))" +msgstr "" + +#: ../../source/ref-changelog.md:815 +msgid "" +"A new code example (`opacus`) demonstrates differentially-private " +"federated learning with Opacus, PyTorch, and Flower." 
+msgstr "" + +#: ../../source/ref-changelog.md:817 +msgid "" +"**New Hugging Face Transformers code example** " +"([#863](https://github.com/adap/flower/pull/863))" +msgstr "" + +#: ../../source/ref-changelog.md:819 +msgid "" +"A new code example (`quickstart_huggingface`) demonstrates usage of " +"Hugging Face Transformers with Flower." +msgstr "" + +#: ../../source/ref-changelog.md:821 +msgid "" +"**New MLCube code example** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" +msgstr "" + +#: ../../source/ref-changelog.md:823 +msgid "" +"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " +"with Flower." +msgstr "" + +#: ../../source/ref-changelog.md:825 +msgid "" +"**SSL-enabled server and client** " +"([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" +msgstr "" + +#: ../../source/ref-changelog.md:827 +msgid "" +"SSL enables secure encrypted connections between clients and servers. " +"This release open-sources the Flower secure gRPC implementation to make " +"encrypted communication channels accessible to all Flower users." +msgstr "" + +#: ../../source/ref-changelog.md:829 +msgid "" +"**Updated** `FedAdam` **and** `FedYogi` **strategies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" +msgstr "" + +#: ../../source/ref-changelog.md:831 +msgid "" +"`FedAdam` and `FedYogi` match the latest version of the Adaptive " +"Federated Optimization paper." 
+msgstr "" + +#: ../../source/ref-changelog.md:833 +msgid "" +"**Initialize** `start_simulation` **with a list of client IDs** " +"([#860](https://github.com/adap/flower/pull/860))" +msgstr "" + +#: ../../source/ref-changelog.md:835 +msgid "" +"`start_simulation` can now be called with a list of client IDs " +"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " +"`client_fn` whenever a client needs to be initialized, which can make it " +"easier to load data partitions that are not accessible through `int` " +"identifiers." +msgstr "" + +#: ../../source/ref-changelog.md:839 +msgid "" +"Update `num_examples` calculation in PyTorch code examples in " +"([#909](https://github.com/adap/flower/pull/909))" +msgstr "" + +#: ../../source/ref-changelog.md:840 +msgid "" +"Expose Flower version through `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" +msgstr "" + +#: ../../source/ref-changelog.md:841 +msgid "" +"`start_server` in `app.py` now returns a `History` object containing " +"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +msgstr "" + +#: ../../source/ref-changelog.md:842 +msgid "" +"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" +msgstr "" + +#: ../../source/ref-changelog.md:843 +msgid "" +"Increase sleep time after server start to three seconds in all code " +"examples ([#1086](https://github.com/adap/flower/pull/1086))" +msgstr "" + +#: ../../source/ref-changelog.md:844 +msgid "" +"Added a new FAQ section to the documentation " +"([#948](https://github.com/adap/flower/pull/948))" +msgstr "" + +#: ../../source/ref-changelog.md:845 +msgid "" +"And many more under-the-hood changes, library updates, documentation " +"changes, and tooling improvements!" 
+msgstr "" + +#: ../../source/ref-changelog.md:849 +msgid "" +"**Removed** `flwr_example` **and** `flwr_experimental` **from release " +"build** ([#869](https://github.com/adap/flower/pull/869))" +msgstr "" + +#: ../../source/ref-changelog.md:851 +msgid "" +"The packages `flwr_example` and `flwr_experimental` have been deprecated " +"since Flower 0.12.0 and they are no longer included in Flower release " +"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " +"an upcoming release." +msgstr "" + +#: ../../source/ref-changelog.md:853 +msgid "v0.17.0 (2021-09-24)" +msgstr "" + +#: ../../source/ref-changelog.md:857 +msgid "" +"**Experimental virtual client engine** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" +msgstr "" + +#: ../../source/ref-changelog.md:859 +msgid "" +"One of Flower's goals is to enable research at scale. This release " +"enables a first (experimental) peek at a major new feature, codenamed the" +" virtual client engine. Virtual clients enable simulations that scale to " +"a (very) large number of clients on a single machine or compute cluster. " +"The easiest way to test the new functionality is to look at the two new " +"code examples called `quickstart_simulation` and `simulation_pytorch`." +msgstr "" + +#: ../../source/ref-changelog.md:861 +msgid "" +"The feature is still experimental, so there's no stability guarantee for " +"the API. It's also not quite ready for prime time and comes with a few " +"known caveats. However, those who are curious are encouraged to try it " +"out and share their thoughts." 
+msgstr "" + +#: ../../source/ref-changelog.md:863 +msgid "" +"**New built-in strategies** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" +msgstr "" + +#: ../../source/ref-changelog.md:865 +msgid "" +"FedYogi - Federated learning strategy using Yogi on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" +msgstr "" + +#: ../../source/ref-changelog.md:866 +msgid "" +"FedAdam - Federated learning strategy using Adam on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" +msgstr "" + +#: ../../source/ref-changelog.md:868 +msgid "" +"**New PyTorch Lightning code example** " +"([#617](https://github.com/adap/flower/pull/617))" +msgstr "" + +#: ../../source/ref-changelog.md:870 +msgid "" +"**New Variational Auto-Encoder code example** " +"([#752](https://github.com/adap/flower/pull/752))" +msgstr "" + +#: ../../source/ref-changelog.md:872 +msgid "" +"**New scikit-learn code example** " +"([#748](https://github.com/adap/flower/pull/748))" +msgstr "" + +#: ../../source/ref-changelog.md:874 +msgid "" +"**New experimental TensorBoard strategy** " +"([#789](https://github.com/adap/flower/pull/789))" +msgstr "" + +#: ../../source/ref-changelog.md:878 +msgid "" +"Improved advanced TensorFlow code example " +"([#769](https://github.com/adap/flower/pull/769))" +msgstr "" + +#: ../../source/ref-changelog.md:879 +msgid "" +"Warning when `min_available_clients` is misconfigured " +"([#830](https://github.com/adap/flower/pull/830))" +msgstr "" + +#: ../../source/ref-changelog.md:880 +msgid "" +"Improved gRPC server docs " +"([#841](https://github.com/adap/flower/pull/841))" +msgstr "" + +#: ../../source/ref-changelog.md:881 +msgid "" +"Improved error message in `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" +msgstr "" + +#: ../../source/ref-changelog.md:882 +msgid "" +"Improved PyTorch quickstart code example " 
+"([#852](https://github.com/adap/flower/pull/852))" +msgstr "" + +#: ../../source/ref-changelog.md:886 +msgid "" +"**Disabled final distributed evaluation** " +"([#800](https://github.com/adap/flower/pull/800))" +msgstr "" + +#: ../../source/ref-changelog.md:888 +msgid "" +"Prior behaviour was to perform a final round of distributed evaluation on" +" all connected clients, which is often not required (e.g., when using " +"server-side evaluation). The prior behaviour can be enabled by passing " +"`force_final_distributed_eval=True` to `start_server`." +msgstr "" + +#: ../../source/ref-changelog.md:890 +msgid "" +"**Renamed q-FedAvg strategy** " +"([#802](https://github.com/adap/flower/pull/802))" +msgstr "" + +#: ../../source/ref-changelog.md:892 +msgid "" +"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " +"the notation given in the original paper (q-FFL is the optimization " +"objective, q-FedAvg is the proposed solver). Note the original (now " +"deprecated) `QffedAvg` class is still available for compatibility reasons" +" (it will be removed in a future release)." +msgstr "" + +#: ../../source/ref-changelog.md:894 +msgid "" +"**Deprecated and renamed code example** `simulation_pytorch` **to** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" +msgstr "" + +#: ../../source/ref-changelog.md:896 +msgid "" +"This example has been replaced by a new example. The new example is based" +" on the experimental virtual client engine, which will become the new " +"default way of doing most types of large-scale simulations in Flower. The" +" existing example was kept for reference purposes, but it might be " +"removed in the future." 
+msgstr "" + +#: ../../source/ref-changelog.md:898 +msgid "v0.16.0 (2021-05-11)" +msgstr "" + +#: ../../source/ref-changelog.md:902 +msgid "" +"**New built-in strategies** " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "" + +#: ../../source/ref-changelog.md:904 +msgid "(abstract) FedOpt" +msgstr "" + +#: ../../source/ref-changelog.md:907 +msgid "" +"**Custom metrics for server and strategies** " +"([#717](https://github.com/adap/flower/pull/717))" +msgstr "" + +#: ../../source/ref-changelog.md:909 +msgid "" +"The Flower server is now fully task-agnostic, all remaining instances of " +"task-specific metrics (such as `accuracy`) have been replaced by custom " +"metrics dictionaries. Flower 0.15 introduced the capability to pass a " +"dictionary containing custom metrics from client to server. As of this " +"release, custom metrics replace task-specific metrics on the server." +msgstr "" + +#: ../../source/ref-changelog.md:911 +msgid "" +"Custom metric dictionaries are now used in two user-facing APIs: they are" +" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " +"they enable evaluation functions passed to built-in strategies (via " +"`eval_fn`) to return more than two evaluation metrics. Strategies can " +"even return *aggregated* metrics dictionaries for the server to keep " +"track of." +msgstr "" + +#: ../../source/ref-changelog.md:913 +msgid "" +"Strategy implementations should migrate their `aggregate_fit` and " +"`aggregate_evaluate` methods to the new return type (e.g., by simply " +"returning an empty `{}`), server-side evaluation functions should migrate" +" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +msgstr "" + +#: ../../source/ref-changelog.md:915 +msgid "" +"Flower 0.15-style return types are deprecated (but still supported), " +"compatibility will be removed in a future release." 
+msgstr "" + +#: ../../source/ref-changelog.md:917 +msgid "" +"**Migration warnings for deprecated functionality** " +"([#690](https://github.com/adap/flower/pull/690))" +msgstr "" + +#: ../../source/ref-changelog.md:919 +msgid "" +"Earlier versions of Flower were often migrated to new APIs, while " +"maintaining compatibility with legacy APIs. This release introduces " +"detailed warning messages if usage of deprecated APIs is detected. The " +"new warning messages often provide details on how to migrate to more " +"recent APIs, thus easing the transition from one release to another." +msgstr "" + +#: ../../source/ref-changelog.md:921 +msgid "" +"Improved docs and docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" +msgstr "" + +#: ../../source/ref-changelog.md:923 +msgid "MXNet example and documentation" +msgstr "" + +#: ../../source/ref-changelog.md:925 +msgid "" +"FedBN implementation in example PyTorch: From Centralized To Federated " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" +msgstr "" + +#: ../../source/ref-changelog.md:929 +msgid "" +"**Serialization-agnostic server** " +"([#721](https://github.com/adap/flower/pull/721))" +msgstr "" + +#: ../../source/ref-changelog.md:931 +msgid "" +"The Flower server is now fully serialization-agnostic. Prior usage of " +"class `Weights` (which represents parameters as deserialized NumPy " +"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " +"`Parameters` objects are fully serialization-agnostic and represents " +"parameters as byte arrays, the `tensor_type` attributes indicates how " +"these byte arrays should be interpreted (e.g., for " +"serialization/deserialization)." 
+msgstr "" + +#: ../../source/ref-changelog.md:933 +msgid "" +"Built-in strategies implement this approach by handling serialization and" +" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " +"implementations should update to the slightly changed Strategy method " +"definitions. Strategy authors can consult PR " +"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" +" easily migrate to the new format." +msgstr "" + +#: ../../source/ref-changelog.md:935 +msgid "" +"Deprecated `flwr.server.Server.evaluate`, use " +"`flwr.server.Server.evaluate_round` instead " +"([#717](https://github.com/adap/flower/pull/717))" +msgstr "" + +#: ../../source/ref-changelog.md:937 +msgid "v0.15.0 (2021-03-12)" +msgstr "" + +#: ../../source/ref-changelog.md:941 +msgid "" +"**Server-side parameter initialization** " +"([#658](https://github.com/adap/flower/pull/658))" +msgstr "" + +#: ../../source/ref-changelog.md:943 +msgid "" +"Model parameters can now be initialized on the server-side. Server-side " +"parameter initialization works via a new `Strategy` method called " +"`initialize_parameters`." +msgstr "" + +#: ../../source/ref-changelog.md:945 +msgid "" +"Built-in strategies support a new constructor argument called " +"`initial_parameters` to set the initial parameters. Built-in strategies " +"will provide these initial parameters to the server on startup and then " +"delete them to free the memory afterwards." +msgstr "" + +#: ../../source/ref-changelog.md:964 +msgid "" +"If no initial parameters are provided to the strategy, the server will " +"continue to use the current behaviour (namely, it will ask one of the " +"connected clients for its parameters and use these as the initial global " +"parameters)." 
+msgstr "" + +#: ../../source/ref-changelog.md:966 +msgid "Deprecations" +msgstr "" + +#: ../../source/ref-changelog.md:968 +msgid "" +"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " +"`flwr.server.strategy.FedAvg`, which is equivalent)" +msgstr "" + +#: ../../source/ref-changelog.md:970 +msgid "v0.14.0 (2021-02-18)" +msgstr "" + +#: ../../source/ref-changelog.md:974 +msgid "" +"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" +msgstr "" + +#: ../../source/ref-changelog.md:976 +msgid "" +"Clients can now return an additional dictionary mapping `str` keys to " +"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " +"This means one can return almost arbitrary values from `fit`/`evaluate` " +"and make use of them on the server side!" +msgstr "" + +#: ../../source/ref-changelog.md:978 +msgid "" +"This improvement also allowed for more consistent return types between " +"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " +"dict)` representing the loss, number of examples, and a dictionary " +"holding arbitrary problem-specific values like accuracy." +msgstr "" + +#: ../../source/ref-changelog.md:980 +msgid "" +"In case you wondered: this feature is compatible with existing projects, " +"the additional dictionary return value is optional. New code should " +"however migrate to the new return types to be compatible with upcoming " +"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " +"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " +"details." 
+msgstr "" + +#: ../../source/ref-changelog.md:982 +msgid "" +"*Code example:* note the additional dictionary return values in both " +"`FlwrClient.fit` and `FlwrClient.evaluate`:" +msgstr "" + +#: ../../source/ref-changelog.md:997 +msgid "" +"**Generalized** `config` **argument in** `Client.fit` **and** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +msgstr "" + +#: ../../source/ref-changelog.md:999 +msgid "" +"The `config` argument used to be of type `Dict[str, str]`, which means " +"that dictionary values were expected to be strings. The new release " +"generalizes this to enable values of the following types: `bool`, " +"`bytes`, `float`, `int`, `str`." +msgstr "" + +#: ../../source/ref-changelog.md:1001 +msgid "" +"This means one can now pass almost arbitrary values to `fit`/`evaluate` " +"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" +"side and `int(config[\"epochs\"])` on the client side!" +msgstr "" + +#: ../../source/ref-changelog.md:1003 +msgid "" +"*Code example:* note that the `config` dictionary now contains non-`str` " +"values in both `Client.fit` and `Client.evaluate`:" +msgstr "" + +#: ../../source/ref-changelog.md:1020 +msgid "v0.13.0 (2021-01-08)" +msgstr "" + +#: ../../source/ref-changelog.md:1024 +msgid "" +"New example: PyTorch From Centralized To Federated " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "" + +#: ../../source/ref-changelog.md:1025 +msgid "Improved documentation" +msgstr "" + +#: ../../source/ref-changelog.md:1026 +msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" +msgstr "" + +#: ../../source/ref-changelog.md:1027 +msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +msgstr "" + +#: ../../source/ref-changelog.md:1028 +msgid "" +"Updated examples documentation " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "" + +#: ../../source/ref-changelog.md:1029 +msgid "" +"Removed obsolete 
documentation " +"([#548](https://github.com/adap/flower/pull/548))" +msgstr "" + +#: ../../source/ref-changelog.md:1031 +msgid "Bugfix:" +msgstr "" + +#: ../../source/ref-changelog.md:1033 +msgid "" +"`Server.fit` does not disconnect clients when finished, disconnecting the" +" clients is now handled in `flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." +msgstr "" + +#: ../../source/ref-changelog.md:1035 +msgid "v0.12.0 (2020-12-07)" +msgstr "" + +#: ../../source/ref-changelog.md:1037 ../../source/ref-changelog.md:1053 +msgid "Important changes:" +msgstr "" + +#: ../../source/ref-changelog.md:1039 +msgid "" +"Added an example for embedded devices " +"([#507](https://github.com/adap/flower/pull/507))" +msgstr "" + +#: ../../source/ref-changelog.md:1040 +msgid "" +"Added a new NumPyClient (in addition to the existing KerasClient) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" +msgstr "" + +#: ../../source/ref-changelog.md:1041 +msgid "" +"Deprecated `flwr_example` package and started to migrate examples into " +"the top-level `examples` directory " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" +msgstr "" + +#: ../../source/ref-changelog.md:1043 +msgid "v0.11.0 (2020-11-30)" +msgstr "" + +#: ../../source/ref-changelog.md:1045 +msgid "Incompatible changes:" +msgstr "" + +#: ../../source/ref-changelog.md:1047 +msgid "" +"Renamed strategy methods " +"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " +"Flower's public APIs. Other public methods/functions (e.g., every method " +"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " +"which is why we're removing it from the four methods in Strategy. 
To " +"migrate rename the following `Strategy` methods accordingly:" +msgstr "" + +#: ../../source/ref-changelog.md:1048 +msgid "`on_configure_evaluate` => `configure_evaluate`" +msgstr "" + +#: ../../source/ref-changelog.md:1049 +msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" +msgstr "" + +#: ../../source/ref-changelog.md:1050 +msgid "`on_configure_fit` => `configure_fit`" +msgstr "" + +#: ../../source/ref-changelog.md:1051 +msgid "`on_aggregate_fit` => `aggregate_fit`" +msgstr "" + +#: ../../source/ref-changelog.md:1055 +msgid "" +"Deprecated `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). To migrate use " +"`FedAvg` instead." +msgstr "" + +#: ../../source/ref-changelog.md:1056 +msgid "" +"Simplified examples and baselines " +"([#484](https://github.com/adap/flower/pull/484))." +msgstr "" + +#: ../../source/ref-changelog.md:1057 +msgid "" +"Removed presently unused `on_conclude_round` from strategy interface " +"([#483](https://github.com/adap/flower/pull/483))." +msgstr "" + +#: ../../source/ref-changelog.md:1058 +msgid "" +"Set minimal Python version to 3.6.1 instead of 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." +msgstr "" + +#: ../../source/ref-changelog.md:1059 +msgid "" +"Improved `Strategy` docstrings " +"([#470](https://github.com/adap/flower/pull/470))." +msgstr "" + +#: ../../source/ref-example-projects.rst:2 +msgid "Example projects" +msgstr "" + +#: ../../source/ref-example-projects.rst:4 +msgid "" +"Flower comes with a number of usage examples. The examples demonstrate " +"how Flower can be used to federate different kinds of existing machine " +"learning pipelines, usually leveraging popular machine learning " +"frameworks such as `PyTorch `_ or `TensorFlow " +"`_." +msgstr "" + +#: ../../source/ref-example-projects.rst:10 +msgid "" +"The following examples are available as standalone projects. 
Quickstart " +"TensorFlow/Keras ---------------------------" +msgstr "" + +#: ../../source/ref-example-projects.rst:14 +msgid "" +"The TensorFlow/Keras quickstart example shows CIFAR-10 image " +"classification with MobileNetV2:" +msgstr "" + +#: ../../source/ref-example-projects.rst:17 +msgid "" +"`Quickstart TensorFlow (Code) " +"`_" +msgstr "" + +#: ../../source/ref-example-projects.rst:18 +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +msgstr "" + +#: ../../source/ref-example-projects.rst:19 +msgid "" +"`Quickstart TensorFlow (Blog Post) `_" +msgstr "" + +#: ../../source/ref-example-projects.rst:23 +#: ../../source/tutorial-quickstart-pytorch.rst:5 +msgid "Quickstart PyTorch" +msgstr "" + +#: ../../source/ref-example-projects.rst:25 +msgid "" +"The PyTorch quickstart example shows CIFAR-10 image classification with a" +" simple Convolutional Neural Network:" +msgstr "" + +#: ../../source/ref-example-projects.rst:28 +msgid "" +"`Quickstart PyTorch (Code) " +"`_" +msgstr "" + +#: ../../source/ref-example-projects.rst:29 +msgid ":doc:`Quickstart PyTorch (Tutorial) `" +msgstr "" + +#: ../../source/ref-example-projects.rst:33 +msgid "PyTorch: From Centralized To Federated" +msgstr "" + +#: ../../source/ref-example-projects.rst:35 +msgid "" +"This example shows how a regular PyTorch project can be federated using " +"Flower:" +msgstr "" + +#: ../../source/ref-example-projects.rst:37 +msgid "" +"`PyTorch: From Centralized To Federated (Code) " +"`_" +msgstr "" + +#: ../../source/ref-example-projects.rst:38 +msgid "" +":doc:`PyTorch: From Centralized To Federated (Tutorial) `" +msgstr "" + +#: ../../source/ref-example-projects.rst:42 +msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" +msgstr "" + +#: ../../source/ref-example-projects.rst:44 +msgid "" +"This example shows how Flower can be used to build a federated learning " +"system that run across Raspberry Pi and Nvidia Jetson:" +msgstr "" + +#: ../../source/ref-example-projects.rst:46 +msgid "" 
+"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " +"`_" +msgstr "" + +#: ../../source/ref-example-projects.rst:47 +msgid "" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " +"`_" +msgstr "" + +#: ../../source/ref-faq.rst:4 +msgid "" +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." +msgstr "" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +msgstr "" + +#: ../../source/ref-faq.rst:8 +msgid "" +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. Here's a quickstart example:" +msgstr "" + +#: ../../source/ref-faq.rst:10 +msgid "" +"`Flower simulation PyTorch " +"`_" +msgstr "" + +#: ../../source/ref-faq.rst:11 +msgid "" +"`Flower simulation TensorFlow/Keras " +"`_" +msgstr "" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" +msgstr "" + +#: ../../source/ref-faq.rst:15 +msgid "" +"Find the `blog post about federated learning on embedded device here " +"`_" +" and the corresponding `GitHub code example " +"`_." +msgstr "" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +msgstr "" + +#: ../../source/ref-faq.rst:19 +msgid "" +"Yes, it does. Please take a look at our `blog post " +"`_ or check out the code examples:" +msgstr "" + +#: ../../source/ref-faq.rst:21 +msgid "" +"`Android Kotlin example `_" +msgstr "" + +#: ../../source/ref-faq.rst:22 +msgid "`Android Java example `_" +msgstr "" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +msgstr "" + +#: ../../source/ref-faq.rst:26 +msgid "" +"Yes, of course. 
A list of available examples using Flower within a " +"blockchain environment is available here:" +msgstr "" + +#: ../../source/ref-faq.rst:28 +msgid "" +"`Flower meets Nevermined GitHub Repository `_." +msgstr "" + +#: ../../source/ref-faq.rst:29 +msgid "" +"`Flower meets Nevermined YouTube video " +"`_." +msgstr "" + +#: ../../source/ref-faq.rst:30 +msgid "" +"`Flower meets KOSMoS `_." +msgstr "" + +#: ../../source/ref-faq.rst:31 +msgid "" +"`Flower meets Talan blog post `_ ." +msgstr "" + +#: ../../source/ref-faq.rst:32 +msgid "" +"`Flower meets Talan GitHub Repository " +"`_ ." +msgstr "" + +#: ../../source/ref-telemetry.md:1 +msgid "Telemetry" +msgstr "" + +#: ../../source/ref-telemetry.md:3 +msgid "" +"The Flower open-source project collects **anonymous** usage metrics to " +"make well-informed decisions to improve Flower. Doing this enables the " +"Flower team to understand how Flower is used and what challenges users " +"might face." +msgstr "" + +#: ../../source/ref-telemetry.md:5 +msgid "" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users that do not want to share anonymous usage metrics." +msgstr "" + +#: ../../source/ref-telemetry.md:7 +msgid "Principles" +msgstr "" + +#: ../../source/ref-telemetry.md:9 +msgid "We follow strong principles guarding anonymous usage metrics collection:" +msgstr "" + +#: ../../source/ref-telemetry.md:11 +msgid "" +"**Optional:** You will always be able to disable telemetry; read on to " +"learn “[How to opt-out](#how-to-opt-out)”." +msgstr "" + +#: ../../source/ref-telemetry.md:12 +msgid "" +"**Anonymous:** The reported usage metrics are anonymous and do not " +"contain any personally identifiable information (PII). See “[Collected " +"metrics](#collected-metrics)” to understand what metrics are being " +"reported." 
+msgstr "" + +#: ../../source/ref-telemetry.md:13 +msgid "" +"**Transparent:** You can easily inspect what anonymous metrics are being " +"reported; see the section “[How to inspect what is being reported](#how-" +"to-inspect-what-is-being-reported)”" +msgstr "" + +#: ../../source/ref-telemetry.md:14 +msgid "" +"**Open for feedback:** You can always reach out to us if you have " +"feedback; see the section “[How to contact us](#how-to-contact-us)” for " +"details." +msgstr "" + +#: ../../source/ref-telemetry.md:16 +msgid "How to opt-out" +msgstr "" + +#: ../../source/ref-telemetry.md:18 +msgid "" +"When Flower starts, it will check for an environment variable called " +"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " +"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " +"client, simply do so by prepending your command as in:" +msgstr "" + +#: ../../source/ref-telemetry.md:24 +msgid "" +"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," +" `.bashrc` (or whatever configuration file applies to your environment) " +"to disable Flower telemetry permanently." +msgstr "" + +#: ../../source/ref-telemetry.md:26 +msgid "Collected metrics" +msgstr "" + +#: ../../source/ref-telemetry.md:28 +msgid "Flower telemetry collects the following metrics:" +msgstr "" + +#: ../../source/ref-telemetry.md:30 +msgid "" +"**Flower version.** Understand which versions of Flower are currently " +"being used. This helps us to decide whether we should invest effort into " +"releasing a patch version for an older version of Flower or instead use " +"the bandwidth to build new features." 
+msgstr "" + +#: ../../source/ref-telemetry.md:32 +msgid "" +"**Operating system.** Enables us to answer questions such as: *Should we " +"create more guides for Linux, macOS, or Windows?*" +msgstr "" + +#: ../../source/ref-telemetry.md:34 +msgid "" +"**Python version.** Knowing the Python version helps us, for example, to " +"decide whether we should invest effort into supporting old versions of " +"Python or stop supporting them and start taking advantage of new Python " +"features." +msgstr "" + +#: ../../source/ref-telemetry.md:36 +msgid "" +"**Hardware properties.** Understanding the hardware environment that " +"Flower is being used in helps to decide whether we should, for example, " +"put more effort into supporting low-resource environments." +msgstr "" + +#: ../../source/ref-telemetry.md:38 +msgid "" +"**Execution mode.** Knowing what execution mode Flower starts in enables " +"us to understand how heavily certain features are being used and better " +"prioritize based on that." +msgstr "" + +#: ../../source/ref-telemetry.md:40 +msgid "" +"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " +"time a Flower workload starts. This allows us to understand which device " +"types not only start Flower workloads but also successfully complete " +"them." +msgstr "" + +#: ../../source/ref-telemetry.md:42 +msgid "" +"**Source.** Flower telemetry tries to store a random source ID in " +"`~/.flwr/source` the first time a telemetry event is generated. The " +"source ID is important to identify whether an issue is recurring or " +"whether an issue is triggered by multiple clusters running concurrently " +"(which often happens in simulation). For example, if a device runs " +"multiple workloads at the same time, and this results in an issue, then, " +"in order to reproduce the issue, multiple workloads must be started at " +"the same time." +msgstr "" + +#: ../../source/ref-telemetry.md:44 +msgid "" +"You may delete the source ID at any time. 
If you wish for all events " +"logged under a specific source ID to be deleted, you can send a deletion " +"request mentioning the source ID to `telemetry@flower.ai`. All events " +"related to that source ID will then be permanently deleted." +msgstr "" + +#: ../../source/ref-telemetry.md:46 +msgid "" +"We will not collect any personally identifiable information. If you think" +" any of the metrics collected could be misused in any way, please [get in" +" touch with us](#how-to-contact-us). We will update this page to reflect " +"any changes to the metrics collected and publish changes in the " +"changelog." +msgstr "" + +#: ../../source/ref-telemetry.md:48 +msgid "" +"If you think other metrics would be helpful for us to better guide our " +"decisions, please let us know! We will carefully review them; if we are " +"confident that they do not compromise user privacy, we may add them." +msgstr "" + +#: ../../source/ref-telemetry.md:50 +msgid "How to inspect what is being reported" +msgstr "" + +#: ../../source/ref-telemetry.md:52 +msgid "" +"We wanted to make it very easy for you to inspect what anonymous usage " +"metrics are reported. You can view all the reported telemetry information" +" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " +"is disabled by default. You may use logging independently from " +"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " +"without sending any metrics." +msgstr "" + +#: ../../source/ref-telemetry.md:58 +msgid "" +"The inspect Flower telemetry without sending any anonymous usage metrics," +" use both environment variables:" +msgstr "" + +#: ../../source/ref-telemetry.md:64 +msgid "How to contact us" +msgstr "" + +#: ../../source/ref-telemetry.md:66 +msgid "" +"We want to hear from you. 
If you have any feedback or ideas on how to " +"improve the way we handle anonymous usage metrics, reach out to us via " +"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " +"(`telemetry@flower.ai`)." +msgstr "" + +#: ../../source/tutorial-quickstart-android.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an Android " +"app using Flower." +msgstr "" + +#: ../../source/tutorial-quickstart-android.rst:5 +msgid "Quickstart Android" +msgstr "" + +#: ../../source/tutorial-quickstart-android.rst:10 +msgid "" +"Let's build a federated learning system using TFLite and Flower on " +"Android!" +msgstr "" + +#: ../../source/tutorial-quickstart-android.rst:12 +msgid "" +"Please refer to the `full code example " +"`_ to learn " +"more." +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with FastAI to train a vision model on CIFAR-10." +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:5 +msgid "Quickstart fastai" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:10 +msgid "Let's build a federated learning system using fastai and Flower!" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:12 +msgid "" +"Please refer to the `full code example " +"`_ " +"to learn more." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:-1 +msgid "" +"Check out this Federating Learning quickstart tutorial for using Flower " +"with HuggingFace Transformers in order to fine-tune an LLM." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:5 +msgid "Quickstart 🤗 Transformers" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:10 +msgid "" +"Let's build a federated learning system using Hugging Face Transformers " +"and Flower!" 
+msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:12 +msgid "" +"We will leverage Hugging Face to federate the training of language models" +" over multiple clients using Flower. More specifically, we will fine-tune" +" a pre-trained Transformer model (distilBERT) for sequence classification" +" over a dataset of IMDB ratings. The end goal is to detect if a movie " +"rating is positive or negative." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:18 +msgid "Dependencies" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:20 +msgid "" +"To follow along this tutorial you will need to install the following " +"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, " +":code:`torch`, and :code:`transformers`. This can be done using " +":code:`pip`:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:30 +msgid "Standard Hugging Face workflow" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:33 +msgid "Handling the data" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:35 +msgid "" +"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " +"library. We then need to tokenize the data and create :code:`PyTorch` " +"dataloaders, this is all done in the :code:`load_data` function:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:81 +msgid "Training and testing the model" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:83 +msgid "" +"Once we have a way of creating our trainloader and testloader, we can " +"take care of the training and testing. 
This is very similar to any " +":code:`PyTorch` training or testing loop:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:121 +msgid "Creating the model itself" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:123 +msgid "" +"To create the model itself, we will just load the pre-trained distillBERT" +" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:136 +msgid "Federating the example" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:139 +msgid "Creating the IMDBClient" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:141 +msgid "" +"To federate our example to multiple clients, we first need to write our " +"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). " +"This is very easy, as our model is a standard :code:`PyTorch` model:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:169 +msgid "" +"The :code:`get_parameters` function lets the server get the client's " +"parameters. Inversely, the :code:`set_parameters` function allows the " +"server to send its parameters to the client. Finally, the :code:`fit` " +"function trains the model locally for the client, and the " +":code:`evaluate` function tests the model locally and returns the " +"relevant metrics." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:175 +msgid "Starting the server" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:177 +msgid "" +"Now that we have a way to instantiate clients, we need to create our " +"server in order to aggregate the results. 
Using Flower, this can be done " +"very easily by first choosing a strategy (here, we are using " +":code:`FedAvg`, which will define the global weights as the average of " +"all the clients' weights at each round) and then using the " +":code:`flwr.server.start_server` function:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:205 +msgid "" +"The :code:`weighted_average` function is there to provide a way to " +"aggregate the metrics distributed amongst the clients (basically this " +"allows us to display a nice average accuracy and loss for every round)." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:209 +msgid "Putting everything together" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:211 +msgid "We can now start client instances using:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:221 +msgid "" +"And they will be able to connect to the server and start the federated " +"training." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:223 +msgid "" +"If you want to check out everything put together, you should check out " +"the `full code example `_ ." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:226 +msgid "" +"Of course, this is a very basic example, and a lot can be added or " +"modified, it was just to showcase how simply we could federate a Hugging " +"Face workflow using Flower." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:229 +msgid "" +"Note that in this example we used :code:`PyTorch`, but we could have very" +" well used :code:`TensorFlow`." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an iOS app " +"using Flower to train a neural network on MNIST." 
+msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:5 +msgid "Quickstart iOS" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:10 +msgid "" +"In this tutorial we will learn how to train a Neural Network on MNIST " +"using Flower and CoreML on iOS devices." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:12 +msgid "" +"First of all, for running the Flower Python server, it is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:15 +msgid "" +"Our example consists of one Python *server* and two iPhone *clients* that" +" all have the same model." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:17 +msgid "" +"*Clients* are responsible for generating individual weight updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:21 +msgid "" +"Now that we have a rough idea of what is going on, let's get started to " +"setup our Flower server environment. We first need to install Flower. 
You" +" can do this by using pip:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:27 +msgid "Or Poetry:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:34 +#: ../../source/tutorial-quickstart-pytorch.rst:37 +#: ../../source/tutorial-quickstart-scikitlearn.rst:40 +#: ../../source/tutorial-quickstart-tensorflow.rst:29 +#: ../../source/tutorial-quickstart-xgboost.rst:55 +msgid "Flower Client" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:36 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training using CoreML as our local training pipeline and " +"MNIST as our dataset. For simplicity reasons we will use the complete " +"Flower client with CoreML, that has been implemented and stored inside " +"the Swift SDK. The client implementation can be seen below:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:72 +msgid "" +"Let's create a new application project in Xcode and add :code:`flwr` as a" +" dependency in your project. For our application, we will store the logic" +" of our app in :code:`FLiOSModel.swift` and the UI elements in " +":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" +" in this quickstart. Please refer to the `full code example " +"`_ to learn more " +"about the app." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:75 +msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:83 +msgid "" +"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " +"will be bundled inside the application during deployment to your iOS " +"device. We need to pass the url to access mlmodel and run CoreML machine " +"learning processes, it can be retrieved by calling the function " +":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " +"into :code:`MLBatchProvider` object. 
The preprocessing is done inside " +":code:`DataLoader.swift`." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:99 +msgid "" +"Since CoreML does not allow the model parameters to be seen before " +"training, and accessing the model parameters during or after the training" +" can only be done by specifying the layer name, we need to know this " +"information beforehand, through looking at the model specification, which" +" are written as proto files. The implementation can be seen in " +":code:`MLModelInspect`." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:102 +msgid "" +"After we have all of the necessary information, let's create our Flower " +"client." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:117 +msgid "" +"Then start the Flower gRPC client and start communicating to the server " +"by passing our Flower client to the function :code:`startFlwrGRPC`." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:124 +msgid "" +"That's it for the client. We only have to implement :code:`Client` or " +"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " +"The attribute :code:`hostname` and :code:`port` tells the client which " +"server to connect to. This can be done by entering the hostname and port " +"in the application before clicking the start button to start the " +"federated learning process." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:129 +#: ../../source/tutorial-quickstart-pytorch.rst:203 +#: ../../source/tutorial-quickstart-scikitlearn.rst:167 +#: ../../source/tutorial-quickstart-tensorflow.rst:98 +#: ../../source/tutorial-quickstart-xgboost.rst:309 +msgid "Flower Server" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:131 +#: ../../source/tutorial-quickstart-pytorch.rst:205 +#: ../../source/tutorial-quickstart-tensorflow.rst:100 +msgid "" +"For simple workloads we can start a Flower server and leave all the " +"configuration possibilities at their default values. 
In a file named " +":code:`server.py`, import Flower and start the server:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:142 +#: ../../source/tutorial-quickstart-pytorch.rst:216 +#: ../../source/tutorial-quickstart-scikitlearn.rst:230 +#: ../../source/tutorial-quickstart-tensorflow.rst:112 +msgid "Train the model, federated!" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:144 +#: ../../source/tutorial-quickstart-pytorch.rst:218 +#: ../../source/tutorial-quickstart-tensorflow.rst:114 +#: ../../source/tutorial-quickstart-xgboost.rst:525 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. FL systems usually have a server and " +"multiple clients. We therefore have to start the server first:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:152 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Build and run the client through your Xcode, one through Xcode" +" Simulator and the other by deploying it to your iPhone. To see more " +"about how to deploy your app to iPhone or Simulator visit `here " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:156 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system in your ios device. The full `source code " +"`_ for this " +"example can be found in :code:`examples/ios`." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Jax to train a linear regression model on a scikit-learn dataset." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:5 +msgid "Quickstart JAX" +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Pandas to perform Federated Analytics." 
+msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:5 +msgid "Quickstart Pandas" +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:10 +msgid "Let's build a federated analytics system using Pandas and Flower!" +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:12 +msgid "" +"Please refer to the `full code example " +"`_ " +"to learn more." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch to train a CNN model on MNIST." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:13 +msgid "" +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR10 using Flower and PyTorch." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:15 +#: ../../source/tutorial-quickstart-xgboost.rst:39 +msgid "" +"First of all, it is recommended to create a virtual environment and run " +"everything within a :doc:`virtualenv `." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:17 +#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +msgid "" +"Our example consists of one *server* and two *clients* all having the " +"same model." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:19 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:23 +msgid "" +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. 
You can do this by running :" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:29 +msgid "" +"Since we want to use PyTorch to solve a computer vision task, let's go " +"ahead and install PyTorch and the **torchvision** library:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:39 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. Our training " +"procedure and network architecture are based on PyTorch's `Deep Learning " +"with PyTorch " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:41 +msgid "" +"In a file called :code:`client.py`, import Flower and PyTorch related " +"packages:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:56 +msgid "In addition, we define the device allocation in PyTorch with:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:62 +msgid "" +"We use PyTorch to load CIFAR10, a popular colored image classification " +"dataset for machine learning. The PyTorch :code:`DataLoader()` downloads " +"the training and test data that are then normalized." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:78 +msgid "" +"Define the loss and optimizer with PyTorch. The training of the dataset " +"is done by looping over the dataset, measure the corresponding loss and " +"optimize it." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:94 +msgid "" +"Define then the validation of the machine learning network. We loop over" +" the test set and measure the loss and accuracy of the test set." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:113 +msgid "" +"After defining the training and testing of a PyTorch machine learning " +"model, we use the functions for the Flower clients." 
+msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:115 +msgid "" +"The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 " +"Minute Blitz':" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:142 +msgid "" +"After loading the data set with :code:`load_data()` we define the Flower " +"interface." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:144 +#: ../../source/tutorial-quickstart-tensorflow.rst:54 +msgid "" +"The Flower server interacts with clients through an interface called " +":code:`Client`. When the server selects a particular client for training," +" it sends training instructions over the network. The client receives " +"those instructions and calls one of the :code:`Client` methods to run " +"your code (i.e., to train the neural network we defined earlier)." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:150 +msgid "" +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses PyTorch. 
Implementing :code:`NumPyClient` usually means " +"defining the following methods (:code:`set_parameters` is optional " +"though):" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:156 +#: ../../source/tutorial-quickstart-scikitlearn.rst:119 +msgid "return the model weight as a list of NumPy ndarrays" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:157 +#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +msgid ":code:`set_parameters` (optional)" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:158 +#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +msgid "" +"update the local model weights with the parameters received from the " +"server" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:160 +#: ../../source/tutorial-quickstart-scikitlearn.rst:124 +msgid "set the local model weights" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:161 +#: ../../source/tutorial-quickstart-scikitlearn.rst:125 +msgid "train the local model" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:162 +#: ../../source/tutorial-quickstart-scikitlearn.rst:126 +msgid "receive the updated local model weights" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:164 +#: ../../source/tutorial-quickstart-scikitlearn.rst:128 +msgid "test the local model" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:166 +msgid "which can be implemented in the following way:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:189 +#: ../../source/tutorial-quickstart-tensorflow.rst:82 +msgid "" +"We can now create an instance of our class :code:`CifarClient` and add " +"one line to actually run this client:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:196 +#: ../../source/tutorial-quickstart-tensorflow.rst:90 +msgid "" +"That's it for the client. We only have to implement :code:`Client` or " +":code:`NumPyClient` and call :code:`fl.client.start_client()`. 
If you " +"implement a client of type :code:`NumPyClient` you'll need to first call " +"its :code:`to_client()` method. The string :code:`\"[::]:8080\"` tells " +"the client which server to connect to. In our case we can run the server " +"and the client on the same machine, therefore we use " +":code:`\"[::]:8080\"`. If we run a truly federated workload with the " +"server and clients running on different machines, all that needs to " +"change is the :code:`server_address` we point the client at." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:226 +#: ../../source/tutorial-quickstart-scikitlearn.rst:239 +#: ../../source/tutorial-quickstart-tensorflow.rst:122 +#: ../../source/tutorial-quickstart-xgboost.rst:533 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Open a new terminal and start the first client:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:233 +#: ../../source/tutorial-quickstart-scikitlearn.rst:246 +#: ../../source/tutorial-quickstart-tensorflow.rst:129 +#: ../../source/tutorial-quickstart-xgboost.rst:540 +msgid "Open another terminal and start the second client:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:239 +#: ../../source/tutorial-quickstart-scikitlearn.rst:252 +#: ../../source/tutorial-quickstart-xgboost.rst:546 +msgid "" +"Each client will have its own dataset. You should now see how the " +"training does in the very first terminal (the one that started the " +"server):" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:271 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in :code:`examples" +"/quickstart-pytorch`." 
+msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch Lightning to train an Auto Encoder model on MNIST." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 +msgid "Quickstart PyTorch Lightning" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 +msgid "" +"Let's build a horizontal federated learning system using PyTorch " +"Lightning and Flower!" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +msgid "" +"Please refer to the `full code example " +"`_ to learn more." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with scikit-learn to train a linear regression model." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:5 +msgid "Quickstart scikit-learn" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:10 +msgid "" +"In this tutorial, we will learn how to train a :code:`Logistic " +"Regression` model on MNIST using Flower and scikit-learn." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +msgid "" +"It is recommended to create a virtual environment and run everything " +"within this :doc:`virtualenv `." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:16 +msgid "" +"*Clients* are responsible for generating individual model parameter " +"updates for the model based on their local datasets. These updates are " +"then sent to the *server* which will aggregate them to produce an updated" +" global model. Finally, the *server* sends this improved version of the " +"model back to each *client*. A complete cycle of parameters updates is " +"called a *round*." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +msgid "" +"Now that we have a rough idea of what is going on, let's get started. 
We " +"first need to install Flower. You can do this by running:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:26 +msgid "Since we want to use scikit-learn, let's go ahead and install it:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:32 +msgid "Or simply install all dependencies using Poetry:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:42 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. However, before " +"setting up the client and server, we will define all functionalities that" +" we need for our federated learning setup within :code:`utils.py`. The " +":code:`utils.py` contains different functions defining all the machine " +"learning basics:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 +msgid ":code:`get_model_parameters()`" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:46 +msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:47 +msgid ":code:`set_model_params()`" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:48 +msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:50 +msgid ":code:`set_initial_params()`" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:50 +msgid "Initializes the model parameters that the Flower server will ask for" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +msgid "" +"Please check out :code:`utils.py` `here " +"`_ for more details. The pre-defined functions are used in" +" the :code:`client.py` and imported. 
The :code:`client.py` also requires " +"to import several packages such as Flower and scikit-learn:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:67 +msgid "" +"Prior to local training, we need to load the MNIST dataset, a popular " +"image classification dataset of handwritten digits for machine learning, " +"and partition the dataset for FL. This can be conveniently achieved using" +" `Flower Datasets `_. The " +":code:`FederatedDataset.load_partition()` method loads the partitioned " +"training set for each partition ID defined in the :code:`--partition-id` " +"argument." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:95 +msgid "" +"Next, the logistic regression model is defined and initialized with " +":code:`utils.set_initial_params()`." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:107 +msgid "" +"The Flower server interacts with clients through an interface called " +":code:`Client`. When the server selects a particular client for training," +" it sends training instructions over the network. The client receives " +"those instructions and calls one of the :code:`Client` methods to run " +"your code (i.e., to fit the logistic regression we defined earlier)." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:113 +msgid "" +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses scikit-learn. 
Implementing :code:`NumPyClient` usually " +"means defining the following methods (:code:`set_parameters` is optional " +"though):" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:122 +msgid "is directly imported with :code:`utils.set_model_params()`" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:130 +msgid "The methods can be implemented in the following way:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:153 +msgid "" +"We can now create an instance of our class :code:`MnistClient` and add " +"one line to actually run this client:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:160 +msgid "" +"That's it for the client. We only have to implement :code:`Client` or " +":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " +"implement a client of type :code:`NumPyClient` you'll need to first call " +"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" +" the client which server to connect to. In our case we can run the server" +" and the client on the same machine, therefore we use " +":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " +"server and clients running on different machines, all that needs to " +"change is the :code:`server_address` we pass to the client." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:169 +msgid "" +"The following Flower server is a little bit more advanced and returns an " +"evaluation function for the server-side evaluation. First, we import " +"again all required libraries such as Flower and scikit-learn." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:172 +msgid ":code:`server.py`, import Flower and start the server:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +msgid "" +"The number of federated learning rounds is set in :code:`fit_round()` and" +" the evaluation is defined in :code:`get_evaluate_fn()`. 
The evaluation " +"function is called after each federated learning round and gives you " +"information about loss and accuracy. Note that we also make use of Flower" +" Datasets here to load the test split of the MNIST dataset for server-" +"side evaluation." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:213 +msgid "" +"The :code:`main` contains the server-side parameter initialization " +":code:`utils.set_initial_params()` as well as the aggregation strategy " +":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " +"federated averaging (or FedAvg), with two clients and evaluation after " +"each federated learning round. The server can be started with the command" +" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:232 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. Federated learning systems usually have a " +"server and multiple clients. We, therefore, have to start the server " +"first:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:286 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in :code:`examples/sklearn-logreg-" +"mnist`." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with TensorFlow to train a MobilNetV2 model on CIFAR-10." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:5 +msgid "Quickstart TensorFlow" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:13 +msgid "Let's build a federated learning system in less than 20 lines of code!" 
+msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:15 +msgid "Before Flower can be imported we have to install it:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:21 +msgid "" +"Since we want to use the Keras API of TensorFlow (TF), we have to install" +" TF as well:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:31 +msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:38 +msgid "" +"We use the Keras utilities of TF to load CIFAR10, a popular colored image" +" classification dataset for machine learning. The call to " +":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches " +"it locally, and then returns the entire training and test set as NumPy " +"ndarrays." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:47 +msgid "" +"Next, we need a model. For the purpose of this tutorial, we use " +"MobilNetV2 with 10 output classes:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:60 +msgid "" +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses Keras. The :code:`NumPyClient` interface defines three " +"methods which can be implemented in the following way:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:135 +msgid "Each client will have its own dataset." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:137 +msgid "" +"You should now see how the training does in the very first terminal (the " +"one that started the server):" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:169 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this can be found in :code:`examples" +"/quickstart-tensorflow/client.py`." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with XGBoost to train classification models on trees." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:5 +msgid "Quickstart XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:14 +msgid "Federated XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:16 +msgid "" +"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " +"implementation of gradient-boosted decision tree (**GBDT**), that " +"maximises the computational boundaries for boosted tree methods. It's " +"primarily designed to enhance both the performance and computational " +"speed of machine learning models. In XGBoost, trees are constructed " +"concurrently, unlike the sequential approach taken by GBDT." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:20 +msgid "" +"Often, for tabular data on medium-sized datasets with fewer than 10k " +"training examples, XGBoost surpasses the results of deep learning " +"techniques." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:23 +msgid "Why federated XGBoost?" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:25 +msgid "" +"Indeed, as the demand for data privacy and decentralized learning grows, " +"there's an increasing requirement to implement federated XGBoost systems " +"for specialised applications, like survival analysis and financial fraud " +"detection." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:27 +msgid "" +"Federated learning ensures that raw data remains on the local device, " +"making it an attractive approach for sensitive domains where data " +"security and privacy are paramount. Given the robustness and efficiency " +"of XGBoost, combining it with federated learning offers a promising " +"solution for these specific challenges." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:30 +msgid "" +"In this tutorial we will learn how to train a federated XGBoost model on " +"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " +"example (`full code xgboost-quickstart " +"`_)" +" with two *clients* and one *server* to demonstrate how federated XGBoost" +" works, and then we dive into a more complex example (`full code xgboost-" +"comprehensive `_) to run various experiments." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:37 +msgid "Environment Setup" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:41 +msgid "" +"We first need to install Flower and Flower Datasets. You can do this by " +"running :" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:47 +msgid "" +"Since we want to use :code:`xgboost` package to build up XGBoost trees, " +"let's go ahead and install :code:`xgboost`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:57 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. Now that we have all our " +"dependencies installed, let's run a simple distributed training with two " +"clients and one server." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:60 +msgid "" +"In a file called :code:`client.py`, import xgboost, Flower, Flower " +"Datasets and other related functions:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:87 +msgid "Dataset partition and hyper-parameter selection" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:89 +msgid "" +"Prior to local training, we require loading the HIGGS dataset from Flower" +" Datasets and conduct data partitioning for FL:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:102 +msgid "" +"In this example, we split the dataset into two partitions with uniform " +"distribution (:code:`IidPartitioner(num_partitions=2)`). 
Then, we load " +"the partition for the given client based on :code:`node_id`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:121 +msgid "" +"After that, we do train/test splitting on the given partition (client's " +"local data), and transform data format for :code:`xgboost` package." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:134 +msgid "" +"The functions of :code:`train_test_split` and " +":code:`transform_dataset_to_dmatrix` are defined as below:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:158 +msgid "Finally, we define the hyper-parameters used for XGBoost training." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:174 +msgid "" +"The :code:`num_local_round` represents the number of iterations for local" +" tree boost. We use CPU for the training in default. One can shift it to " +"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " +"evaluation metric." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:181 +msgid "Flower client definition for XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:183 +msgid "" +"After loading the dataset we define the Flower client. We follow the " +"general rule to define :code:`XgbClient` class inherited from " +":code:`fl.client.Client`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:193 +msgid "" +"The :code:`self.bst` is used to keep the Booster objects that remain " +"consistent across rounds, allowing them to store predictions from trees " +"integrated in earlier rounds and maintain other essential data structures" +" for training." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:196 +msgid "" +"Then, we override :code:`get_parameters`, :code:`fit` and " +":code:`evaluate` methods insides :code:`XgbClient` class as follows." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:210 +msgid "" +"Unlike neural network training, XGBoost trees are not started from a " +"specified random weights. In this case, we do not use " +":code:`get_parameters` and :code:`set_parameters` to initialise model " +"parameters for XGBoost. As a result, let's return an empty tensor in " +":code:`get_parameters` when it is called by the server at the first " +"round." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:251 +msgid "" +"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " +"up the first set of trees. the returned Booster object and config are " +"stored in :code:`self.bst` and :code:`self.config`, respectively. From " +"the second round, we load the global model sent from server to " +":code:`self.bst`, and then update model weights on local training data " +"with function :code:`local_boost` as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:269 +msgid "" +"Given :code:`num_local_round`, we update trees by calling " +":code:`self.bst.update` method. After training, the last " +":code:`N=num_local_round` trees will be extracted to send to the server." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:291 +msgid "" +"In :code:`evaluate`, we call :code:`self.bst.eval_set` function to " +"conduct evaluation on valid set. The AUC value will be returned." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:294 +msgid "" +"Now, we can create an instance of our class :code:`XgbClient` and add one" +" line to actually run this client:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:300 +msgid "" +"That's it for the client. We only have to implement :code:`Client`and " +"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " +"tells the client which server to connect to. 
In our case we can run the " +"server and the client on the same machine, therefore we use " +":code:`\"[::]:8080\"`. If we run a truly federated workload with the " +"server and clients running on different machines, all that needs to " +"change is the :code:`server_address` we point the client at." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:311 +msgid "" +"These updates are then sent to the *server* which will aggregate them to " +"produce a better model. Finally, the *server* sends this improved version" +" of the model back to each *client* to finish a complete FL round." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:314 +msgid "" +"In a file named :code:`server.py`, import Flower and FedXgbBagging from " +":code:`flwr.server.strategy`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:316 +msgid "We first define a strategy for XGBoost bagging aggregation." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:339 +msgid "" +"We use two clients for this example. An " +":code:`evaluate_metrics_aggregation` function is defined to collect and " +"weighted average the AUC values from clients." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:342 +msgid "Then, we start the server:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:354 +msgid "Tree-based bagging aggregation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:356 +msgid "" +"You must be curious about how bagging aggregation works. Let's look into " +"the details." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:358 +msgid "" +"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " +":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." 
+" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " +"and :code:`evaluate` methods as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:454 +msgid "" +"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " +"trees by calling :code:`aggregate()` function:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:513 +msgid "" +"In this function, we first fetch the number of trees and the number of " +"parallel trees for the current and previous model by calling " +":code:`_get_tree_nums`. Then, the fetched information will be aggregated." +" After that, the trees (containing model weights) are aggregated to " +"generate a new tree model." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:518 +msgid "" +"After traversal of all clients' models, a new global model is generated, " +"followed by the serialisation, and sending back to each client." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:523 +msgid "Launch Federated XGBoost!" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:585 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"XGBoost system. The AUC values can be checked in " +":code:`metrics_distributed`. One can see that the average AUC increases " +"over FL rounds." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:590 +msgid "" +"The full `source code `_ for this example can be found in :code:`examples" +"/xgboost-quickstart`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:594 +msgid "Comprehensive Federated XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:596 +msgid "" +"Now that you have known how federated XGBoost work with Flower, it's time" +" to run some more comprehensive experiments by customising the " +"experimental settings. 
In the xgboost-comprehensive example (`full code " +"`_), we provide more options to define various experimental" +" setups, including aggregation strategies, data partitioning and " +"centralised/distributed evaluation. We also support :doc:`Flower " +"simulation ` making it easy to simulate large " +"client cohorts in a resource-aware manner. Let's take a look!" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:603 +msgid "Cyclic training" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:605 +msgid "" +"In addition to bagging aggregation, we offer a cyclic training scheme, " +"which performs FL in a client-by-client fashion. Instead of aggregating " +"multiple clients, there is only one single client participating in the " +"training per round in the cyclic training scenario. The trained local " +"XGBoost trees will be passed to the next client as an initialised model " +"for next round's boosting." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:609 +msgid "" +"To do this, we first customise a :code:`ClientManager` in " +":code:`server_utils.py`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:649 +msgid "" +"The customised :code:`ClientManager` samples all available clients in " +"each FL round based on the order of connection to the server. Then, we " +"define a new strategy :code:`FedXgbCyclic` in " +":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " +"select only one client in given round and pass the received model to next" +" client." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:690 +msgid "" +"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " +"Instead, we just make a copy of the received client model as global model" +" by overriding :code:`aggregate_fit`." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:693 +msgid "" +"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" +" methods ensure the clients to be sequentially selected given FL round:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:757 +msgid "Customised data partitioning" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:759 +msgid "" +"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" +" to instantiate the data partitioner based on the given " +":code:`num_partitions` and :code:`partitioner_type`. Currently, we " +"provide four supported partitioner type to simulate the uniformity/non-" +"uniformity in data quantity (uniform, linear, square, exponential)." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:790 +msgid "Customised centralised/distributed evaluation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:792 +msgid "" +"To facilitate centralised evaluation, we define a function in " +":code:`server_utils.py`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:824 +msgid "" +"This function returns a evaluation function which instantiates a " +":code:`Booster` object and loads the global model weights to it. The " +"evaluation is conducted by calling :code:`eval_set()` method, and the " +"tested AUC value is reported." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:827 +msgid "" +"As for distributed evaluation on the clients, it's same as the quick-" +"start example by overriding the :code:`evaluate()` method insides the " +":code:`XgbClient` class in :code:`client_utils.py`." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:831 +msgid "Flower simulation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:832 +msgid "" +"We also provide an example code (:code:`sim.py`) to use the simulation " +"capabilities of Flower to simulate federated XGBoost training on either a" +" single machine or a cluster of machines." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:866 +msgid "" +"After importing all required packages, we define a :code:`main()` " +"function to perform the simulation process:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:921 +msgid "" +"We first load the dataset and perform data partitioning, and the pre-" +"processed data is stored in a :code:`list`. After the simulation begins, " +"the clients won't need to pre-process their partitions again." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:924 +msgid "Then, we define the strategies and other hyper-parameters:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:975 +msgid "" +"After that, we start the simulation by calling " +":code:`fl.simulation.start_simulation`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:995 +msgid "" +"One of key parameters for :code:`start_simulation` is :code:`client_fn` " +"which returns a function to construct a client. We define it as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1038 +msgid "Arguments parser" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1040 +msgid "" +"In :code:`utils.py`, we define the arguments parsers for clients, server " +"and simulation, allowing users to specify different experimental " +"settings. Let's first see the sever side:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1086 +msgid "" +"This allows user to specify training strategies / the number of total " +"clients / FL rounds / participating clients / clients for evaluation, and" +" evaluation fashion. 
Note that with :code:`--centralised-eval`, the server" +" will do centralised evaluation and all functionalities for client " +"evaluation will be disabled." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1090 +msgid "Then, the argument parser on client side:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1144 +msgid "" +"This defines various options for client data partitioning. Besides, " +"clients also have an option to conduct evaluation on centralised test set" +" by setting :code:`--centralised-eval`, as well as an option to perform " +"scaled learning rate based on the number of clients by setting :code" +":`--scaled-lr`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1148 +msgid "We also have an argument parser for simulation:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1226 +msgid "This integrates all arguments for both client and server sides." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1229 +msgid "Example commands" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1231 +msgid "" +"To run a centralised evaluated experiment with bagging strategy on 5 " +"clients with exponential distribution for 50 rounds, we first start the " +"server as below:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1238 +msgid "Then, on each client terminal, we start the clients:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1244 +msgid "To run the same experiment with Flower simulation:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1250 +msgid "" +"The full `code `_ for this comprehensive example can be found in" +" :code:`examples/xgboost-comprehensive`." 
+msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 +msgid "Build a strategy from scratch" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +msgid "" +"Welcome to the third part of the Flower federated learning tutorial. In " +"previous parts of this tutorial, we introduced federated learning with " +"PyTorch and Flower (`part 1 `__) and we learned how strategies " +"can be used to customize the execution on both the server and the clients" +" (`part 2 `__)." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll continue to customize the federated learning " +"system we built previously by creating a custom version of FedAvg (again," +" using `Flower `__ and `PyTorch " +"`__)." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the Flower community on Slack to connect, ask questions, and get help: " +"`Join Slack `__ 🌼 We'd love to hear from " +"you in the ``#introductions`` channel! And if anything is unclear, head " +"over to the ``#questions`` channel." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:17 +msgid "Let's build a new ``Strategy`` from scratch!" 
+msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:29 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:29 +msgid "Preparation" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:31 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:32 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:31 +msgid "" +"Before we begin with the actual code, let's make sure that we have " +"everything we need." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:43 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:44 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:43 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:43 +msgid "Installing dependencies" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:45 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:46 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:45 +msgid "First, we install the necessary packages:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:65 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:66 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:65 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:65 +msgid "" +"Now that we have all dependencies installed, we can import everything we " +"need for this tutorial:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:101 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:102 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:101 +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: 
``Runtime > Change runtime type > Hardware accelerator: " +"GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:114 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:115 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:114 +msgid "Data loading" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:116 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:116 +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``. We introduce a new parameter" +" ``num_clients`` which allows us to call ``load_datasets`` with different" +" numbers of clients." 
+msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:167 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:168 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:167 +msgid "Model training/evaluation" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:169 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:170 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +msgid "" +"Let's continue with the usual model definition (including " +"``set_parameters`` and ``get_parameters``), training and test functions:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:258 +msgid "Flower client" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:260 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:260 +msgid "" +"To implement the Flower client, we (again) create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " +"``cid`` to the client and use it log additional details:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:308 +msgid "Let's test what we have so far before we continue:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:339 +msgid "Build a Strategy from scratch" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:341 +msgid "" +"Let’s overwrite the ``configure_fit`` method such that it passes a higher" +" learning rate (potentially also other hyperparameters) to the optimizer " +"of a fraction of the clients. 
We will keep the sampling of the clients as" +" it is in ``FedAvg`` and then change the configuration dictionary (one of" +" the ``FitIns`` attributes)." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:507 +msgid "" +"The only thing left is to use the newly created custom Strategy " +"``FedCustom`` when starting the experiment:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:534 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:932 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:697 +msgid "Recap" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:536 +msgid "" +"In this notebook, we’ve seen how to implement a custom strategy. A custom" +" strategy enables granular control over client node configuration, result" +" aggregation, and more. To define a custom strategy, you only have to " +"overwrite the abstract methods of the (abstract) base class ``Strategy``." +" To make custom strategies even more powerful, you can pass custom " +"functions to the constructor of your new class (``__init__``) and then " +"call these functions whenever needed." 
+msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:550 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:948 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:729 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:715 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 +msgid "" +"Before you continue, make sure to join the Flower community on Slack: " +"`Join Slack `__" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:552 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:950 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:717 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 +msgid "" +"There's a dedicated ``#questions`` channel if you need help, but we'd " +"also love to hear who you are in ``#introductions``!" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:554 +msgid "" +"The `Flower Federated Learning Tutorial - Part 4 " +"`__ introduces ``Client``, the flexible API underlying " +"``NumPyClient``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 +msgid "Customize the client" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 +msgid "" +"Welcome to the fourth part of the Flower federated learning tutorial. In " +"the previous parts of this tutorial, we introduced federated learning " +"with PyTorch and Flower (`part 1 `__), we learned how " +"strategies can be used to customize the execution on both the server and " +"the clients (`part 2 `__), and we built our own " +"custom strategy from scratch (`part 3 `__)." 
+msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 +msgid "" +"In this notebook, we revisit ``NumPyClient`` and introduce a new " +"baseclass for building clients, simply named ``Client``. In previous " +"parts of this tutorial, we've based our client on ``NumPyClient``, a " +"convenience class which makes it easy to work with machine learning " +"libraries that have good NumPy interoperability. With ``Client``, we gain" +" a lot of flexibility that we didn't have before, but we'll also have to " +"do a few things that we didn't have to do before." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:18 +msgid "" +"Let's go deeper and see what it takes to move from ``NumPyClient`` to " +"``Client``!" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:30 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:29 +msgid "Step 0: Preparation" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:117 +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:259 +msgid "Step 1: Revisiting NumPyClient" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:261 +msgid "" +"So far, we've implemented our client by subclassing " +"``flwr.client.NumPyClient``. The three methods we implemented are " +"``get_parameters``, ``fit``, and ``evaluate``. Finally, we wrap the " +"creation of instances of this class in a function called ``client_fn``:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:309 +msgid "" +"We've seen this before, there's nothing new so far. 
The only *tiny* " +"difference compared to the previous notebook is naming, we've changed " +"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " +"``numpyclient_fn``. Let's run it to see the output we get:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:339 +msgid "" +"This works as expected, two clients are training for three rounds of " +"federated learning." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:341 +msgid "" +"Let's dive a little bit deeper and discuss how Flower executes this " +"simulation. Whenever a client is selected to do some work, " +"``start_simulation`` calls the function ``numpyclient_fn`` to create an " +"instance of our ``FlowerNumPyClient`` (along with loading the model and " +"the data)." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:343 +msgid "" +"But here's the perhaps surprising part: Flower doesn't actually use the " +"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " +"makes it look like a subclass of ``flwr.client.Client``, not " +"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " +"know how to handle ``NumPyClient``'s, it only knows how to handle " +"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " +"top of ``Client``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:345 +msgid "" +"Instead of building on top of ``NumPyClient``, we can directly build on " +"top of ``Client``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:357 +msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:359 +msgid "" +"Let's try to do the same thing using ``Client`` instead of " +"``NumPyClient``." 
+msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:465 +msgid "" +"Before we discuss the code in more detail, let's try to run it! Gotta " +"make sure our new ``Client``-based client works, right?" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:490 +msgid "" +"That's it, we're now using ``Client``. It probably looks similar to what " +"we've done with ``NumPyClient``. So what's the difference?" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:492 +msgid "" +"First of all, it's more code. But why? The difference comes from the fact" +" that ``Client`` expects us to take care of parameter serialization and " +"deserialization. For Flower to be able to send parameters over the " +"network, it eventually needs to turn these parameters into ``bytes``. " +"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " +"serialization. Turning raw bytes into something more useful (like NumPy " +"``ndarray``'s) is called deserialization. Flower needs to do both: it " +"needs to serialize parameters on the server-side and send them to the " +"client, the client needs to deserialize them to use them for local " +"training, and then serialize the updated parameters again to send them " +"back to the server, which (finally!) deserializes them again in order to " +"aggregate them with the updates received from other clients." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:495 +msgid "" +"The only *real* difference between Client and NumPyClient is that " +"NumPyClient takes care of serialization and deserialization for you. It " +"can do so because it expects you to return parameters as NumPy ndarray's," +" and it knows how to handle these. This makes working with machine " +"learning libraries that have good NumPy support (most of them) a breeze." 
+msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:497 +msgid "" +"In terms of API, there's one major difference: all methods in Client take" +" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " +"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " +"``NumPyClient`` on the other hand have multiple arguments (e.g., " +"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" +" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " +"``NumPyClient.fit``) if there are multiple things to handle. These " +"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " +"values you're used to from ``NumPyClient``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:510 +msgid "Step 3: Custom serialization" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:512 +msgid "" +"Here we will explore how to implement custom serialization with a simple " +"example." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:514 +msgid "" +"But first what is serialization? Serialization is just the process of " +"converting an object into raw bytes, and equally as important, " +"deserialization is the process of converting raw bytes back into an " +"object. This is very useful for network communication. Indeed, without " +"serialization, you could not just a Python object through the internet." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:516 +msgid "" +"Federated Learning relies heavily on internet communication for training " +"by sending Python objects back and forth between the clients and the " +"server. This means that serialization is an essential part of Federated " +"Learning." 
+msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:518 +msgid "" +"In the following section, we will write a basic example where instead of " +"sending a serialized version of our ``ndarray``\\ s containing our " +"parameters, we will first convert the ``ndarray`` into sparse matrices, " +"before sending them. This technique can be used to save bandwidth, as in " +"certain cases where the weights of a model are sparse (containing many 0 " +"entries), converting them to a sparse matrix can greatly improve their " +"bytesize." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:521 +msgid "Our custom serialization/deserialization functions" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:523 +msgid "" +"This is where the real serialization/deserialization will happen, " +"especially in ``ndarray_to_sparse_bytes`` for serialization and " +"``sparse_bytes_to_ndarray`` for deserialization." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:525 +msgid "" +"Note that we imported the ``scipy.sparse`` library in order to convert " +"our arrays." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:613 +msgid "Client-side" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 +msgid "" +"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " +"will just have to call our custom functions in our " +"``flwr.client.Client``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:617 +msgid "" +"Indeed, in ``get_parameters`` we need to serialize the parameters we got " +"from our network using our custom ``ndarrays_to_sparse_parameters`` " +"defined above." 
+msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:619 +msgid "" +"In ``fit``, we first need to deserialize the parameters coming from the " +"server using our custom ``sparse_parameters_to_ndarrays`` and then we " +"need to serialize our local results with " +"``ndarrays_to_sparse_parameters``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:621 +msgid "" +"In ``evaluate``, we will only need to deserialize the global parameters " +"with our custom function." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:725 +msgid "Server-side" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:727 +msgid "" +"For this example, we will just use ``FedAvg`` as a strategy. To change " +"the serialization and deserialization here, we only need to reimplement " +"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" +" functions of the strategy will be inherited from the super class " +"``FedAvg``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:729 +msgid "As you can see only one line as change in ``evaluate``:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:735 +msgid "" +"And for ``aggregate_fit``, we will first deserialize every result we " +"received:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:744 +msgid "And then serialize the aggregated result:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:903 +msgid "We can now run our custom serialization example!" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:934 +msgid "" +"In this part of the tutorial, we've seen how we can build clients by " +"subclassing either ``NumPyClient`` or ``Client``. 
``NumPyClient`` is a " +"convenience abstraction that makes it easier to work with machine " +"learning libraries that have good NumPy interoperability. ``Client`` is a" +" more flexible abstraction that allows us to do things that are not " +"possible in ``NumPyClient``. In order to do so, it requires us to handle " +"parameter serialization and deserialization ourselves." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:952 +msgid "" +"This is the final part of the Flower tutorial (for now!), " +"congratulations! You're now well equipped to understand the rest of the " +"documentation. There are many topics we didn't cover in the tutorial, we " +"recommend the following resources:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:954 +msgid "`Read Flower Docs `__" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:955 +msgid "" +"`Check out Flower Code Examples " +"`__" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:956 +msgid "" +"`Use Flower Baselines for your research " +"`__" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:957 +msgid "" +"`Watch Flower Summit 2023 videos `__" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 +msgid "Get started with Flower" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 +msgid "Welcome to the Flower federated learning tutorial!" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll build a federated learning system using Flower, " +"`Flower Datasets `__ and PyTorch. In " +"part 1, we use PyTorch for the model training pipeline and data loading. " +"In part 2, we continue to federate the PyTorch-based pipeline using " +"Flower." 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:17 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 +msgid "Let's get started!" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:31 +msgid "" +"Before we begin with any actual code, let's make sure that we have " +"everything we need." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:45 +msgid "" +"Next, we install the necessary packages for PyTorch (``torch`` and " +"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " +"(``flwr``):" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:105 +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" +" GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:118 +msgid "Loading the data" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:120 +msgid "" +"Federated learning can be applied to many different types of tasks across" +" different domains. In this tutorial, we introduce federated learning by " +"training a simple convolutional neural network (CNN) on the popular " +"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " +"distinguish between images from ten different classes: 'airplane', " +"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " +"'truck'." 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:131 +msgid "" +"We simulate having multiple datasets from multiple organizations (also " +"called the \"cross-silo\" setting in federated learning) by splitting the" +" original CIFAR-10 dataset into multiple partitions. Each partition will " +"represent the data from a single organization. We're doing this purely " +"for experimentation purposes, in the real world there's no need for data " +"splitting because each organization already has their own data (so the " +"data is naturally partitioned)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:133 +msgid "" +"Each organization will act as a client in the federated learning system. " +"So having ten organizations participate in a federation means having ten " +"clients connected to the federated learning server." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:144 +msgid "" +"Let's now create the Federated Dataset abstraction that from ``flwr-" +"datasets`` that partitions the CIFAR-10. We will create small training " +"and test set for each edge device and wrap each of them into a PyTorch " +"``DataLoader``:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:198 +msgid "" +"We now have a list of ten training sets and ten validation sets " +"(``trainloaders`` and ``valloaders``) representing the data of ten " +"different organizations. Each ``trainloader``/``valloader`` pair contains" +" 4500 training examples and 500 validation examples. There's also a " +"single ``testloader`` (we did not split the test set). Again, this is " +"only necessary for building research or educational systems, actual " +"federated learning systems have their data naturally distributed across " +"multiple partitions." 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:201 +msgid "" +"Let's take a look at the first batch of images and labels in the first " +"training set (i.e., ``trainloaders[0]``) before we move on:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:240 +msgid "" +"The output above shows a random batch of images from the first " +"``trainloader`` in our list of ten ``trainloaders``. It also prints the " +"labels associated with each image (i.e., one of the ten possible labels " +"we've seen above). If you run the cell again, you should see another " +"batch of images." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:252 +msgid "Step 1: Centralized Training with PyTorch" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:263 +msgid "" +"Next, we're going to use PyTorch to define a simple convolutional neural " +"network. This introduction assumes basic familiarity with PyTorch, so it " +"doesn't cover the PyTorch-related aspects in full detail. If you want to " +"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " +"MINUTE BLITZ " +"`__." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:275 +msgid "Defining the model" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:277 +msgid "" +"We use the simple CNN described in the `PyTorch tutorial " +"`__:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:314 +msgid "Let's continue with the usual training and test functions:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:374 +msgid "Training the model" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:376 +msgid "" +"We now have all the basic building blocks we need: a dataset, a model, a " +"training function, and a test function. 
Let's put them together to train " +"the model on the dataset of one of our organizations " +"(``trainloaders[0]``). This simulates the reality of most machine " +"learning projects today: each organization has their own data and trains " +"models only on this internal data:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +msgid "" +"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " +"in a test set accuracy of about 41%, which is not good, but at the same " +"time, it doesn't really matter for the purposes of this tutorial. The " +"intent was just to show a simplistic centralized training pipeline that " +"sets the stage for what comes next - federated learning!" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 +msgid "Step 2: Federated Learning with Flower" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 +msgid "" +"Step 1 demonstrated a simple centralized training pipeline. All data was " +"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." +" Next, we'll simulate a situation where we have multiple datasets in " +"multiple organizations and where we train a model over these " +"organizations using federated learning." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 +msgid "Updating model parameters" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 +msgid "" +"In federated learning, the server sends the global model parameters to " +"the client, and the client updates the local model with the parameters " +"received from the server. It then trains the model on the local data " +"(which changes the model parameters locally) and sends the " +"updated/changed model parameters back to the server (or, alternatively, " +"it sends just the gradients back to the server, not the full model " +"parameters)." 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 +msgid "" +"We need two helper functions to update the local model with parameters " +"received from the server and to get the updated model parameters from the" +" local model: ``set_parameters`` and ``get_parameters``. The following " +"two functions do just that for the PyTorch model above." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +msgid "" +"The details of how this works are not really important here (feel free to" +" consult the PyTorch documentation if you want to learn more). In " +"essence, we use ``state_dict`` to access PyTorch model parameter tensors." +" The parameter tensors are then converted to/from a list of NumPy " +"ndarray's (which Flower knows how to serialize/deserialize):" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 +msgid "Implementing a Flower client" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 +msgid "" +"With that out of the way, let's move on to the interesting part. " +"Federated learning systems consist of a server and multiple clients. In " +"Flower, we create clients by implementing subclasses of " +"``flwr.client.Client`` or ``flwr.client.NumPyClient``. We use " +"``NumPyClient`` in this tutorial because it is easier to implement and " +"requires us to write less boilerplate." 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +msgid "" +"To implement the Flower client, we create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 +msgid "``get_parameters``: Return the current local model parameters" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +msgid "" +"``fit``: Receive model parameters from the server, train the model " +"parameters on the local data, and return the (updated) model parameters " +"to the server" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 +msgid "" +"``evaluate``: Receive model parameters from the server, evaluate the " +"model parameters on the local data, and return the evaluation result to " +"the server" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +msgid "" +"We mentioned that our clients will use the previously defined PyTorch " +"components for model training and evaluation. Let's see a simple Flower " +"client implementation that brings everything together:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 +msgid "" +"Our class ``FlowerClient`` defines how local training/evaluation will be " +"performed and allows Flower to call the local training/evaluation through" +" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" +" *single client* in our federated learning system. Federated learning " +"systems have multiple clients (otherwise, there's not much to federate), " +"so each client will be represented by its own instance of " +"``FlowerClient``. If we have, for example, three clients in our workload," +" then we'd have three instances of ``FlowerClient``. 
Flower calls " +"``FlowerClient.fit`` on the respective instance when the server selects a" +" particular client for training (and ``FlowerClient.evaluate`` for " +"evaluation)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:517 +msgid "Using the Virtual Client Engine" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:519 +msgid "" +"In this notebook, we want to simulate a federated learning system with 10" +" clients on a single machine. This means that the server and all 10 " +"clients will live on a single machine and share resources such as CPU, " +"GPU, and memory. Having 10 clients would mean having 10 instances of " +"``FlowerClient`` in memory. Doing this on a single machine can quickly " +"exhaust the available memory resources, even if only a subset of these " +"clients participates in a single round of federated learning." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:521 +msgid "" +"In addition to the regular capabilities where server and clients run on " +"multiple machines, Flower, therefore, provides special simulation " +"capabilities that create ``FlowerClient`` instances only when they are " +"actually necessary for training or evaluation. To enable the Flower " +"framework to create clients when necessary, we need to implement a " +"function called ``client_fn`` that creates a ``FlowerClient`` instance on" +" demand. Flower calls ``client_fn`` whenever it needs an instance of one " +"particular client to call ``fit`` or ``evaluate`` (those instances are " +"usually discarded after use, so they should not keep any local state). " +"Clients are identified by a client ID, or short ``cid``. 
The ``cid`` can " +"be used, for example, to load different local data partitions for " +"different clients, as can be seen below:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:556 +msgid "Starting the training" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:558 +msgid "" +"We now have the class ``FlowerClient`` which defines client-side " +"training/evaluation and ``client_fn`` which allows Flower to create " +"``FlowerClient`` instances whenever it needs to call ``fit`` or " +"``evaluate`` on one particular client. The last step is to start the " +"actual simulation using ``flwr.simulation.start_simulation``." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:560 +msgid "" +"The function ``start_simulation`` accepts a number of arguments, amongst " +"them the ``client_fn`` used to create ``FlowerClient`` instances, the " +"number of clients to simulate (``num_clients``), the number of federated " +"learning rounds (``num_rounds``), and the strategy. The strategy " +"encapsulates the federated learning approach/algorithm, for example, " +"*Federated Averaging* (FedAvg)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:562 +msgid "" +"Flower has a number of built-in strategies, but we can also use our own " +"strategy implementations to customize nearly all aspects of the federated" +" learning approach. For this example, we use the built-in ``FedAvg`` " +"implementation and customize it using a few basic parameters. The last " +"step is the actual call to ``start_simulation`` which - you guessed it - " +"starts the simulation:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:608 +msgid "Behind the scenes" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:610 +msgid "So how does this work? How does Flower execute this simulation?" 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:612 +#, python-format +msgid "" +"When we call ``start_simulation``, we tell Flower that there are 10 " +"clients (``num_clients=10``). Flower then goes ahead an asks the " +"``FedAvg`` strategy to select clients. ``FedAvg`` knows that it should " +"select 100% of the available clients (``fraction_fit=1.0``), so it goes " +"ahead and selects 10 random clients (i.e., 100% of 10)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:614 +msgid "" +"Flower then asks the selected 10 clients to train the model. When the " +"server receives the model parameter updates from the clients, it hands " +"those updates over to the strategy (*FedAvg*) for aggregation. The " +"strategy aggregates those updates and returns the new global model, which" +" then gets used in the next round of federated learning." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:626 +msgid "Where's the accuracy?" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 +msgid "" +"You may have noticed that all metrics except for ``losses_distributed`` " +"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 +msgid "" +"Flower can automatically aggregate losses returned by individual clients," +" but it cannot do the same for metrics in the generic metrics dictionary " +"(the one with the ``accuracy`` key). Metrics dictionaries can contain " +"very different kinds of metrics and even key/value pairs that are not " +"metrics at all, so the framework does not (and can not) know how to " +"handle these automatically." 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632 +msgid "" +"As users, we need to tell the framework how to handle/aggregate these " +"custom metrics, and we do so by passing metric aggregation functions to " +"the strategy. The strategy will then call these functions whenever it " +"receives fit or evaluate metrics from clients. The two possible functions" +" are ``fit_metrics_aggregation_fn`` and " +"``evaluate_metrics_aggregation_fn``." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 +msgid "" +"Let's create a simple weighted averaging function to aggregate the " +"``accuracy`` metric we return from ``evaluate``:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:660 +msgid "" +"The only thing left to do is to tell the strategy to call this function " +"whenever it receives evaluation metric dictionaries from the clients:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:697 +msgid "" +"We now have a full system that performs federated training and federated " +"evaluation. It uses the ``weighted_average`` function to aggregate custom" +" evaluation metrics and calculates a single ``accuracy`` metric across " +"all clients on the server side." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:699 +msgid "" +"The other two categories of metrics (``losses_centralized`` and " +"``metrics_centralized``) are still empty because they only apply when " +"centralized evaluation is being used. Part two of the Flower tutorial " +"will cover centralized evaluation." 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 +msgid "Final remarks" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +msgid "" +"Congratulations, you just trained a convolutional neural network, " +"federated over 10 clients! With that, you understand the basics of " +"federated learning with Flower. The same approach you've seen can be used" +" with other machine learning frameworks (not just PyTorch) and tasks (not" +" just CIFAR-10 images classification), for example NLP with Hugging Face " +"Transformers or speech with SpeechBrain." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:715 +msgid "" +"In the next notebook, we're going to cover some more advanced concepts. " +"Want to customize your strategy? Initialize parameters on the server " +"side? Or evaluate the aggregated model on the server side? We'll cover " +"all this and more in the next tutorial." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 +msgid "" +"The `Flower Federated Learning Tutorial - Part 2 " +"`__ goes into more depth about strategies and all " +"the advanced things you can build with them." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 +msgid "Use a federated learning strategy" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 +msgid "" +"Welcome to the next part of the federated learning tutorial. In previous " +"parts of this tutorial, we introduced federated learning with PyTorch and" +" Flower (`part 1 `__)." 
+msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll begin to customize the federated learning system " +"we built in the introductory notebook (again, using `Flower " +"`__ and `PyTorch `__)." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 +msgid "Let's move beyond FedAvg with Flower strategies!" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:309 +msgid "Strategy customization" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:311 +msgid "" +"So far, everything should look familiar if you've worked through the " +"introductory notebook. With that, we're ready to introduce a number of " +"new features." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 +msgid "Server-side parameter **initialization**" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:325 +msgid "" +"Flower, by default, initializes the global model by asking one random " +"client for the initial parameters. In many cases, we want more control " +"over parameter initialization though. Flower therefore allows you to " +"directly pass the initial parameters to the Strategy:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:370 +msgid "" +"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" +" from asking one of the clients for the initial parameters. If we look " +"closely, we can see that the logs do not show any calls to the " +"``FlowerClient.get_parameters`` method." 
+msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:382 +msgid "Starting with a customized strategy" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:384 +msgid "" +"We've seen the function ``start_simulation`` before. It accepts a number " +"of arguments, amongst them the ``client_fn`` used to create " +"``FlowerClient`` instances, the number of clients to simulate " +"``num_clients``, the number of rounds ``num_rounds``, and the strategy." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:386 +msgid "" +"The strategy encapsulates the federated learning approach/algorithm, for " +"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " +"strategy this time:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:424 +msgid "Server-side parameter **evaluation**" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:426 +msgid "" +"Flower can evaluate the aggregated model on the server-side or on the " +"client-side. Client-side and server-side evaluation are similar in some " +"ways, but different in others." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:428 +msgid "" +"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " +"simple: it works the same way that evaluation in centralized machine " +"learning does. If there is a server-side dataset that can be used for " +"evaluation purposes, then that's great. We can evaluate the newly " +"aggregated model after each round of training without having to send the " +"model to clients. We're also fortunate in the sense that our entire " +"evaluation dataset is available at all times." 
+msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:430 +msgid "" +"**Federated Evaluation** (or *client-side evaluation*) is more complex, " +"but also more powerful: it doesn't require a centralized dataset and " +"allows us to evaluate models over a larger set of data, which often " +"yields more realistic evaluation results. In fact, many scenarios require" +" us to use **Federated Evaluation** if we want to get representative " +"evaluation results at all. But this power comes at a cost: once we start " +"to evaluate on the client side, we should be aware that our evaluation " +"dataset can change over consecutive rounds of learning if those clients " +"are not always available. Moreover, the dataset held by each client can " +"also change over consecutive rounds. This can lead to evaluation results " +"that are not stable, so even if we would not change the model, we'd see " +"our evaluation results fluctuate over consecutive rounds." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:433 +msgid "" +"We've seen how federated evaluation works on the client side (i.e., by " +"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " +"how we can evaluate aggregated model parameters on the server-side:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:490 +msgid "Sending/receiving arbitrary values to/from clients" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:492 +msgid "" +"In some situations, we want to configure client-side execution (training," +" evaluation) from the server-side. One example for that is the server " +"asking the clients to train for a certain number of local epochs. Flower " +"provides a way to send configuration values from the server to the " +"clients using a dictionary. 
Let's look at an example where the clients " +"receive values from the server through the ``config`` parameter in " +"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " +"method receives the configuration dictionary through the ``config`` " +"parameter and can then read values from this dictionary. In this example," +" it reads ``server_round`` and ``local_epochs`` and uses those values to " +"improve the logging and configure the number of local training epochs:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:546 +msgid "" +"So how can we send this config dictionary from server to clients? The " +"built-in Flower Strategies provide way to do this, and it works similarly" +" to the way server-side evaluation works. We provide a function to the " +"strategy, and the strategy calls this function for every round of " +"federated learning:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:576 +msgid "" +"Next, we'll just pass this function to the FedAvg strategy before " +"starting the simulation:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +msgid "" +"As we can see, the client logs now include the current round of federated" +" learning (which they read from the ``config`` dictionary). We can also " +"configure local training to run for one epoch during the first and second" +" round of federated learning, and then for two epochs during the third " +"round." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +msgid "" +"Clients can also return arbitrary values to the server. To do so, they " +"return a dictionary from ``fit`` and/or ``evaluate``. 
We have seen and " +"used this concept throughout this notebook without mentioning it " +"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" +" key/value pair as the third return value in ``evaluate``." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:627 +msgid "Scaling federated learning" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:629 +msgid "" +"As a last step in this notebook, let's see how we can use Flower to " +"experiment with a large number of clients." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:651 +#, python-format +msgid "" +"We now have 1000 partitions, each holding 45 training and 5 validation " +"examples. Given that the number of training examples on each client is " +"quite small, we should probably train the model a bit longer, so we " +"configure the clients to perform 3 local training epochs. We should also " +"adjust the fraction of clients selected for training during each round " +"(we don't want all 1000 clients participating in every round), so we " +"adjust ``fraction_fit`` to ``0.05``, which means that only 5% of " +"available clients (so 50 clients) will be selected for training each " +"round:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:699 +msgid "" +"In this notebook, we've seen how we can gradually enhance our system by " +"customizing the strategy, initializing parameters on the server side, " +"choosing a different strategy, and evaluating models on the server-side. " +"That's quite a bit of flexibility with so little code, right?" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:701 +msgid "" +"In the later sections, we've seen how we can communicate arbitrary values" +" between server and clients to fully customize client-side execution. 
" +"With that capability, we built a large-scale Federated Learning " +"simulation using the Flower Virtual Client Engine and ran an experiment " +"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 +msgid "" +"The `Flower Federated Learning Tutorial - Part 3 " +"`__ shows how to build a fully custom ``Strategy`` from " +"scratch." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 +msgid "What is Federated Learning?" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 +msgid "" +"In this tutorial, you will learn what federated learning is, build your " +"first system in Flower, and gradually extend it. If you work through all " +"parts of the tutorial, you will be able to build advanced federated " +"learning systems that approach the current state of the art in the field." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +msgid "" +"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " +"federated learning. Only a basic understanding of data science and Python" +" programming is assumed." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the open-source Flower community on Slack to connect, ask questions, and " +"get help: `Join Slack `__ 🌼 We'd love to " +"hear from you in the ``#introductions`` channel! And if anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 +msgid "Classic machine learning" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +msgid "" +"Before we begin to discuss federated learning, let us quickly recap how " +"most machine learning works today." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +msgid "" +"In machine learning, we have a model, and we have data. The model could " +"be a neural network (as depicted here), or something else, like classical" +" linear regression." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 +msgid "|191c6b8b5e1d46f99de4872746afa8af|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 +msgid "Model and data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +msgid "" +"We train the model using the data to perform a useful task. A task could " +"be to detect objects in images, transcribe an audio recording, or play a " +"game like Go." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 +msgid "|21b83f3feb024a049617190555a13549|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 +msgid "Train model using data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 +msgid "" +"Now, in practice, the training data we work with doesn't originate on the" +" machine we train the model on. It gets created somewhere else." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 +msgid "" +"It originates on a smartphone by the user interacting with an app, a car " +"collecting sensor data, a laptop receiving input via the keyboard, or a " +"smart speaker listening to someone trying to sing a song." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 +msgid "|0dd15b4df7e3422f88aaf74cb401bfa7|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 +msgid "Data on a phone" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +msgid "" +"What's also important to mention, this \"somewhere else\" is usually not " +"just one place, it's many places. 
It could be several devices all running" +" the same app. But it could also be several organizations, all generating" +" data for the same task." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 +msgid "|60e16f6be7354ca793444e01aa7adf25|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 +msgid "Data is on many devices" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +msgid "" +"So to use machine learning, or any kind of data analysis, the approach " +"that has been used in the past was to collect all data on a central " +"server. This server can be somewhere in a data center, or somewhere in " +"the cloud." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 +msgid "|a7032acbd65948a8beef8bccbbb9b83a|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 +msgid "Central data collection" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 +msgid "" +"Once all the data is collected in one place, we can finally use machine " +"learning algorithms to train our model on the data. This is the machine " +"learning approach that we've basically always relied on." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 +msgid "|dd0e05706e584ee29e07cd39e6af5498|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 +msgid "Central model training" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 +msgid "Challenges of classical machine learning" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +msgid "" +"The classic machine learning approach we've just seen can be used in some" +" cases. Great examples include categorizing holiday photos, or analyzing " +"web traffic. Cases, where all the data is naturally available on a " +"centralized server." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 +msgid "|2a2031018a1c4f81a69ea16df4947bd0|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 +msgid "Centralized possible" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +msgid "" +"But the approach can not be used in many other cases. Cases, where the " +"data is not available on a centralized server, or cases where the data " +"available on one server is not enough to train a good model." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 +msgid "|5e841497933340d3b5c2efbf37e3e6a6|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 +msgid "Centralized impossible" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 +msgid "" +"There are many reasons why the classic centralized machine learning " +"approach does not work for a large number of highly important real-world " +"use cases. Those reasons include:" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +msgid "" +"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " +"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " +"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " +"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " +"protect sensitive data from being moved. In fact, those regulations " +"sometimes even prevent single organizations from combining their own " +"users' data for artificial intelligence training because those users live" +" in different parts of the world, and their data is governed by different" +" data protection regulations." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +msgid "" +"**User preference**: In addition to regulation, there are use cases where" +" users just expect that no data leaves their device, ever. If you type " +"your passwords and credit card info into the digital keyboard of your " +"phone, you don't expect those passwords to end up on the server of the " +"company that developed that keyboard, do you? In fact, that use case was " +"the reason federated learning was invented in the first place." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 +msgid "" +"**Data volume**: Some sensors, like cameras, produce such a high data " +"volume that it is neither feasible nor economic to collect all the data " +"(due to, for example, bandwidth or communication efficiency). Think about" +" a national rail service with hundreds of train stations across the " +"country. If each of these train stations is outfitted with a number of " +"security cameras, the volume of raw on-device data they produce requires " +"incredibly powerful and exceedingly expensive infrastructure to process " +"and store. And most of the data isn't even useful." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 +msgid "Examples where centralized machine learning does not work include:" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +msgid "" +"Sensitive healthcare records from multiple hospitals to train cancer " +"detection models" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +msgid "" +"Financial information from different organizations to detect financial " +"fraud" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 +msgid "Location data from your electric car to make better range prediction" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 +msgid "End-to-end encrypted messages to train better auto-complete models" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 +msgid "" +"The popularity of privacy-enhancing systems like the `Brave " +"`__ browser or the `Signal `__ " +"messenger shows that users care about privacy. In fact, they choose the " +"privacy-enhancing version over other alternatives, if such an alternative" +" exists. But what can we do to apply machine learning and data science to" +" these cases to utilize private data? After all, these are all areas that" +" would benefit significantly from recent advances in AI." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 +msgid "Federated learning" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +msgid "" +"Federated learning simply reverses this approach. It enables machine " +"learning on distributed data by moving the training to the data, instead " +"of moving the data to the training. 
Here's the single-sentence " +"explanation:" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 +msgid "Central machine learning: move the data to the computation" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 +msgid "Federated (machine) learning: move the computation to the data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +msgid "" +"By doing so, it enables us to use machine learning (and other data " +"science approaches) in areas where it wasn't possible before. We can now " +"train excellent medical AI models by enabling different hospitals to work" +" together. We can solve financial fraud by training AI models on the data" +" of different financial institutions. We can build novel privacy-" +"enhancing applications (such as secure messaging) that have better built-" +"in AI than their non-privacy-enhancing alternatives. And those are just a" +" few of the examples that come to mind. As we deploy federated learning, " +"we discover more and more areas that can suddenly be reinvented because " +"they now have access to vast amounts of previously inaccessible data." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +msgid "" +"So how does federated learning work, exactly? Let's start with an " +"intuitive explanation." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 +msgid "Federated learning in five steps" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 +msgid "Step 0: Initialize global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 +msgid "" +"We start by initializing the model on the server. This is exactly the " +"same in classic centralized learning: we initialize the model parameters," +" either randomly or from a previously saved checkpoint." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 +msgid "|19687aecbc3a485da999b66fe2051005|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 +msgid "Initialize global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +msgid "" +"Step 1: Send model to a number of connected organizations/devices (client" +" nodes)" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 +msgid "" +"Next, we send the parameters of the global model to the connected client " +"nodes (think: edge devices like smartphones or servers belonging to " +"organizations). This is to ensure that each participating node starts " +"their local training using the same model parameters. We often use only a" +" few of the connected nodes instead of all nodes. The reason for this is " +"that selecting more and more client nodes has diminishing returns." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 +msgid "|32ef0bbade4d4500b7be97cf62405661|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 +msgid "Send global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +msgid "" +"Step 2: Train model locally on the data of each organization/device " +"(client node)" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +msgid "" +"Now that all (selected) client nodes have the latest version of the " +"global model parameters, they start the local training. They use their " +"own local dataset to train their own local model. They don't train the " +"model until full convergence, but they only train for a little while. " +"This could be as little as one epoch on the local data, or even just a " +"few steps (mini-batches)." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 +msgid "|9d57ed324b304a698263f5a983a56a6b|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 +msgid "Train on local data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 +msgid "Step 3: Return model updates back to the server" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 +msgid "" +"After local training, each client node has a slightly different version " +"of the model parameters they originally received. The parameters are all " +"different because each client node has different examples in its local " +"dataset. The client nodes then send those model updates back to the " +"server. The model updates they send can either be the full model " +"parameters or just the gradients that were accumulated during local " +"training." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 +msgid "|d41510e6781c4bf18c234c6bfb8d4937|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 +msgid "Send model updates" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 +msgid "Step 4: Aggregate model updates into a new global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 +msgid "" +"The server receives model updates from the selected client nodes. If it " +"selected 100 client nodes, it now has 100 slightly different versions of " +"the original global model, each trained on the local data of one client. " +"But didn't we want to have one model that contains the learnings from the" +" data of all 100 client nodes?" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 +msgid "" +"In order to get one single model, we have to combine all the model " +"updates we received from the client nodes. 
This process is called " +"*aggregation*, and there are many different ways to do it. The most basic" +" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " +"`__), often abbreviated as *FedAvg*. " +"*FedAvg* takes the 100 model updates and, as the name suggests, averages " +"them. To be more precise, it takes the *weighted average* of the model " +"updates, weighted by the number of examples each client used for " +"training. The weighting is important to make sure that each data example " +"has the same \"influence\" on the resulting global model. If one client " +"has 10 examples, and another client has 100 examples, then - without " +"weighting - each of the 10 examples would influence the global model ten " +"times as much as each of the 100 examples." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 +msgid "|a0198a7ebbfb4b9289e7312711cbc967|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 +msgid "Aggregate model updates" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 +msgid "Step 5: Repeat steps 1 to 4 until the model converges" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 +msgid "" +"Steps 1 to 4 are what we call a single round of federated learning. The " +"global model parameters get sent to the participating client nodes (step " +"1), the client nodes train on their local data (step 2), they send their " +"updated models to the server (step 3), and the server then aggregates the" +" model updates to get a new version of the global model (step 4)." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 +msgid "" +"During a single round, each client node that participates in that " +"iteration only trains for a little while. 
This means that after the " +"aggregation step (step 4), we have a model that has been trained on all " +"the data of all participating client nodes, but only for a little while. " +"We then have to repeat this training process over and over again to " +"eventually arrive at a fully trained model that performs well across the " +"data of all client nodes." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 +msgid "" +"Congratulations, you now understand the basics of federated learning. " +"There's a lot more to discuss, of course, but that was federated learning" +" in a nutshell. In later parts of this tutorial, we will go into more " +"detail. Interesting questions include: How can we select the best client " +"nodes that should participate in the next round? What's the best way to " +"aggregate model updates? How can we handle failing client nodes " +"(stragglers)?" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 +msgid "" +"Just like we can train a model on the decentralized data of different " +"client nodes, we can also evaluate the model on that data to receive " +"valuable metrics. This is called federated evaluation, sometimes " +"abbreviated as FE. In fact, federated evaluation is an integral part of " +"most federated learning systems." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 +msgid "Federated analytics" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 +msgid "" +"In many cases, machine learning isn't necessary to derive value from " +"data. Data analysis can yield valuable insights, but again, there's often" +" not enough data to get a clear answer. What's the average age at which " +"people develop a certain type of health condition? Federated analytics " +"enables such queries over multiple client nodes. 
It is usually used in " +"conjunction with other privacy-enhancing technologies like secure " +"aggregation to prevent the server from seeing the results submitted by " +"individual client nodes." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 +msgid "" +"Differential privacy (DP) is often mentioned in the context of Federated " +"Learning. It is a privacy-preserving method used when analyzing and " +"sharing statistical data, ensuring the privacy of individual " +"participants. DP achieves this by adding statistical noise to the model " +"updates, ensuring any individual participants’ information cannot be " +"distinguished or re-identified. This technique can be considered an " +"optimization that provides a quantifiable privacy protection measure." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 +msgid "Flower" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 +msgid "" +"Federated learning, federated evaluation, and federated analytics require" +" infrastructure to move machine learning models back and forth, train and" +" evaluate them on local data, and then aggregate the updated models. " +"Flower provides the infrastructure to do exactly that in an easy, " +"scalable, and secure way. In short, Flower presents a unified approach to" +" federated learning, analytics, and evaluation. It allows the user to " +"federate any workload, any ML framework, and any programming language." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 +msgid "|2c13f726c8c843fc8aae997bf906125b|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 +msgid "" +"Flower federated learning server and client nodes (car, scooter, personal" +" computer, roomba, and phone)" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 +msgid "" +"Congratulations, you just learned the basics of federated learning and " +"how it relates to the classic (centralized) machine learning!" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 +msgid "" +"In the next part of this tutorial, we are going to build a first " +"federated learning system with Flower." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 +msgid "" +"The `Flower Federated Learning Tutorial - Part 1 " +"`__ shows how to build a simple federated learning system " +"with PyTorch and Flower." +msgstr "" + diff --git a/doc/locales/ko/LC_MESSAGES/sphinx.po b/doc/locales/ko/LC_MESSAGES/sphinx.po new file mode 100644 index 000000000000..bcfa940edeac --- /dev/null +++ b/doc/locales/ko/LC_MESSAGES/sphinx.po @@ -0,0 +1,37 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) 2024 Flower Labs GmbH +# This file is distributed under the same license as the Flower package. +# FIRST AUTHOR , 2024. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Flower main\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2024-05-13 09:48+0200\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language: ko\n" +"Language-Team: ko \n" +"Plural-Forms: nplurals=1; plural=0;\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.14.0\n" + +#: ../../source/_templates/base.html:18 +msgid "About these documents" +msgstr "" + +#: ../../source/_templates/base.html:21 +msgid "Index" +msgstr "" + +#: ../../source/_templates/base.html:24 +msgid "Search" +msgstr "" + +#: ../../source/_templates/base.html:27 +msgid "Copyright" +msgstr "" + diff --git a/doc/source/_templates/sidebar/search.html b/doc/source/_templates/sidebar/search.html new file mode 100644 index 000000000000..11525eb21a6b --- /dev/null +++ b/doc/source/_templates/sidebar/search.html @@ -0,0 +1,6 @@ + + diff --git a/doc/source/conf.py b/doc/source/conf.py index 88cb5c05b1d8..1c53a827dcf5 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -86,7 +86,7 @@ author = "The Flower Authors" # The full version, including alpha/beta/rc tags -release = "1.8.0" +release = "1.9.0" # -- General configuration --------------------------------------------------- @@ -123,6 +123,12 @@ # The full name is still at the top of the page add_module_names = False +# Customizations for the sphinx_copybutton extension +# Omit prompt text when copying code blocks +copybutton_prompt_text = "$ " +# Copy all lines when line continuation character is detected +copybutton_line_continuation_character = "\\" + def find_test_modules(package_path): """Go through the python files and exclude every *_test.py file.""" @@ -162,7 +168,6 @@ def find_test_modules(package_path): # Renamed pages "installation": "how-to-install-flower.html", "configuring-clients.html": "how-to-configure-clients.html", - "quickstart_mxnet": 
"tutorial-quickstart-mxnet.html", "quickstart_pytorch_lightning": "tutorial-quickstart-pytorch-lightning.html", "quickstart_huggingface": "tutorial-quickstart-huggingface.html", "quickstart_pytorch": "tutorial-quickstart-pytorch.html", @@ -194,7 +199,6 @@ def find_test_modules(package_path): "quickstart-pandas": "tutorial-quickstart-pandas.html", "quickstart-fastai": "tutorial-quickstart-fastai.html", "quickstart-pytorch-lightning": "tutorial-quickstart-pytorch-lightning.html", - "quickstart-mxnet": "tutorial-quickstart-mxnet.html", "quickstart-scikitlearn": "tutorial-quickstart-scikitlearn.html", "quickstart-xgboost": "tutorial-quickstart-xgboost.html", "quickstart-android": "tutorial-quickstart-android.html", @@ -240,6 +244,10 @@ def find_test_modules(package_path): "people": "index.html", "organizations": "index.html", "publications": "index.html", + "quickstart_mxnet": "index.html", + "quickstart-mxnet": "index.html", + "tutorial-quickstart-mxnet": "index.html", + "example-mxnet-walk-through": "index.html", } # -- Options for HTML output ------------------------------------------------- diff --git a/doc/source/contributor-how-to-build-docker-images.rst b/doc/source/contributor-how-to-build-docker-images.rst index 5dead265bee2..b97ee2c434ce 100644 --- a/doc/source/contributor-how-to-build-docker-images.rst +++ b/doc/source/contributor-how-to-build-docker-images.rst @@ -1,8 +1,8 @@ How to build Docker Flower images locally ========================================= -Flower provides pre-made docker images on `Docker Hub `_ -that include all necessary dependencies for running the server. You can also build your own custom +Flower provides pre-made docker images on `Docker Hub `_ +that include all necessary dependencies for running the SuperLink. You can also build your own custom docker images from scratch with a different version of Python or Ubuntu if that is what you need. In this guide, we will explain what images exist and how to build them locally. 
@@ -20,15 +20,15 @@ Before we can start, we need to meet a few prerequisites in our local developmen :doc:`Run Flower using Docker ` which covers this step in more detail. -Currently, Flower provides two images, a base image and a server image. There will also be a client -image soon. The base image, as the name suggests, contains basic dependencies that both the server -and the client need. This includes system dependencies, Python and Python tools. The server image is -based on the base image, but it additionally installs the Flower server using ``pip``. +Currently, Flower provides two images, a ``base`` image and a ``superlink`` image. The base image, +as the name suggests, contains basic dependencies that the SuperLink needs. +This includes system dependencies, Python and Python tools. The SuperLink image is +based on the base image, but it additionally installs the SuperLink using ``pip``. The build instructions that assemble the images are located in the respective Dockerfiles. You can find them in the subdirectories of ``src/docker``. -Both, base and server image are configured via build arguments. Through build arguments, we can make +Both, base and SuperLink image are configured via build arguments. Through build arguments, we can make our build more flexible. For example, in the base image, we can specify the version of Python to install using the ``PYTHON_VERSION`` build argument. Some of the build arguments have default values, others must be specified when building the image. All available build arguments for each @@ -66,7 +66,7 @@ The following example creates a base image with Python 3.11.0, pip 23.0.1 and se .. code-block:: bash - $ cd src/docker/base/ + $ cd src/docker/base/ubuntu $ docker build \ --build-arg PYTHON_VERSION=3.11.0 \ --build-arg PIP_VERSION=23.0.1 \ @@ -76,8 +76,8 @@ The following example creates a base image with Python 3.11.0, pip 23.0.1 and se The name of image is ``flwr_base`` and the tag ``0.1.0``. 
Remember that the build arguments as well as the name and tag can be adapted to your needs. These values serve as examples only. -Building the server image -------------------------- +Building the SuperLink image +---------------------------- .. list-table:: :widths: 25 45 15 15 @@ -89,47 +89,54 @@ Building the server image - Example * - ``BASE_REPOSITORY`` - The repository name of the base image. - - Defaults to ``flwr/server``. + - Defaults to ``flwr/base``. - - * - ``BASE_IMAGE_TAG`` - - The image tag of the base image. - - Defaults to ``py3.11-ubuntu22.04``. + * - ``PYTHON_VERSION`` + - The Python version of the base image. + - Defaults to ``py3.11``. + - + * - ``UBUNTU_VERSION`` + - The Ubuntu version of the base image. + - Defaults to ``ubuntu22.04``. + - + * - ``FLWR_PACKAGE`` + - The PyPI package to install. + - Defaults to ``flwr``. - * - ``FLWR_VERSION`` - Version of Flower to be installed. - Yes - - ``1.7.0`` + - ``1.8.0`` + -The following example creates a server image with the official Flower base image py3.11-ubuntu22.04 -and Flower 1.7.0: +The following example creates a SuperLink image with the official Flower base image +py3.11-ubuntu22.04 and Flower 1.8.0: .. code-block:: bash - $ cd src/docker/server/ + $ cd src/docker/superlink/ $ docker build \ - --build-arg BASE_IMAGE_TAG=py3.11-ubuntu22.04 \ - --build-arg FLWR_VERSION=1.7.0 \ - -t flwr_server:0.1.0 . + --build-arg FLWR_VERSION=1.8.0 \ + -t flwr_superlink:0.1.0 . -The name of image is ``flwr_server`` and the tag ``0.1.0``. Remember that the build arguments as well -as the name and tag can be adapted to your needs. These values serve as examples only. +The name of image is ``flwr_superlink`` and the tag ``0.1.0``. Remember that the build arguments as +well as the name and tag can be adapted to your needs. These values serve as examples only. 
If you want to use your own base image instead of the official Flower base image, all you need to do -is set the ``BASE_REPOSITORY`` and ``BASE_IMAGE_TAG`` build arguments. The value of -``BASE_REPOSITORY`` must match the name of your image and the value of ``BASE_IMAGE_TAG`` must match -the tag of your image. +is set the ``BASE_REPOSITORY``, ``PYTHON_VERSION`` and ``UBUNTU_VERSION`` build arguments. .. code-block:: bash - $ cd src/docker/server/ + $ cd src/docker/superlink/ $ docker build \ --build-arg BASE_REPOSITORY=flwr_base \ - --build-arg BASE_IMAGE_TAG=0.1.0 \ - --build-arg FLWR_VERSION=1.7.0 \ - -t flwr_server:0.1.0 . + --build-arg PYTHON_VERSION=3.11 \ + --build-arg UBUNTU_VERSION=ubuntu22.04 \ + --build-arg FLWR_VERSION=1.8.0 \ + -t flwr_superlink:0.1.0 . After creating the image, we can test whether the image is working: .. code-block:: bash - $ docker run --rm flwr_server:0.1.0 --help + $ docker run --rm flwr_superlink:0.1.0 --help diff --git a/doc/source/contributor-how-to-install-development-versions.rst b/doc/source/contributor-how-to-install-development-versions.rst index 558ec7f8ec46..15e2939ef138 100644 --- a/doc/source/contributor-how-to-install-development-versions.rst +++ b/doc/source/contributor-how-to-install-development-versions.rst @@ -48,13 +48,13 @@ Install ``flwr`` from a specific GitHub branch (``branch-name``): Open Jupyter Notebooks on Google Colab -------------------------------------- -Open the notebook ``doc/source/tutorial-get-started-with-flower-pytorch.ipynb``: +Open the notebook ``doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb``: -- https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial-get-started-with-flower-pytorch.ipynb +- https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb Open a development version of the same notebook from branch `branch-name` by changing ``main`` to ``branch-name`` (right after 
``blob``): -- https://colab.research.google.com/github/adap/flower/blob/branch-name/doc/source/tutorial-get-started-with-flower-pytorch.ipynb +- https://colab.research.google.com/github/adap/flower/blob/branch-name/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb Install a `whl` on Google Colab: diff --git a/doc/source/contributor-how-to-set-up-a-virtual-env.rst b/doc/source/contributor-how-to-set-up-a-virtual-env.rst index a3b1e7f9ad12..8b684e24c658 100644 --- a/doc/source/contributor-how-to-set-up-a-virtual-env.rst +++ b/doc/source/contributor-how-to-set-up-a-virtual-env.rst @@ -10,8 +10,13 @@ Python Version Flower requires at least `Python 3.8 `_, but `Python 3.10 `_ or above is recommended. -Virutualenv with Pyenv/Virtualenv ---------------------------------- +.. note:: + Due to a known incompatibility with `ray `_, + we currently recommend utilizing at most `Python 3.11 `_ for + running Flower simulations. + +Virtualenv with Pyenv/Virtualenv +-------------------------------- One of the recommended virtual environment is `pyenv `_/`virtualenv `_. Please see `Flower examples `_ for details. diff --git a/doc/source/contributor-tutorial-contribute-on-github.rst b/doc/source/contributor-tutorial-contribute-on-github.rst index 6da81ce73662..3ba93865fe1c 100644 --- a/doc/source/contributor-tutorial-contribute-on-github.rst +++ b/doc/source/contributor-tutorial-contribute-on-github.rst @@ -190,12 +190,13 @@ Creating and merging a pull request (PR) In this example you can see that the request is to merge the branch ``doc-fixes`` from my forked repository to branch ``main`` from the Flower repository. + The title should be changed to adhere to the :ref:`pr_title_format` guidelines, otherwise it won't be possible to merge the PR. So in this case, + a correct title might be ``docs(framework:skip) Fix typos``. + The input box in the middle is there for you to describe what your PR does and to link it to existing issues. 
We have placed comments (that won't be rendered once the PR is opened) to guide you through the process. - It is important to follow the instructions described in comments. For instance, in order to not break how our changelog system works, - you should read the information above the ``Changelog entry`` section carefully. - You can also checkout some examples and details in the :ref:`changelogentry` appendix. + It is important to follow the instructions described in comments. At the bottom you will find the button to open the PR. This will notify reviewers that a new PR has been opened and that they should look over it to merge or to request changes. @@ -304,39 +305,11 @@ Open PR - Commit the changes (commit messages are always imperative: "Do something", in this case "Change …") - Push the changes to your fork -- Open a PR (as shown above) +- Open a PR (as shown above) with title ``docs(framework) Update how-to guide title`` - Wait for it to be approved! - Congrats! 🥳 You're now officially a Flower contributor! -How to write a good PR title ----------------------------- - -A well-crafted PR title helps team members quickly understand the purpose and scope of the changes being proposed. Here's a guide to help you write a good GitHub PR title: - -1. Be Clear and Concise: Provide a clear summary of the changes in a concise manner. -1. Use Actionable Verbs: Start with verbs like "Add," "Update," or "Fix" to indicate the purpose. -1. Include Relevant Information: Mention the affected feature or module for context. -1. Keep it Short: Avoid lengthy titles for easy readability. -1. Use Proper Capitalization and Punctuation: Follow grammar rules for clarity. 
- -Let's start with a few examples for titles that should be avoided because they do not provide meaningful information: - -* Implement Algorithm -* Database -* Add my_new_file.py to codebase -* Improve code in module -* Change SomeModule - -Here are a few positive examples which provide helpful information without repeating how they do it, as that is already visible in the "Files changed" section of the PR: - -* Update docs banner to mention Flower Summit 2023 -* Remove unnecessary XGBoost dependency -* Remove redundant attributes in strategies subclassing FedAvg -* Add CI job to deploy the staging system when the ``main`` branch changes -* Add new amazing library which will be used to improve the simulation engine - - Next steps ---------- @@ -348,71 +321,32 @@ Once you have made your first PR, and want to contribute more, be sure to check Appendix -------- -.. _changelogentry: +.. _pr_title_format: -Changelog entry +PR title format *************** -When opening a new PR, inside its description, there should be a ``Changelog entry`` header. - -Above this header you should see the following comment that explains how to write your changelog entry: - - Inside the following 'Changelog entry' section, - you should put the description of your changes that will be added to the changelog alongside your PR title. - - If the section is completely empty (without any token) or non-existent, - the changelog will just contain the title of the PR for the changelog entry, without any description. - - If the section contains some text other than tokens, it will use it to add a description to the change. - - If the section contains one of the following tokens it will ignore any other text and put the PR under the corresponding section of the changelog: - - is for classifying a PR as a general improvement. 
- - is to not add the PR to the changelog - - is to add a general baselines change to the PR - - is to add a general examples change to the PR - - is to add a general sdk change to the PR - - is to add a general simulations change to the PR - - Note that only one token should be used. - -Its content must have a specific format. We will break down what each possibility does: - -- If the ``### Changelog entry`` section contains nothing or doesn't exist, the following text will be added to the changelog:: - - - **PR TITLE** ([#PR_NUMBER](https://github.com/adap/flower/pull/PR_NUMBER)) - -- If the ``### Changelog entry`` section contains a description (and no token), the following text will be added to the changelog:: - - - **PR TITLE** ([#PR_NUMBER](https://github.com/adap/flower/pull/PR_NUMBER)) - - DESCRIPTION FROM THE CHANGELOG ENTRY - -- If the ``### Changelog entry`` section contains ````, nothing will change in the changelog. - -- If the ``### Changelog entry`` section contains ````, the following text will be added to the changelog:: - - - **General improvements** ([#PR_NUMBER](https://github.com/adap/flower/pull/PR_NUMBER)) - -- If the ``### Changelog entry`` section contains ````, the following text will be added to the changelog:: +We enforce the following PR title format: - - **General updates to Flower Baselines** ([#PR_NUMBER](https://github.com/adap/flower/pull/PR_NUMBER)) +.. 
code-block:: -- If the ``### Changelog entry`` section contains ````, the following text will be added to the changelog:: + () - - **General updates to Flower Examples** ([#PR_NUMBER](https://github.com/adap/flower/pull/PR_NUMBER)) +(or ``(:skip) `` to ignore the PR in the changelog) -- If the ``### Changelog entry`` section contains ````, the following text will be added to the changelog:: +Where ```` needs to be in ``{ci, fix, feat, docs, refactor, break}``, ```` +should be in ``{framework, baselines, datasets, examples, or '*' when modifying multiple projects}``, and ```` starts with a capitalised verb in the imperative mood. - - **General updates to Flower SDKs** ([#PR_NUMBER](https://github.com/adap/flower/pull/PR_NUMBER)) +Valid examples: -- If the ``### Changelog entry`` section contains ````, the following text will be added to the changelog:: +- ``feat(framework) Add flwr build CLI command`` +- ``refactor(examples:skip) Improve quickstart-pytorch logging`` +- ``ci(*:skip) Enforce PR title format`` - - **General updates to Flower Simulations** ([#PR_NUMBER](https://github.com/adap/flower/pull/PR_NUMBER)) +Invalid examples: -Note that only one token must be provided, otherwise, only the first action (in the order listed above), will be performed. 
+- ``feat(framework): Add flwr build CLI command`` (extra ``:``) +- ``feat(skip) Add flwr build CLI command`` (missing ````) +- ``feat(framework) add flwr build CLI command`` (non capitalised verb) +- ``feat(framework) Add flwr build CLI command.`` (dot at the end) +- ``Add flwr build CLI command.`` (missing ``()``) diff --git a/doc/source/contributor-tutorial-get-started-as-a-contributor.rst b/doc/source/contributor-tutorial-get-started-as-a-contributor.rst index 9136fea96bf6..43f9739987ac 100644 --- a/doc/source/contributor-tutorial-get-started-as-a-contributor.rst +++ b/doc/source/contributor-tutorial-get-started-as-a-contributor.rst @@ -102,6 +102,33 @@ Run Linters and Tests $ ./dev/test.sh +Add a pre-commit hook +~~~~~~~~~~~~~~~~~~~~~ + +Developers may integrate a pre-commit hook into their workflow utilizing the `pre-commit `_ library. The pre-commit hook is configured to execute two primary operations: ``./dev/format.sh`` and ``./dev/test.sh`` scripts. + +There are multiple ways developers can use this: + +1. Install the pre-commit hook to your local git directory by simply running: + + :: + + $ pre-commit install + + - Each ``git commit`` will trigger the execution of formatting and linting/test scripts. + - If in a hurry, bypass the hook using ``--no-verify`` with the ``git commit`` command. + :: + + $ git commit --no-verify -m "Add new feature" + +2. For developers who prefer not to install the hook permanently, it is possible to execute a one-time check prior to committing changes by using the following command: + + :: + + $ pre-commit run --all-files + + This executes the formatting and linting checks/tests on all the files without modifying the default behavior of ``git commit``. 
+ Run Github Actions (CI) locally ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/example-mxnet-walk-through.rst b/doc/source/example-mxnet-walk-through.rst deleted file mode 100644 index c215f709ffb2..000000000000 --- a/doc/source/example-mxnet-walk-through.rst +++ /dev/null @@ -1,360 +0,0 @@ -Example: MXNet - Run MXNet Federated -==================================== - -This tutorial will show you how to use Flower to build a federated version of an existing MXNet workload. -We are using MXNet to train a Sequential model on the MNIST dataset. -We will structure the example similar to our `PyTorch - From Centralized To Federated `_ walkthrough. MXNet and PyTorch are very similar and a very good comparison between MXNet and PyTorch is given `here `_. -First, we build a centralized training approach based on the `Handwritten Digit Recognition `_ tutorial. -Then, we build upon the centralized training code to run the training in a federated fashion. - -Before we start setting up our MXNet example, we install the :code:`mxnet` and :code:`flwr` packages: - -.. code-block:: shell - - $ pip install mxnet - $ pip install flwr - - -MNIST Training with MXNet -------------------------- - -We begin with a brief description of the centralized training code based on a :code:`Sequential` model. -If you want a more in-depth explanation of what's going on then have a look at the official `MXNet tutorial `_. - -Let's create a new file called:code:`mxnet_mnist.py` with all the components required for a traditional (centralized) MNIST training. -First, the MXNet package :code:`mxnet` needs to be imported. -You can see that we do not yet import the :code:`flwr` package for federated learning. This will be done later. - -.. 
code-block:: python - - from __future__ import print_function - from typing import Tuple - import mxnet as mx - from mxnet import gluon - from mxnet.gluon import nn - from mxnet import autograd as ag - import mxnet.ndarray as F - from mxnet import nd - - # Fixing the random seed - mx.random.seed(42) - -The :code:`load_data()` function loads the MNIST training and test sets. - -.. code-block:: python - - def load_data() -> Tuple[mx.io.NDArrayIter, mx.io.NDArrayIter]: - print("Download Dataset") - # Download MNIST data - mnist = mx.test_utils.get_mnist() - batch_size = 100 - train_data = mx.io.NDArrayIter( - mnist["train_data"], mnist["train_label"], batch_size, shuffle=True - ) - val_data = mx.io.NDArrayIter(mnist["test_data"], mnist["test_label"], batch_size) - return train_data, val_data - -As already mentioned, we will use the MNIST dataset for this machine learning workload. The model architecture (a very simple :code:`Sequential` model) is defined in :code:`model()`. - -.. code-block:: python - - def model(): - # Define simple Sequential model - net = nn.Sequential() - net.add(nn.Dense(256, activation="relu")) - net.add(nn.Dense(64, activation="relu")) - net.add(nn.Dense(10)) - net.collect_params().initialize() - return net - -We now need to define the training (function :code:`train()`) which loops over the training set and measures the loss for each batch of training examples. - -.. code-block:: python - - def train( - net: mx.gluon.nn, train_data: mx.io.NDArrayIter, epoch: int, device: mx.context - ) -> None: - trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.03}) - # Use Accuracy and Cross Entropy Loss as the evaluation metric. 
- accuracy_metric = mx.metric.Accuracy() - loss_metric = mx.metric.CrossEntropy() - metrics = mx.metric.CompositeEvalMetric() - for child_metric in [accuracy_metric, loss_metric]: - metrics.add(child_metric) - softmax_cross_entropy_loss = gluon.loss.SoftmaxCrossEntropyLoss() - for i in range(epoch): - # Reset the train data iterator. - train_data.reset() - # Calculate number of samples - num_examples = 0 - # Loop over the train data iterator. - for batch in train_data: - # Splits train data into multiple slices along batch_axis - # and copy each slice into a context. - data = gluon.utils.split_and_load( - batch.data[0], ctx_list=device, batch_axis=0 - ) - # Splits train labels into multiple slices along batch_axis - # and copy each slice into a context. - label = gluon.utils.split_and_load( - batch.label[0], ctx_list=device, batch_axis=0 - ) - outputs = [] - # Inside training scope - with ag.record(): - for x, y in zip(data, label): - z = net(x) - # Computes softmax cross entropy loss. - loss = softmax_cross_entropy_loss(z, y) - # Backpropogate the error for one iteration. - loss.backward() - outputs.append(z.softmax()) - num_examples += len(x) - # Updates internal evaluation - metric.update(label, outputs) - # Make one step of parameter update. Trainer needs to know the - # batch size of data to normalize the gradient by 1/batch_size. - trainer.step(batch.data[0].shape[0]) - # Gets the evaluation result. - trainings_metric = metrics.get_name_value() - print("Accuracy & loss at epoch %d: %s" % (i, trainings_metric)) - return trainings_metric, num_examples - -The evaluation of the model is defined in function :code:`test()`. The function loops over all test samples and measures the loss and accuracy of the model based on the test dataset. - -.. code-block:: python - - def test( - net: mx.gluon.nn, val_data: mx.io.NDArrayIter, device: mx.context - ) -> Tuple[float, float]: - # Use Accuracy and Cross Entropy Loss as the evaluation metric. 
- accuracy_metric = mx.metric.Accuracy() - loss_metric = mx.metric.CrossEntropy() - metrics = mx.metric.CompositeEvalMetric() - for child_metric in [accuracy_metric, loss_metric]: - metrics.add(child_metric) - # Reset the validation data iterator. - val_data.reset() - # Get number of samples for val_dat - num_examples = 0 - # Loop over the validation data iterator. - for batch in val_data: - # Splits validation data into multiple slices along batch_axis - # and copy each slice into a context. - data = gluon.utils.split_and_load(batch.data[0], ctx_list=device, batch_axis=0) - # Splits validation label into multiple slices along batch_axis - # and copy each slice into a context. - label = gluon.utils.split_and_load( - batch.label[0], ctx_list=device, batch_axis=0 - ) - outputs = [] - for x in data: - outputs.append(net(x).softmax()) - num_examples += len(x) - # Updates internal evaluation - metrics.update(label, outputs) - return metrics.get_name_value(), num_examples - -Having defined the data loading, model architecture, training, and evaluation we can put everything together and train our model on MNIST. Note that the GPU/CPU device for the training and testing is defined within the :code:`ctx` (context). - -.. 
code-block:: python - - def main(): - # Setup context to GPU and if not available to CPU - DEVICE = [mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()] - # Load train and validation data - train_data, val_data = load_data() - # Define sequential model - net = model() - # Start forward propagation to initialize model parameters (optional) - init = nd.random.uniform(shape=(2, 784)) - net(init) - # Start model training based on training set - train(net=net, train_data=train_data, epoch=5, device=DEVICE) - # Evaluate model using loss and accuracy - eval_metric, _ = test(net=net, val_data=val_data, device=DEVICE) - acc = eval_metric[0] - loss = eval_metric[1] - print("Evaluation Loss: ", loss) - print("Evaluation Accuracy: ", acc) - - if __name__ == "__main__": - main() - -You can now run your (centralized) MXNet machine learning workload: - -.. code-block:: python - - python3 mxnet_mnist.py - -So far this should all look fairly familiar if you've used MXNet (or even PyTorch) before. -Let's take the next step and use what we've built to create a simple federated learning system consisting of one server and two clients. - -MXNet meets Flower ------------------- - -So far, it was not easily possible to use MXNet workloads for federated learning because federated learning is not supported in MXNet. Since Flower is fully agnostic towards the underlying machine learning framework, it can be used to federated arbitrary machine learning workloads. This section will show you how Flower can be used to federate our centralized MXNet workload. - -The concept to federate an existing workload is always the same and easy to understand. -We have to start a *server* and then use the code in :code:`mxnet_mnist.py` for the *clients* that are connected to the *server*. -The *server* sends model parameters to the clients. The *clients* run the training and update the parameters. -The updated parameters are sent back to the *server* which averages all received parameter updates. 
-This describes one round of the federated learning process and we repeat this for multiple rounds. - -Our example consists of one *server* and two *clients*. Let's set up :code:`server.py` first. The *server* needs to import the Flower package :code:`flwr`. -Next, we use the :code:`start_server` function to start a server and tell it to perform three rounds of federated learning. - -.. code-block:: python - - import flwr as fl - - if __name__ == "__main__": - fl.server.start_server(server_address="0.0.0.0:8080", config=fl.server.ServerConfig(num_rounds=3)) - -We can already start the *server*: - -.. code-block:: python - - python3 server.py - -Finally, we will define our *client* logic in :code:`client.py` and build upon the previously defined MXNet training in :code:`mxnet_mnist.py`. -Our *client* needs to import :code:`flwr`, but also :code:`mxnet` to update the parameters on our MXNet model: - -.. code-block:: python - - from typing import Dict, List, Tuple - - import flwr as fl - import numpy as np - import mxnet as mx - from mxnet import nd - - import mxnet_mnist - - -Implementing a Flower *client* basically means implementing a subclass of either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. -Our implementation will be based on :code:`flwr.client.NumPyClient` and we'll call it :code:`MNISTClient`. -:code:`NumPyClient` is slightly easier to implement than :code:`Client` if you use a framework with good NumPy interoperability (like PyTorch or MXNet) because it avoids some of the boilerplate that would otherwise be necessary. -:code:`MNISTClient` needs to implement four methods, two methods for getting/setting model parameters, one method for training the model, and one method for testing the model: - -#. 
:code:`set_parameters (optional)` - * set the model parameters on the local model that are received from the server - * transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s - * loop over the list of model parameters received as NumPy :code:`ndarray`'s (think list of neural network layers) -#. :code:`get_parameters` - * get the model parameters and return them as a list of NumPy :code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects) -#. :code:`fit` - * update the parameters of the local model with the parameters received from the server - * train the model on the local training set - * get the updated local model weights and return them to the server -#. :code:`evaluate` - * update the parameters of the local model with the parameters received from the server - * evaluate the updated model on the local test set - * return the local loss and accuracy to the server - -The challenging part is to transform the MXNet parameters from :code:`NDArray` to :code:`NumPy Arrays` to make it readable for Flower. - -The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make use of the functions :code:`train()` and :code:`test()` previously defined in :code:`mxnet_mnist.py`. -So what we really do here is we tell Flower through our :code:`NumPyClient` subclass which of our already defined functions to call for training and evaluation. -We included type annotations to give you a better understanding of the data types that get passed around. - -.. 
code-block:: python - - class MNISTClient(fl.client.NumPyClient): - """Flower client implementing MNIST classification using MXNet.""" - - def __init__( - self, - model: mxnet_mnist.model(), - train_data: mx.io.NDArrayIter, - val_data: mx.io.NDArrayIter, - device: mx.context, - ) -> None: - self.model = model - self.train_data = train_data - self.val_data = val_data - self.device = device - - def get_parameters(self, config) -> List[np.ndarray]: - # Return model parameters as a list of NumPy Arrays - param = [] - for val in self.model.collect_params(".*weight").values(): - p = val.data() - # convert parameters from MXNet NDArray to Numpy Array required by Flower Numpy Client - param.append(p.asnumpy()) - return param - - def set_parameters(self, parameters: List[np.ndarray]) -> None: - # Collect model parameters and set new weight values - params = zip(self.model.collect_params(".*weight").keys(), parameters) - for key, value in params: - self.model.collect_params().setattr(key, value) - - def fit( - self, parameters: List[np.ndarray], config: Dict[str, str] - ) -> Tuple[List[np.ndarray], int]: - # Set model parameters, train model, return updated model parameters - self.set_parameters(parameters) - [accuracy, loss], num_examples = mxnet_mnist.train( - self.model, self.train_data, epoch=2, device=self.device - ) - results = {"accuracy": accuracy[1], "loss": loss[1]} - return self.get_parameters(config={}), num_examples, results - - def evaluate( - self, parameters: List[np.ndarray], config: Dict[str, str] - ) -> Tuple[int, float, float]: - # Set model parameters, evaluate model on local test dataset, return result - self.set_parameters(parameters) - [accuracy, loss], num_examples = mxnet_mnist.test( - self.model, self.val_data, device=self.device - ) - print("Evaluation accuracy & loss", accuracy, loss) - return ( - float(loss[1]), - num_examples, - {"accuracy": float(accuracy[1])}, - ) - -Having defined data loading, model architecture, training, and evaluation we 
can put everything together and train our :code:`Sequential` model on MNIST. - -.. code-block:: python - - def main() -> None: - """Load data, start MNISTClient.""" - - # Setup context to GPU and if not available to CPU - DEVICE = [mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()] - - # Load data - train_data, val_data = mxnet_mnist.load_data() - - # Define model from centralized training - model = mxnet_mnist.model() - - # Make one forward propagation to initialize parameters - init = nd.random.uniform(shape=(2, 784)) - model(init) - - # Start Flower client - client = MNISTClient(model, train_data, val_data, DEVICE) - fl.client.start_numpy_client(server_address="0.0.0.0:8080", client) - - - if __name__ == "__main__": - main() - -And that's it. You can now open two additional terminal windows and run - -.. code-block:: python - - python3 client.py - -in each window (make sure that the server is still running before you do so) and see your MXNet project run federated learning across two clients. Congratulations! - -Next Steps ----------- - -The full source code for this example: `MXNet: From Centralized To Federated (Code) `_. -Our example is of course somewhat over-simplified because both clients load the exact same dataset, which isn't realistic. -You're now prepared to explore this topic further. How about using a CNN or using a different dataset? How about adding more clients? diff --git a/doc/source/how-to-authenticate-supernodes.rst b/doc/source/how-to-authenticate-supernodes.rst new file mode 100644 index 000000000000..472ba64a8310 --- /dev/null +++ b/doc/source/how-to-authenticate-supernodes.rst @@ -0,0 +1,74 @@ +Authenticate SuperNodes +======================= + +Flower has built-in support for authenticated SuperNodes that you can use to verify the identities of each SuperNode connecting to a SuperLink. 
+Flower node authentication works similar to how GitHub SSH authentication works: + +* SuperLink (server) stores a list of known (client) node public keys +* Using ECDH, both SuperNode and SuperLink independently derive a shared secret +* Shared secret is used to compute the HMAC value of the message sent from SuperNode to SuperLink as a token +* SuperLink verifies the token + +We recommend you to check out the complete `code example `_ demonstrating federated learning with Flower in an authenticated setting. + +.. note:: + This guide covers a preview feature that might change in future versions of Flower. + +.. note:: + For increased security, node authentication can only be used when encrypted connections (SSL/TLS) are enabled. + +Enable node authentication in :code:`SuperLink` +----------------------------------------------- + +To enable node authentication, first you need to configure SSL/TLS connections to secure the SuperLink<>SuperNode communication. You can find the complete guide +`here `_. +After configuring secure connections, you can enable client authentication in a long-running Flower :code:`SuperLink`. +Use the following terminal command to start a Flower :code:`SuperNode` that has both secure connections and node authentication enabled: + +.. code-block:: bash + + flower-superlink + --certificates certificates/ca.crt certificates/server.pem certificates/server.key + --require-client-authentication ./keys/client_public_keys.csv ./keys/server_credentials ./keys/server_credentials.pub + +Let's break down the :code:`--require-client-authentication` flag: + +1. The first argument is a path to a CSV file storing all known node public keys. You need to store all known node public keys that are allowed to participate in a federation in one CSV file (:code:`.csv`). + + A valid CSV file storing known node public keys should list the keys in OpenSSH format, separated by commas and without any comments. 
For an example, refer to our code sample, which contains a CSV file with two known node public keys. + +2. The second and third arguments are paths to the server's private and public keys. For development purposes, you can generate a private and public key pair using :code:`ssh-keygen -t ecdsa -b 384`. + +.. note:: + In Flower 1.9, there is no support for dynamically removing, editing, or adding known node public keys to the SuperLink. + To change the set of known nodes, you need to shut the server down, edit the CSV file, and start the server again. + Support for dynamically changing the set of known nodes is on the roadmap to be released in Flower 1.10 (ETA: June). + + +Enable node authentication in :code:`SuperNode` +------------------------------------------------- + +Similar to the long-running Flower server (:code:`SuperLink`), you can easily enable node authentication in the long-running Flower client (:code:`SuperNode`). +Use the following terminal command to start an authenticated :code:`SuperNode`: + +.. code-block:: bash + + flower-client-app client:app + --root-certificates certificates/ca.crt + --server 127.0.0.1:9092 + --authentication-keys ./keys/client_credentials ./keys/client_credentials.pub + +The :code:`--authentication-keys` flag expects two arguments: a path to the node's private key file and a path to the node's public key file. For development purposes, you can generate a private and public key pair using :code:`ssh-keygen -t ecdsa -b 384`. + + +Security notice +--------------- + +The system's security relies on the credentials of the SuperLink and each SuperNode. Therefore, it is imperative to safeguard and safely store the credentials to avoid security risks such as Public Key Infrastructure (PKI) impersonation attacks. +The node authentication mechanism also involves human interaction, so please ensure that all of the communication is done in a secure manner, using trusted communication methods. 
+ + +Conclusion +---------- + +You should now have learned how to start a long-running Flower server (:code:`SuperLink`) and client (:code:`SuperNode`) with node authentication enabled. You should also know the significance of the private key and store it safely to minimize security risks. diff --git a/doc/source/how-to-enable-ssl-connections.rst b/doc/source/how-to-enable-ssl-connections.rst index 051dd5711497..5c762fe1169a 100644 --- a/doc/source/how-to-enable-ssl-connections.rst +++ b/doc/source/how-to-enable-ssl-connections.rst @@ -1,14 +1,14 @@ Enable SSL connections ====================== -This guide describes how to a SSL-enabled secure Flower server can be started and -how a Flower client can establish a secure connections to it. +This guide describes how an SSL-enabled secure Flower server (:code:`SuperLink`) can be started and +how a Flower client (:code:`SuperNode`) can establish a secure connection to it. A complete code example demonstrating a secure connection can be found `here `_. -The code example comes with a README.md file which will explain how to start it. Although it is -already SSL-enabled, it might be less descriptive on how. Stick to this guide for a deeper +The code example comes with a :code:`README.md` file which explains how to start it. Although it is +already SSL-enabled, it might be less descriptive on how it does so. Stick to this guide for a deeper introduction to the topic. @@ -19,7 +19,6 @@ Using SSL-enabled connections requires certificates to be passed to the server a the purpose of this guide we are going to generate self-signed certificates. As this can become quite complex we are going to ask you to run the script in :code:`examples/advanced-tensorflow/certificates/generate.sh` - with the following command sequence: .. code-block:: bash @@ -29,67 +28,44 @@ with the following command sequence: This will generate the certificates in :code:`examples/advanced-tensorflow/.cache/certificates`. 
-The approach how the SSL certificates are generated in this example can serve as an inspiration and -starting point but should not be taken as complete for production environments. Please refer to other +The approach for generating SSL certificates in the context of this example can serve as an inspiration and +starting point, but it should not be used as a reference for production environments. Please refer to other sources regarding the issue of correctly generating certificates for production environments. +For non-critical prototyping or research projects, it might be sufficient to use the self-signed certificates generated using +the scripts mentioned in this guide. -In case you are a researcher you might be just fine using the self-signed certificates generated using -the scripts which are part of this guide. - - -Server ------- - -We are now going to show how to write a sever which uses the previously generated scripts. -.. code-block:: python +Server (SuperLink) +------------------ - from pathlib import Path - import flwr as fl +Use the following terminal command to start a server (SuperLink) that uses the previously generated certificates: - # Start server - fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=4), - certificates=( - Path(".cache/certificates/ca.crt").read_bytes(), - Path(".cache/certificates/server.pem").read_bytes(), - Path(".cache/certificates/server.key").read_bytes(), - ) - ) - -When providing certificates, the server expects a tuple of three certificates. :code:`Path` can be used to easily read the contents of those files into byte strings, which is the data type :code:`start_server` expects. +.. code-block:: bash + flower-superlink --certificates certificates/ca.crt certificates/server.pem certificates/server.key -Client ------- +When providing certificates, the server expects a tuple of three certificate paths: CA certificate, server certificate and server private key. 
-We are now going to show how to write a client which uses the previously generated scripts: -.. code-block:: python +Client (SuperNode) +------------------ - from pathlib import Path - import flwr as fl +Use the following terminal command to start a client (SuperNode) that uses the previously generated certificates: - # Define client somewhere - client = MyFlowerClient() +.. code-block:: bash - # Start client - fl.client.start_client( - "localhost:8080", - client=client.to_client(), - root_certificates=Path(".cache/certificates/ca.crt").read_bytes(), - ) + flower-client-app client:app + --root-certificates certificates/ca.crt + --server 127.0.0.1:9092 -When setting :code:`root_certificates`, the client expects the PEM-encoded root certificates as a byte string. -We are again using :code:`Path` to simplify reading those as byte strings. +When setting :code:`root_certificates`, the client expects a file path to PEM-encoded root certificates. Conclusion ---------- -You should now have learned how to generate self-signed certificates using the given script, start a -SSL-enabled server, and have a client establish a secure connection to it. +You should now have learned how to generate self-signed certificates using the given script, start an +SSL-enabled server and have a client establish a secure connection to it. Additional resources diff --git a/doc/source/how-to-install-flower.rst b/doc/source/how-to-install-flower.rst index aebe5f7316de..964b23125c0b 100644 --- a/doc/source/how-to-install-flower.rst +++ b/doc/source/how-to-install-flower.rst @@ -48,7 +48,7 @@ Verify installation The following command can be used to verify if Flower was successfully installed. 
If everything worked, it should print the version of Flower to the command line:: python -c "import flwr;print(flwr.__version__)" - 1.5.0 + 1.8.0 Advanced installation options diff --git a/doc/source/how-to-run-flower-using-docker.rst b/doc/source/how-to-run-flower-using-docker.rst index ed034c820142..9b1f16bba610 100644 --- a/doc/source/how-to-run-flower-using-docker.rst +++ b/doc/source/how-to-run-flower-using-docker.rst @@ -2,14 +2,14 @@ Run Flower using Docker ======================= The simplest way to get started with Flower is by using the pre-made Docker images, which you can -find on `Docker Hub `_. +find on `Docker Hub `__. Before you start, make sure that the Docker daemon is running: .. code-block:: bash $ docker -v - Docker version 24.0.7, build afdd53b + Docker version 26.0.0, build 2ae903e If you do not see the version of Docker but instead get an error saying that the command was not found, you will need to install Docker first. You can find installation instruction @@ -21,8 +21,14 @@ was not found, you will need to install Docker first. You can find installation you can follow the `Post-installation steps `_ on the official Docker website. -Flower server -------------- +.. important:: + + To ensure optimal performance and compatibility, the SuperLink, SuperNode and ServerApp image + must have the same version when running together. This guarantees seamless integration and + avoids potential conflicts or issues that may arise from using different versions. + +Flower SuperLink +---------------- Quickstart ~~~~~~~~~~ @@ -31,84 +37,364 @@ If you're looking to try out Flower, you can use the following command: .. code-block:: bash - $ docker run --rm -p 9091:9091 -p 9092:9092 flwr/server:1.7.0-py3.11-ubuntu22.04 \ - --insecure + $ docker run --rm -p 9091:9091 -p 9092:9092 flwr/superlink:1.8.0 --insecure -The command will pull the Docker image with the tag ``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. 
-The tag contains the information which Flower, Python and Ubuntu is used. In this case, it -uses Flower 1.7.0, Python 3.11 and Ubuntu 22.04. The ``--rm`` flag tells Docker to remove -the container after it exits. +The command pulls the Docker image with the tag ``1.8.0`` from Docker Hub. The tag specifies +the Flower version. In this case, Flower 1.8.0. The ``--rm`` flag tells Docker to remove the +container after it exits. .. note:: - By default, the Flower server keeps state in-memory. When using the Docker flag - ``--rm``, the state is not persisted between container starts. We will show below how to save the - state in a file on your host system. + By default, the Flower SuperLink keeps state in-memory. When using the Docker flag ``--rm``, the + state is not persisted between container starts. We will show below how to save the state in a + file on your host system. The ``-p :`` flag tells Docker to map the ports ``9091``/``9092`` of the host to ``9091``/``9092`` of the container, allowing you to access the Driver API on ``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``. Lastly, any flag that comes after the tag is passed -to the Flower server. Here, we are passing the flag ``--insecure``. +to the Flower SuperLink. Here, we are passing the flag ``--insecure``. .. attention:: - The ``--insecure`` flag enables insecure communication (using HTTP, not HTTPS) and should only be used - for testing purposes. We strongly recommend enabling - `SSL `_ + The ``--insecure`` flag enables insecure communication (using HTTP, not HTTPS) and should only be + used for testing purposes. We strongly recommend enabling + `SSL `__ when deploying to a production environment. -You can use ``--help`` to view all available flags that the server supports: +You can use ``--help`` to view all available flags that the SuperLink supports: .. 
code-block:: bash - $ docker run --rm flwr/server:1.7.0-py3.11-ubuntu22.04 --help + $ docker run --rm flwr/superlink:1.8.0 --help Mounting a volume to store the state on the host system ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -If you want to persist the state of the server on your host system, all you need to do is specify a -path where you want to save the file on your host system and a name for the database file. In the -example below, we tell Docker via the flag ``-v`` to mount the user's home directory +If you want to persist the state of the SuperLink on your host system, all you need to do is specify +a path where you want to save the file on your host system and a name for the database file. In the +example below, we tell Docker via the flag ``--volume`` to mount the user's home directory (``~/`` on your host) into the ``/app/`` directory of the container. Furthermore, we use the flag ``--database`` to specify the name of the database file. .. code-block:: bash $ docker run --rm \ - -p 9091:9091 -p 9092:9092 -v ~/:/app/ flwr/server:1.7.0-py3.11-ubuntu22.04 \ + -p 9091:9091 -p 9092:9092 --volume ~/:/app/ flwr/superlink:1.8.0 \ --insecure \ --database state.db -As soon as the server starts, the file ``state.db`` is created in the user's home directory on -your host system. If the file already exists, the server tries to restore the state from the file. -To start the server with an empty database, simply remove the ``state.db`` file. +As soon as the SuperLink starts, the file ``state.db`` is created in the user's home directory on +your host system. If the file already exists, the SuperLink tries to restore the state from the +file. To start the SuperLink with an empty database, simply remove the ``state.db`` file. Enabling SSL for secure connections ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To enable SSL, you will need a CA certificate, a server certificate and a server private key. 
+To enable SSL, you will need a PEM-encoded root certificate, a PEM-encoded private key and a +PEM-encoded certificate chain. .. note:: For testing purposes, you can generate your own self-signed certificates. The - `Enable SSL connections `_ + `Enable SSL connections `__ page contains a section that will guide you through the process. Assuming all files we need are in the local ``certificates`` directory, we can use the flag -``-v`` to mount the local directory into the ``/app/`` directory of the container. This allows the -server to access the files within the container. Finally, we pass the names of the certificates to -the server with the ``--certificates`` flag. +``--volume`` to mount the local directory into the ``/app/`` directory of the container. This allows +the SuperLink to access the files within the container. Finally, we pass the names of the +certificates to the SuperLink with the ``--certificates`` flag. .. code-block:: bash $ docker run --rm \ - -p 9091:9091 -p 9092:9092 -v ./certificates/:/app/ flwr/server:1.7.0-py3.11-ubuntu22.04 \ + -p 9091:9091 -p 9092:9092 --volume ./certificates/:/app/ flwr/superlink:1.8.0 \ --certificates ca.crt server.pem server.key -Using a different Flower or Python version -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Flower SuperNode +---------------- + +The SuperNode Docker image comes with a pre-installed version of Flower and serves as a base for +building your own SuperNode image. + +.. important:: + + The SuperNode Docker image currently works only with the 1.9.0-nightly release. A stable version + will be available when Flower 1.9.0 (stable) gets released (ETA: May). A SuperNode nightly image + must be paired with the corresponding SuperLink and ServerApp nightly images released on the same + day. To ensure the versions are in sync, using the concrete tag, e.g., ``1.9.0.dev20240501`` + instead of ``nightly`` is recommended. 
+ +We will use the ``quickstart-pytorch`` example, which you can find in +the Flower repository, to illustrate how you can dockerize your ClientApp. + +.. _SuperNode Prerequisites: + +Prerequisites +~~~~~~~~~~~~~ + +Before we can start, we need to meet a few prerequisites in our local development environment. +You can skip the first part if you want to run your ClientApp instead of the ``quickstart-pytorch`` +example. + +#. Clone the Flower repository. + + .. code-block:: bash + + $ git clone --depth=1 https://github.com/adap/flower.git && cd flower/examples/quickstart-pytorch + +#. Verify the Docker daemon is running. + + Please follow the first section on + :doc:`Run Flower using Docker ` + which covers this step in more detail. + + +Creating a SuperNode Dockerfile +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Let's assume the following project layout: + +.. code-block:: bash + + $ tree . + . + ├── client.py # ClientApp code + └── + +First, we need to create a ``requirements.txt`` file in the directory where the ``ClientApp`` code +is located. In the file, we list all the dependencies that the ClientApp requires. + +.. code-block:: + + flwr-datasets[vision]>=0.0.2,<1.0.0 + torch==2.1.1 + torchvision==0.16.1 + tqdm==4.66.3 + +.. important:: + + Note that `flwr `__ is already installed in the ``flwr/supernode`` + base image, so you only need to include other package dependencies in your ``requirements.txt``, + such as ``torch``, ``tensorflow``, etc. + +Next, we create a Dockerfile. If you use the ``quickstart-pytorch`` example, create a new +file called ``Dockerfile.supernode`` in ``examples/quickstart-pytorch``. + +The ``Dockerfile.supernode`` contains the instructions that assemble the SuperNode image. + +.. code-block:: dockerfile + + FROM flwr/supernode:nightly + + WORKDIR /app + + COPY requirements.txt . 
+ RUN python -m pip install -U --no-cache-dir -r requirements.txt && pyenv rehash + + COPY client.py ./ + ENTRYPOINT ["flower-client-app", "client:app"] + +In the first two lines, we instruct Docker to use the SuperNode image tagged ``nightly`` as a base +image and set our working directory to ``/app``. The following instructions will now be +executed in the ``/app`` directory. Next, we install the ClientApp dependencies by copying the +``requirements.txt`` file into the image and run ``pip install``. In the last two lines, +we copy the ``client.py`` module into the image and set the entry point to ``flower-client-app`` with +the argument ``client:app``. The argument is the object reference of the ClientApp +(``:``) that will be run inside the ClientApp. + +Building the SuperNode Docker image +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Next, we build the SuperNode Docker image by running the following command in the directory where +Dockerfile and ClientApp code are located. + +.. code-block:: bash + + $ docker build -f Dockerfile.supernode -t flwr_supernode:0.0.1 . + +We gave the image the name ``flwr_supernode``, and the tag ``0.0.1``. Remember that the here chosen +values only serve as an example. You can change them to your needs. + + +Running the SuperNode Docker image +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Now that we have built the SuperNode image, we can finally run it. + +.. code-block:: bash + + $ docker run --rm flwr_supernode:0.0.1 client:app \ + --insecure \ + --server 192.168.1.100:9092 + +Let's break down each part of this command: + +* ``docker run``: This is the command to run a new Docker container. +* ``--rm``: This option specifies that the container should be automatically removed when it stops. +* ``flwr_supernode:0.0.1``: The name the tag of the Docker image to use. +* ``--insecure``: This option enables insecure communication. + +.. 
attention:: + + The ``--insecure`` flag enables insecure communication (using HTTP, not HTTPS) and should only be + used for testing purposes. We strongly recommend enabling + `SSL `__ + when deploying to a production environment. + +* | ``--server 192.168.1.100:9092``: This option specifies the address of the SuperLinks Fleet + | API to connect to. Remember to update it with your SuperLink IP. + +.. note:: + + To test running Flower locally, you can create a + `bridge network `__, + use the ``--network`` argument and pass the name of the Docker network to run your SuperNodes. + +Any argument that comes after the tag is passed to the Flower SuperNode binary. +To see all available flags that the SuperNode supports, run: + +.. code-block:: bash + + $ docker run --rm flwr/supernode:nightly --help + +Enabling SSL for secure connections +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To enable SSL, we will need to mount a PEM-encoded root certificate into your SuperNode container. + +Assuming the certificate already exists locally, we can use the flag ``--volume`` to mount the local +certificate into the container's ``/app/`` directory. This allows the SuperNode to access the +certificate within the container. Use the ``--certificates`` flag when starting the container. + +.. code-block:: bash + + $ docker run --rm --volume ./ca.crt:/app/ca.crt flwr_supernode:0.0.1 client:app \ + --server 192.168.1.100:9092 \ + --certificates ca.crt + +Flower ServerApp +---------------- + +The procedure for building and running a ServerApp image is almost identical to the SuperNode image. + +Similar to the SuperNode image, the ServerApp Docker image comes with a pre-installed version of +Flower and serves as a base for building your own ServerApp image. + +We will use the same ``quickstart-pytorch`` example as we do in the Flower SuperNode section. +If you have not already done so, please follow the `SuperNode Prerequisites`_ before proceeding. 
+ + +Creating a ServerApp Dockerfile +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Let's assume the following project layout: + +.. code-block:: bash + + $ tree . + . + ├── server.py # ServerApp code + └── + +First, we need to create a Dockerfile in the directory where the ``ServerApp`` code is located. +If you use the ``quickstart-pytorch`` example, create a new file called ``Dockerfile.serverapp`` in +``examples/quickstart-pytorch``. + +The ``Dockerfile.serverapp`` contains the instructions that assemble the ServerApp image. + +.. code-block:: dockerfile + + FROM flwr/serverapp:1.8.0 + + WORKDIR /app + + COPY server.py ./ + ENTRYPOINT ["flower-server-app", "server:app"] + +In the first two lines, we instruct Docker to use the ServerApp image tagged ``1.8.0`` as a base +image and set our working directory to ``/app``. The following instructions will now be +executed in the ``/app`` directory. In the last two lines, we copy the ``server.py`` module into the +image and set the entry point to ``flower-server-app`` with the argument ``server:app``. +The argument is the object reference of the ServerApp (``:``) that will be run +inside the ServerApp container. + +Building the ServerApp Docker image +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Next, we build the ServerApp Docker image by running the following command in the directory where +Dockerfile and ServerApp code are located. + +.. code-block:: bash + + $ docker build -f Dockerfile.serverapp -t flwr_serverapp:0.0.1 . + +We gave the image the name ``flwr_serverapp``, and the tag ``0.0.1``. Remember that the here chosen +values only serve as an example. You can change them to your needs. + + +Running the ServerApp Docker image +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Now that we have built the ServerApp image, we can finally run it. + +.. 
code-block:: bash + + $ docker run --rm flwr_serverapp:0.0.1 \ + --insecure \ + --server 192.168.1.100:9091 + +Let's break down each part of this command: + +* ``docker run``: This is the command to run a new Docker container. +* ``--rm``: This option specifies that the container should be automatically removed when it stops. +* ``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use. +* ``--insecure``: This option enables insecure communication. + +.. attention:: + + The ``--insecure`` flag enables insecure communication (using HTTP, not HTTPS) and should only be + used for testing purposes. We strongly recommend enabling + `SSL `__ + when deploying to a production environment. + +* | ``--server 192.168.1.100:9091``: This option specifies the address of the SuperLinks Driver + | API to connect to. Remember to update it with your SuperLink IP. + +.. note:: + To test running Flower locally, you can create a + `bridge network `__, + use the ``--network`` argument and pass the name of the Docker network to run your ServerApps. + +Any argument that comes after the tag is passed to the Flower ServerApp binary. +To see all available flags that the ServerApp supports, run: + +.. code-block:: bash + + $ docker run --rm flwr/serverapp:1.8.0 --help + +Enabling SSL for secure connections +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To enable SSL, we will need to mount a PEM-encoded root certificate into your ServerApp container. + +Assuming the certificate already exists locally, we can use the flag ``--volume`` to mount the local +certificate into the container's ``/app/`` directory. This allows the ServerApp to access the +certificate within the container. Use the ``--certificates`` flag when starting the container. + +.. 
code-block:: bash + + $ docker run --rm --volume ./ca.crt:/app/ca.crt flwr_serverapp:0.0.1 client:app \ + --server 192.168.1.100:9091 \ + --certificates ca.crt + +Advanced Docker options +----------------------- + +Using a different Flower version +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -If you want to use a different version of Flower or Python, you can do so by changing the tag. -All versions we provide are available on `Docker Hub `_. +If you want to use a different version of Flower, for example Flower nightly, you can do so by +changing the tag. All available versions are on +`Docker Hub `__. Pinning a Docker image to a specific version ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -118,19 +404,19 @@ updates of system dependencies that should not change the functionality of Flowe want to ensure that you always use the same image, you can specify the hash of the image instead of the tag. -The following command returns the current image hash referenced by the ``server:1.7.0-py3.11-ubuntu22.04`` tag: +The following command returns the current image hash referenced by the ``superlink:1.8.0`` tag: .. code-block:: bash - $ docker inspect --format='{{index .RepoDigests 0}}' flwr/server:1.7.0-py3.11-ubuntu22.04 - flwr/server@sha256:c4be5012f9d73e3022e98735a889a463bb2f4f434448ebc19c61379920b1b327 + $ docker inspect --format='{{index .RepoDigests 0}}' flwr/superlink:1.8.0 + flwr/superlink@sha256:1b855d1fa4e344e4d95db99793f2bb35d8c63f6a1decdd736863bfe4bb0fe46c -Next, we can pin the hash when running a new server container: +Next, we can pin the hash when running a new SuperLink container: .. code-block:: bash $ docker run \ - --rm flwr/server@sha256:c4be5012f9d73e3022e98735a889a463bb2f4f434448ebc19c61379920b1b327 \ + --rm flwr/superlink@sha256:1b855d1fa4e344e4d95db99793f2bb35d8c63f6a1decdd736863bfe4bb0fe46c \ --insecure Setting environment variables @@ -141,4 +427,4 @@ To set a variable inside a Docker container, you can use the ``-e = .. 
code-block:: bash $ docker run -e FLWR_TELEMETRY_ENABLED=0 \ - --rm flwr/server:1.7.0-py3.11-ubuntu22.04 --insecure + --rm flwr/superlink:1.8.0 --insecure diff --git a/doc/source/how-to-upgrade-to-flower-next.rst b/doc/source/how-to-upgrade-to-flower-next.rst new file mode 100644 index 000000000000..8c8f3c3f8fd7 --- /dev/null +++ b/doc/source/how-to-upgrade-to-flower-next.rst @@ -0,0 +1,333 @@ +Upgrade to Flower Next +====================== + +Welcome to the migration guide for updating Flower to Flower Next! Whether you're a seasoned user +or just getting started, this guide will help you smoothly transition your existing setup to take +advantage of the latest features and improvements in Flower Next, starting from version 1.8. + +.. note:: + This guide shows how to reuse pre-``1.8`` Flower code with minimum code changes by + using the *compatibility layer* in Flower Next. In another guide, we will show how + to run Flower Next end-to-end with pure Flower Next APIs. + +Let's dive in! + +.. + Generate link text as literal. Refs: + - https://stackoverflow.com/q/71651598 + - https://github.com/jgm/pandoc/issues/3973#issuecomment-337087394 + +.. |clientapp_link| replace:: ``ClientApp()`` +.. |serverapp_link| replace:: ``ServerApp()`` +.. |startclient_link| replace:: ``start_client()`` +.. |startserver_link| replace:: ``start_server()`` +.. |startsim_link| replace:: ``start_simulation()`` +.. |runsimcli_link| replace:: ``flower-simulation`` +.. |runsim_link| replace:: ``run_simulation()`` +.. |flowernext_superlink_link| replace:: ``flower-superlink`` +.. |flowernext_clientapp_link| replace:: ``flower-client-app`` +.. |flowernext_serverapp_link| replace:: ``flower-server-app`` +.. _clientapp_link: ref-api/flwr.client.ClientApp.html +.. _serverapp_link: ref-api/flwr.server.ServerApp.html +.. _startclient_link: ref-api/flwr.client.start_client.html +.. _startserver_link: ref-api/flwr.server.start_server.html +.. 
_startsim_link: ref-api/flwr.simulation.start_simulation.html +.. _runsimcli_link: ref-api/flwr.simulation.run_simulation_from_cli.html +.. _runsim_link: ref-api/flwr.simulation.run_simulation.html +.. _flowernext_superlink_link: ref-api-cli.html#flower-superlink +.. _flowernext_clientapp_link: ref-api-cli.html#flower-client-app +.. _flowernext_serverapp_link: ref-api-cli.html#flower-server-app + +Install update +-------------- + +Using pip +~~~~~~~~~ + +Here's how to update an existing installation of Flower to Flower Next with ``pip``: + +.. code-block:: bash + + $ python -m pip install -U flwr + +or if you need Flower Next with simulation: + +.. code-block:: bash + + $ python -m pip install -U flwr[simulation] + + +Ensure you set the following version constraint in your ``requirements.txt``: + +.. code-block:: + + # Without simulation support + flwr>=1.8,<2.0 + + # With simulation support + flwr[simulation]>=1.8, <2.0 + +or ``pyproject.toml``: + +.. code-block:: toml + + # Without simulation support + dependencies = ["flwr>=1.8,<2.0"] + + # With simulation support + dependencies = ["flwr[simulation]>=1.8,<2.0"] + +Using Poetry +~~~~~~~~~~~~ + +Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before running ``poetry install``). + +Ensure you set the following version constraint in your ``pyproject.toml``: + +.. code-block:: toml + + [tool.poetry.dependencies] + python = "^3.8" + + # Without simulation support + flwr = ">=1.8,<2.0" + + # With simulation support + flwr = { version = ">=1.8,<2.0", extras = ["simulation"] } + +Required changes +---------------- + +In Flower Next, the *infrastructure* and *application layers* have been decoupled. +Instead of starting a client in code via ``start_client()``, you create a |clientapp_link|_ and start it via the command line. 
+Instead of starting a server in code via ``start_server()``, you create a |serverapp_link|_ and start it via the command line. +The long-running components of server and client are called SuperLink and SuperNode. +The following non-breaking changes that require manual updates and allow you to run your project both in the traditional way and in the Flower Next way: + +|clientapp_link|_ +~~~~~~~~~~~~~~~~~ +- Wrap your existing client with |clientapp_link|_ instead of launching it via + |startclient_link|_. Here's an example: + +.. code-block:: python + :emphasize-lines: 5,11 + + # Flower 1.8 + def client_fn(cid: str): + return flwr.client.FlowerClient().to_client() + + app = flwr.client.ClientApp( + client_fn=client_fn, + ) + + # Flower 1.7 + if __name__ == "__main__": + flwr.client.start_client( + server_address="127.0.0.1:8080", + client=flwr.client.FlowerClient().to_client(), + ) + +|serverapp_link|_ +~~~~~~~~~~~~~~~~~ +- Wrap your existing strategy with |serverapp_link|_ instead of starting the server + via |startserver_link|_. Here's an example: + +.. code-block:: python + :emphasize-lines: 2,9 + + # Flower 1.8 + app = flwr.server.ServerApp( + config=config, + strategy=strategy, + ) + + # Flower 1.7 + if __name__ == "__main__": + flwr.server.start_server( + server_address="0.0.0.0:8080", + config=config, + strategy=strategy, + ) + +Deployment +~~~~~~~~~~ +- Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, in sequence, + |flowernext_clientapp_link|_ (2x) and |flowernext_serverapp_link|_. There is no need to + execute `client.py` and `server.py` as Python scripts. +- Here's an example to start the server without HTTPS (only for prototyping): + +.. 
code-block:: bash + + # Start a Superlink + $ flower-superlink --insecure + + # In a new terminal window, start a long-running SuperNode + $ flower-client-app client:app --insecure + + # In another terminal window, start another long-running SuperNode (at least 2 SuperNodes are required) + $ flower-client-app client:app --insecure + + # In yet another terminal window, run the ServerApp (this starts the actual training run) + $ flower-server-app server:app --insecure + +- Here's another example to start with HTTPS. Use the ``--certificates`` command line + argument to pass paths to (CA certificate, server certificate, and server private key). + +.. code-block:: bash + + # Start a secure Superlink + $ flower-superlink --certificates \ + \ + \ + + + # In a new terminal window, start a long-running secure SuperNode + $ flower-client-app client:app \ + --root-certificates \ + --server 127.0.0.1:9092 + + # In another terminal window, start another long-running secure SuperNode (at least 2 SuperNodes are required) + $ flower-client-app client:app \ + --root-certificates \ + --server 127.0.0.1:9092 + + # In yet another terminal window, run the ServerApp (this starts the actual training run) + $ flower-server-app server:app \ + --root-certificates \ + --server 127.0.0.1:9091 + +Simulation in CLI +~~~~~~~~~~~~~~~~~ +- Wrap your existing client and strategy with |clientapp_link|_ and |serverapp_link|_, + respectively. There is no need to use |startsim_link|_ anymore. Here's an example: + +.. code-block:: python + :emphasize-lines: 9,13,20 + + # Regular Flower client implementation + class FlowerClient(NumPyClient): + # ... + + # Flower 1.8 + def client_fn(cid: str): + return FlowerClient().to_client() + + client_app = flwr.client.ClientApp( + client_fn=client_fn, + ) + + server_app = flwr.server.ServerApp( + config=config, + strategy=strategy, + ) + + # Flower 1.7 + if __name__ == "__main__": + hist = flwr.simulation.start_simulation( + num_clients=100, + ... 
+ ) + +- Run |runsimcli_link|_ in CLI and point to the ``server_app`` / ``client_app`` object in the + code instead of executing the Python script. Here's an example (assuming the + ``server_app`` and ``client_app`` objects are in a ``sim.py`` module): + +.. code-block:: bash + + # Flower 1.8 + $ flower-simulation \ + --server-app=sim:server_app \ + --client-app=sim:client_app \ + --num-supernodes=100 + +.. code-block:: bash + + # Flower 1.7 + $ python sim.py + +- Set default resources for each |clientapp_link|_ using the ``--backend-config`` command + line argument instead of setting the ``client_resources`` argument in + |startsim_link|_. Here's an example: + +.. code-block:: bash + :emphasize-lines: 6 + + # Flower 1.8 + $ flower-simulation \ + --client-app=sim:client_app \ + --server-app=sim:server_app \ + --num-supernodes=100 \ + --backend-config='{"client_resources": {"num_cpus": 2, "num_gpus": 0.25}}' + +.. code-block:: python + :emphasize-lines: 5 + + # Flower 1.7 (in `sim.py`) + if __name__ == "__main__": + hist = flwr.simulation.start_simulation( + num_clients=100, + client_resources = {'num_cpus': 2, "num_gpus": 0.25}, + ... + ) + +Simulation in a Notebook +~~~~~~~~~~~~~~~~~~~~~~~~ +- Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's an example: + +.. code-block:: python + :emphasize-lines: 19,27 + + NUM_CLIENTS = + + def client_fn(cid: str): + # ... 
+ return FlowerClient().to_client() + + client_app = flwr.client.ClientApp( + client_fn=client_fn, + ) + + server_app = flwr.server.ServerApp( + config=config, + strategy=strategy, + ) + + backend_config = {"client_resources": {"num_cpus": 2, "num_gpus": 0.25}} + + # Flower 1.8 + flwr.simulation.run_simulation( + server_app=server_app, + client_app=client_app, + num_supernodes=NUM_CLIENTS, + backend_config=backend_config, + ) + + # Flower 1.7 + flwr.simulation.start_simulation( + client_fn=client_fn, + num_clients=NUM_CLIENTS, + config=config, + strategy=strategy, + client_resources=backend_config["client_resources"], + ) + + +Further help +------------ + +Some official `Flower code examples `_ are already +updated to Flower Next so they can serve as a reference for using the Flower Next API. If there are +further questions, `join the Flower Slack `_ and use the channel ``#questions``. +You can also `participate in Flower Discuss `_ where you can find us +answering questions, or share and learn from others about migrating to Flower Next. + +.. admonition:: Important + :class: important + + As we continuously enhance Flower Next at a rapid pace, we'll be periodically + updating this guide. Please feel free to share any feedback with us! + +.. + [TODO] Add links to Flower Next 101 and Flower Glossary + +Happy migrating! 
🚀 diff --git a/doc/source/index.rst b/doc/source/index.rst index c634ce939e73..df41d9d4ccb0 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -53,13 +53,12 @@ A learning-oriented series of federated learning tutorials, the best place to st tutorial-quickstart-pandas tutorial-quickstart-fastai tutorial-quickstart-pytorch-lightning - tutorial-quickstart-mxnet tutorial-quickstart-scikitlearn tutorial-quickstart-xgboost tutorial-quickstart-android tutorial-quickstart-ios -QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow ` | :doc:`🤗 Transformers ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`MXNet ` | :doc:`scikit-learn ` | :doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS ` +QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow ` | :doc:`🤗 Transformers ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS ` We also made video tutorials for PyTorch: @@ -90,17 +89,18 @@ Problem-oriented how-to guides show step-by-step how to achieve a specific goal. how-to-monitor-simulation how-to-configure-logging how-to-enable-ssl-connections - how-to-upgrade-to-flower-1.0 how-to-use-built-in-mods - how-to-run-flower-using-docker how-to-use-differential-privacy + how-to-authenticate-supernodes + how-to-run-flower-using-docker + how-to-upgrade-to-flower-1.0 + how-to-upgrade-to-flower-next .. toctree:: :maxdepth: 1 :caption: Legacy example guides example-pytorch-from-centralized-to-federated - example-mxnet-walk-through example-jax-from-centralized-to-federated example-fedbn-pytorch-from-centralized-to-federated diff --git a/doc/source/ref-api-cli.rst b/doc/source/ref-api-cli.rst index 63579143755d..296c2219a065 100644 --- a/doc/source/ref-api-cli.rst +++ b/doc/source/ref-api-cli.rst @@ -1,43 +1,33 @@ Flower CLI reference ==================== -.. _flower-superlink-apiref: - -flower-superlink -~~~~~~~~~~~~~~~~ - -.. 
argparse:: - :module: flwr.server.app - :func: _parse_args_run_superlink - :prog: flower-superlink +.. _flower-simulation-apiref: -.. _flower-driver-api-apiref: - -flower-driver-api +flower-simulation ~~~~~~~~~~~~~~~~~ .. argparse:: - :module: flwr.server.app - :func: _parse_args_run_driver_api - :prog: flower-driver-api + :module: flwr.simulation.run_simulation + :func: _parse_args_run_simulation + :prog: flower-simulation -.. _flower-fleet-api-apiref: +.. _flower-superlink-apiref: -flower-fleet-api +flower-superlink ~~~~~~~~~~~~~~~~ .. argparse:: :module: flwr.server.app - :func: _parse_args_run_fleet_api - :prog: flower-fleet-api + :func: _parse_args_run_superlink + :prog: flower-superlink -.. _flower-client-app-apiref: +.. _flower-driver-api-apiref: flower-client-app ~~~~~~~~~~~~~~~~~ .. argparse:: - :module: flwr.client.app + :module: flwr.client.supernode.app :func: _parse_args_run_client_app :prog: flower-client-app diff --git a/doc/source/ref-changelog.md b/doc/source/ref-changelog.md index 1a6524d29353..c742b8cd9cbe 100644 --- a/doc/source/ref-changelog.md +++ b/doc/source/ref-changelog.md @@ -6,6 +6,72 @@ ### Incompatible changes +None + +## v1.8.0 (2024-04-03) + +### Thanks to our contributors + +We would like to give our special thanks to all the contributors who made the new version of Flower possible (in `git shortlog` order): + +`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, `tabdar-khan` + +### What's new? 
+ +- **Introduce Flower Next high-level API (stable)** ([#3002](https://github.com/adap/flower/pull/3002), [#2934](https://github.com/adap/flower/pull/2934), [#2958](https://github.com/adap/flower/pull/2958), [#3173](https://github.com/adap/flower/pull/3173), [#3174](https://github.com/adap/flower/pull/3174), [#2923](https://github.com/adap/flower/pull/2923), [#2691](https://github.com/adap/flower/pull/2691), [#3079](https://github.com/adap/flower/pull/3079), [#2961](https://github.com/adap/flower/pull/2961), [#2924](https://github.com/adap/flower/pull/2924), [#3166](https://github.com/adap/flower/pull/3166), [#3031](https://github.com/adap/flower/pull/3031), [#3057](https://github.com/adap/flower/pull/3057), [#3000](https://github.com/adap/flower/pull/3000), [#3113](https://github.com/adap/flower/pull/3113), [#2957](https://github.com/adap/flower/pull/2957), [#3183](https://github.com/adap/flower/pull/3183), [#3180](https://github.com/adap/flower/pull/3180), [#3035](https://github.com/adap/flower/pull/3035), [#3189](https://github.com/adap/flower/pull/3189), [#3185](https://github.com/adap/flower/pull/3185), [#3190](https://github.com/adap/flower/pull/3190), [#3191](https://github.com/adap/flower/pull/3191), [#3195](https://github.com/adap/flower/pull/3195), [#3197](https://github.com/adap/flower/pull/3197)) + + The Flower Next high-level API is stable! Flower Next is the future of Flower - all new features (like Flower Mods) will be built on top of it. You can start to migrate your existing projects to Flower Next by using `ServerApp` and `ClientApp` (check out `quickstart-pytorch` or `quickstart-tensorflow`, a detailed migration guide will follow shortly). Flower Next allows you to run multiple projects concurrently (we call this multi-run) and execute the same project in either simulation environments or deployment environments without having to change a single line of code. The best part? 
It's fully compatible with existing Flower projects that use `Strategy`, `NumPyClient` & co. + +- **Introduce Flower Next low-level API (preview)** ([#3062](https://github.com/adap/flower/pull/3062), [#3034](https://github.com/adap/flower/pull/3034), [#3069](https://github.com/adap/flower/pull/3069)) + + In addition to the Flower Next *high-level* API that uses `Strategy`, `NumPyClient` & co, Flower 1.8 also comes with a preview version of the new Flower Next *low-level* API. The low-level API allows for granular control of every aspect of the learning process by sending/receiving individual messages to/from client nodes. The new `ServerApp` supports registering a custom `main` function that allows writing custom training loops for methods like async FL, cyclic training, or federated analytics. The new `ClientApp` supports registering `train`, `evaluate` and `query` functions that can access the raw message received from the `ServerApp`. New abstractions like `RecordSet`, `Message` and `Context` further enable sending multiple models, multiple sets of config values and metrics, stateful computations on the client node and implementations of custom SMPC protocols, to name just a few. + +- **Introduce Flower Mods (preview)** ([#3054](https://github.com/adap/flower/pull/3054), [#2911](https://github.com/adap/flower/pull/2911), [#3083](https://github.com/adap/flower/pull/3083)) + + Flower Modifiers (we call them Mods) can intercept messages and analyze, edit or handle them directly. Mods can be used to develop pluggable modules that work across different projects. Flower 1.8 already includes mods to log the size of a message, the number of parameters sent over the network, differential privacy with fixed clipping and adaptive clipping, local differential privacy and secure aggregation protocols SecAgg and SecAgg+. The Flower Mods API is released as a preview, but researchers can already use it to experiment with arbitrary SMPC protocols. 
+ +- **Fine-tune LLMs with LLM FlowerTune** ([#3029](https://github.com/adap/flower/pull/3029), [#3089](https://github.com/adap/flower/pull/3089), [#3092](https://github.com/adap/flower/pull/3092), [#3100](https://github.com/adap/flower/pull/3100), [#3114](https://github.com/adap/flower/pull/3114), [#3162](https://github.com/adap/flower/pull/3162), [#3172](https://github.com/adap/flower/pull/3172)) + + We are introducing LLM FlowerTune, an introductory example that demonstrates federated LLM fine-tuning of pre-trained Llama2 models on the Alpaca-GPT4 dataset. The example is built to be easily adapted to use different models and/or datasets. Read our blog post [LLM FlowerTune: Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14-llm-flowertune-federated-llm-finetuning-with-flower/) for more details. + +- **Introduce built-in Differential Privacy (preview)** ([#2798](https://github.com/adap/flower/pull/2798), [#2959](https://github.com/adap/flower/pull/2959), [#3038](https://github.com/adap/flower/pull/3038), [#3147](https://github.com/adap/flower/pull/3147), [#2909](https://github.com/adap/flower/pull/2909), [#2893](https://github.com/adap/flower/pull/2893), [#2892](https://github.com/adap/flower/pull/2892), [#3039](https://github.com/adap/flower/pull/3039), [#3074](https://github.com/adap/flower/pull/3074)) + + Built-in Differential Privacy is here! Flower supports both central and local differential privacy (DP). Central DP can be configured with either fixed or adaptive clipping. The clipping can happen either on the server-side or the client-side. Local DP does both clipping and noising on the client-side. A new documentation page [explains Differential Privacy approaches](https://flower.ai/docs/framework/explanation-differential-privacy.html) and a new how-to guide describes [how to use the new Differential Privacy components](https://flower.ai/docs/framework/how-to-use-differential-privacy.html) in Flower. 
+ +- **Introduce built-in Secure Aggregation (preview)** ([#3120](https://github.com/adap/flower/pull/3120), [#3110](https://github.com/adap/flower/pull/3110), [#3108](https://github.com/adap/flower/pull/3108)) + + Built-in Secure Aggregation is here! Flower now supports different secure aggregation protocols out-of-the-box. The best part? You can add secure aggregation to your Flower projects with only a few lines of code. In this initial release, we include support for SecAgg and SecAgg+, but more protocols will be implemented shortly. We'll also add detailed docs that explain secure aggregation and how to use it in Flower. You can already check out the new code example that shows how to use Flower to easily combine Federated Learning, Differential Privacy and Secure Aggregation in the same project. + +- **Introduce** `flwr` **CLI (preview)** ([#2942](https://github.com/adap/flower/pull/2942), [#3055](https://github.com/adap/flower/pull/3055), [#3111](https://github.com/adap/flower/pull/3111), [#3130](https://github.com/adap/flower/pull/3130), [#3136](https://github.com/adap/flower/pull/3136), [#3094](https://github.com/adap/flower/pull/3094), [#3059](https://github.com/adap/flower/pull/3059), [#3049](https://github.com/adap/flower/pull/3049), [#3142](https://github.com/adap/flower/pull/3142)) + + A new `flwr` CLI command allows creating new Flower projects (`flwr new`) and then running them using the Simulation Engine (`flwr run`). 
+ +- **Introduce Flower Next Simulation Engine** ([#3024](https://github.com/adap/flower/pull/3024), [#3061](https://github.com/adap/flower/pull/3061), [#2997](https://github.com/adap/flower/pull/2997), [#2783](https://github.com/adap/flower/pull/2783), [#3184](https://github.com/adap/flower/pull/3184), [#3075](https://github.com/adap/flower/pull/3075), [#3047](https://github.com/adap/flower/pull/3047), [#2998](https://github.com/adap/flower/pull/2998), [#3009](https://github.com/adap/flower/pull/3009), [#3008](https://github.com/adap/flower/pull/3008)) + + The Flower Simulation Engine can now run Flower Next projects. For notebook environments, there's also a new `run_simulation` function that can run `ServerApp` and `ClientApp`. + +- **Handle SuperNode connection errors** ([#2969](https://github.com/adap/flower/pull/2969)) + + A SuperNode will now try to reconnect indefinitely to the SuperLink in case of connection errors. The arguments `--max-retries` and `--max-wait-time` can now be passed to the `flower-client-app` command. `--max-retries` will define the number of attempts the client should make before it gives up trying to reconnect to the SuperLink, and `--max-wait-time` defines the time before the SuperNode gives up trying to reconnect to the SuperLink. + +- **General updates to Flower Baselines** ([#2904](https://github.com/adap/flower/pull/2904), [#2482](https://github.com/adap/flower/pull/2482), [#2985](https://github.com/adap/flower/pull/2985), [#2968](https://github.com/adap/flower/pull/2968)) + + There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) baseline. Several other baselines have been updated as well. 
+ +- **Improve documentation and translations** ([#3050](https://github.com/adap/flower/pull/3050), [#3044](https://github.com/adap/flower/pull/3044), [#3043](https://github.com/adap/flower/pull/3043), [#2986](https://github.com/adap/flower/pull/2986), [#3041](https://github.com/adap/flower/pull/3041), [#3046](https://github.com/adap/flower/pull/3046), [#3042](https://github.com/adap/flower/pull/3042), [#2978](https://github.com/adap/flower/pull/2978), [#2952](https://github.com/adap/flower/pull/2952), [#3167](https://github.com/adap/flower/pull/3167), [#2953](https://github.com/adap/flower/pull/2953), [#3045](https://github.com/adap/flower/pull/3045), [#2654](https://github.com/adap/flower/pull/2654), [#3082](https://github.com/adap/flower/pull/3082), [#2990](https://github.com/adap/flower/pull/2990), [#2989](https://github.com/adap/flower/pull/2989)) + + As usual, we merged many smaller and larger improvements to the documentation. A special thank you goes to [Sebastian van der Voort](https://github.com/svdvoort) for landing a big documentation PR! + +- **General updates to Flower Examples** ([3134](https://github.com/adap/flower/pull/3134), [2996](https://github.com/adap/flower/pull/2996), [2930](https://github.com/adap/flower/pull/2930), [2967](https://github.com/adap/flower/pull/2967), [2467](https://github.com/adap/flower/pull/2467), [2910](https://github.com/adap/flower/pull/2910), [#2918](https://github.com/adap/flower/pull/2918), [#2773](https://github.com/adap/flower/pull/2773), [#3063](https://github.com/adap/flower/pull/3063), [#3116](https://github.com/adap/flower/pull/3116), [#3117](https://github.com/adap/flower/pull/3117)) + + Two new examples show federated training of a Vision Transformer (ViT) and federated learning in a medical context using the popular MONAI library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the new Flower Next `ServerApp` and `ClientApp`. Many other examples received considerable updates as well. 
+ +- **General improvements** ([#3171](https://github.com/adap/flower/pull/3171), [3099](https://github.com/adap/flower/pull/3099), [3003](https://github.com/adap/flower/pull/3003), [3145](https://github.com/adap/flower/pull/3145), [3017](https://github.com/adap/flower/pull/3017), [3085](https://github.com/adap/flower/pull/3085), [3012](https://github.com/adap/flower/pull/3012), [3119](https://github.com/adap/flower/pull/3119), [2991](https://github.com/adap/flower/pull/2991), [2970](https://github.com/adap/flower/pull/2970), [2980](https://github.com/adap/flower/pull/2980), [3086](https://github.com/adap/flower/pull/3086), [2932](https://github.com/adap/flower/pull/2932), [2928](https://github.com/adap/flower/pull/2928), [2941](https://github.com/adap/flower/pull/2941), [2933](https://github.com/adap/flower/pull/2933), [3181](https://github.com/adap/flower/pull/3181), [2973](https://github.com/adap/flower/pull/2973), [2992](https://github.com/adap/flower/pull/2992), [2915](https://github.com/adap/flower/pull/2915), [3040](https://github.com/adap/flower/pull/3040), [3022](https://github.com/adap/flower/pull/3022), [3032](https://github.com/adap/flower/pull/3032), [2902](https://github.com/adap/flower/pull/2902), [2931](https://github.com/adap/flower/pull/2931), [3005](https://github.com/adap/flower/pull/3005), [3132](https://github.com/adap/flower/pull/3132), [3115](https://github.com/adap/flower/pull/3115), [2944](https://github.com/adap/flower/pull/2944), [3064](https://github.com/adap/flower/pull/3064), [3106](https://github.com/adap/flower/pull/3106), [2974](https://github.com/adap/flower/pull/2974), [3178](https://github.com/adap/flower/pull/3178), [2993](https://github.com/adap/flower/pull/2993), [3186](https://github.com/adap/flower/pull/3186), [3091](https://github.com/adap/flower/pull/3091), [3125](https://github.com/adap/flower/pull/3125), [3093](https://github.com/adap/flower/pull/3093), [3013](https://github.com/adap/flower/pull/3013), 
[3033](https://github.com/adap/flower/pull/3033), [3133](https://github.com/adap/flower/pull/3133), [3068](https://github.com/adap/flower/pull/3068), [2916](https://github.com/adap/flower/pull/2916), [2975](https://github.com/adap/flower/pull/2975), [2984](https://github.com/adap/flower/pull/2984), [2846](https://github.com/adap/flower/pull/2846), [3077](https://github.com/adap/flower/pull/3077), [3143](https://github.com/adap/flower/pull/3143), [2921](https://github.com/adap/flower/pull/2921), [3101](https://github.com/adap/flower/pull/3101), [2927](https://github.com/adap/flower/pull/2927), [2995](https://github.com/adap/flower/pull/2995), [2972](https://github.com/adap/flower/pull/2972), [2912](https://github.com/adap/flower/pull/2912), [3065](https://github.com/adap/flower/pull/3065), [3028](https://github.com/adap/flower/pull/3028), [2922](https://github.com/adap/flower/pull/2922), [2982](https://github.com/adap/flower/pull/2982), [2914](https://github.com/adap/flower/pull/2914), [3179](https://github.com/adap/flower/pull/3179), [3080](https://github.com/adap/flower/pull/3080), [2994](https://github.com/adap/flower/pull/2994), [3187](https://github.com/adap/flower/pull/3187), [2926](https://github.com/adap/flower/pull/2926), [3018](https://github.com/adap/flower/pull/3018), [3144](https://github.com/adap/flower/pull/3144), [3011](https://github.com/adap/flower/pull/3011), [#3152](https://github.com/adap/flower/pull/3152), [#2836](https://github.com/adap/flower/pull/2836), [#2929](https://github.com/adap/flower/pull/2929), [#2943](https://github.com/adap/flower/pull/2943), [#2955](https://github.com/adap/flower/pull/2955), [#2954](https://github.com/adap/flower/pull/2954)) + +### Incompatible changes + +None + ## v1.7.0 (2024-02-05) ### Thanks to our contributors diff --git a/doc/source/tutorial-quickstart-mxnet.rst b/doc/source/tutorial-quickstart-mxnet.rst deleted file mode 100644 index fe582f793280..000000000000 --- a/doc/source/tutorial-quickstart-mxnet.rst 
+++ /dev/null @@ -1,296 +0,0 @@ -.. _quickstart-mxnet: - - -Quickstart MXNet -================ - -.. warning:: MXNet is no longer maintained and has been moved into `Attic `_. As a result, we would encourage you to use other ML frameworks alongside Flower, for example, PyTorch. This tutorial might be removed in future versions of Flower. - -.. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with MXNet to train a Sequential model on MNIST. - -In this tutorial, we will learn how to train a :code:`Sequential` model on MNIST using Flower and MXNet. - -It is recommended to create a virtual environment and run everything within this :doc:`virtualenv `. - -Our example consists of one *server* and two *clients* all having the same model. - -*Clients* are responsible for generating individual model parameter updates for the model based on their local datasets. -These updates are then sent to the *server* which will aggregate them to produce an updated global model. Finally, the *server* sends this improved version of the model back to each *client*. -A complete cycle of parameters updates is called a *round*. - -Now that we have a rough idea of what is going on, let's get started. We first need to install Flower. You can do this by running: - -.. code-block:: shell - - $ pip install flwr - -Since we want to use MXNet, let's go ahead and install it: - -.. code-block:: shell - - $ pip install mxnet - - -Flower Client -------------- - -Now that we have all our dependencies installed, let's run a simple distributed training with two clients and one server. Our training procedure and network architecture are based on MXNet´s `Hand-written Digit Recognition tutorial `_. - -In a file called :code:`client.py`, import Flower and MXNet related packages: - -.. 
code-block:: python - - import flwr as fl - - import numpy as np - - import mxnet as mx - from mxnet import nd - from mxnet import gluon - from mxnet.gluon import nn - from mxnet import autograd as ag - import mxnet.ndarray as F - -In addition, define the device allocation in MXNet with: - -.. code-block:: python - - DEVICE = [mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()] - -We use MXNet to load MNIST, a popular image classification dataset of handwritten digits for machine learning. The MXNet utility :code:`mx.test_utils.get_mnist()` downloads the training and test data. - -.. code-block:: python - - def load_data(): - print("Download Dataset") - mnist = mx.test_utils.get_mnist() - batch_size = 100 - train_data = mx.io.NDArrayIter( - mnist["train_data"], mnist["train_label"], batch_size, shuffle=True - ) - val_data = mx.io.NDArrayIter(mnist["test_data"], mnist["test_label"], batch_size) - return train_data, val_data - -Define the training and loss with MXNet. We train the model by looping over the dataset, measure the corresponding loss, and optimize it. - -.. 
code-block:: python - - def train(net, train_data, epoch): - trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.03}) - trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.01}) - accuracy_metric = mx.metric.Accuracy() - loss_metric = mx.metric.CrossEntropy() - metrics = mx.metric.CompositeEvalMetric() - for child_metric in [accuracy_metric, loss_metric]: - metrics.add(child_metric) - softmax_cross_entropy_loss = gluon.loss.SoftmaxCrossEntropyLoss() - for i in range(epoch): - train_data.reset() - num_examples = 0 - for batch in train_data: - data = gluon.utils.split_and_load( - batch.data[0], ctx_list=DEVICE, batch_axis=0 - ) - label = gluon.utils.split_and_load( - batch.label[0], ctx_list=DEVICE, batch_axis=0 - ) - outputs = [] - with ag.record(): - for x, y in zip(data, label): - z = net(x) - loss = softmax_cross_entropy_loss(z, y) - loss.backward() - outputs.append(z.softmax()) - num_examples += len(x) - metrics.update(label, outputs) - trainer.step(batch.data[0].shape[0]) - trainings_metric = metrics.get_name_value() - print("Accuracy & loss at epoch %d: %s" % (i, trainings_metric)) - return trainings_metric, num_examples - - -Next, we define the validation of our machine learning model. We loop over the test set and measure both loss and accuracy on the test set. - -.. 
code-block:: python - - def test(net, val_data): - accuracy_metric = mx.metric.Accuracy() - loss_metric = mx.metric.CrossEntropy() - metrics = mx.metric.CompositeEvalMetric() - for child_metric in [accuracy_metric, loss_metric]: - metrics.add(child_metric) - val_data.reset() - num_examples = 0 - for batch in val_data: - data = gluon.utils.split_and_load(batch.data[0], ctx_list=DEVICE, batch_axis=0) - label = gluon.utils.split_and_load( - batch.label[0], ctx_list=DEVICE, batch_axis=0 - ) - outputs = [] - for x in data: - outputs.append(net(x).softmax()) - num_examples += len(x) - metrics.update(label, outputs) - return metrics.get_name_value(), num_examples - -After defining the training and testing of a MXNet machine learning model, we use these functions to implement a Flower client. - -Our Flower clients will use a simple :code:`Sequential` model: - -.. code-block:: python - - def main(): - def model(): - net = nn.Sequential() - net.add(nn.Dense(256, activation="relu")) - net.add(nn.Dense(64, activation="relu")) - net.add(nn.Dense(10)) - net.collect_params().initialize() - return net - - train_data, val_data = load_data() - - model = model() - init = nd.random.uniform(shape=(2, 784)) - model(init) - -After loading the dataset with :code:`load_data()` we perform one forward propagation to initialize the model and model parameters with :code:`model(init)`. Next, we implement a Flower client. - -The Flower server interacts with clients through an interface called -:code:`Client`. When the server selects a particular client for training, it -sends training instructions over the network. The client receives those -instructions and calls one of the :code:`Client` methods to run your code -(i.e., to train the neural network we defined earlier). - -Flower provides a convenience class called :code:`NumPyClient` which makes it -easier to implement the :code:`Client` interface when your workload uses MXNet. 
-Implementing :code:`NumPyClient` usually means defining the following methods -(:code:`set_parameters` is optional though): - -#. :code:`get_parameters` - * return the model weight as a list of NumPy ndarrays -#. :code:`set_parameters` (optional) - * update the local model weights with the parameters received from the server -#. :code:`fit` - * set the local model weights - * train the local model - * receive the updated local model weights -#. :code:`evaluate` - * test the local model - -They can be implemented in the following way: - -.. code-block:: python - - class MNISTClient(fl.client.NumPyClient): - def get_parameters(self, config): - param = [] - for val in model.collect_params(".*weight").values(): - p = val.data() - param.append(p.asnumpy()) - return param - - def set_parameters(self, parameters): - params = zip(model.collect_params(".*weight").keys(), parameters) - for key, value in params: - model.collect_params().setattr(key, value) - - def fit(self, parameters, config): - self.set_parameters(parameters) - [accuracy, loss], num_examples = train(model, train_data, epoch=2) - results = {"accuracy": float(accuracy[1]), "loss": float(loss[1])} - return self.get_parameters(config={}), num_examples, results - - def evaluate(self, parameters, config): - self.set_parameters(parameters) - [accuracy, loss], num_examples = test(model, val_data) - print("Evaluation accuracy & loss", accuracy, loss) - return float(loss[1]), val_data.batch_size, {"accuracy": float(accuracy[1])} - - -We can now create an instance of our class :code:`MNISTClient` and add one line -to actually run this client: - -.. code-block:: python - - fl.client.start_numpy_client(server_address="0.0.0.0:8080", client=MNISTClient()) - -That's it for the client. We only have to implement :code:`Client` or -:code:`NumPyClient` and call :code:`fl.client.start_client()` or :code:`fl.client.start_numpy_client()`. The string :code:`"0.0.0.0:8080"` tells the client which server to connect to. 
In our case we can run the server and the client on the same machine, therefore we use -:code:`"0.0.0.0:8080"`. If we run a truly federated workload with the server and -clients running on different machines, all that needs to change is the -:code:`server_address` we pass to the client. - -Flower Server -------------- - -For simple workloads we can start a Flower server and leave all the -configuration possibilities at their default values. In a file named -:code:`server.py`, import Flower and start the server: - -.. code-block:: python - - import flwr as fl - - fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3)) - -Train the model, federated! ---------------------------- - -With both client and server ready, we can now run everything and see federated -learning in action. Federated learning systems usually have a server and multiple clients. We -therefore have to start the server first: - -.. code-block:: shell - - $ python server.py - -Once the server is running we can start the clients in different terminals. -Open a new terminal and start the first client: - -.. code-block:: shell - - $ python client.py - -Open another terminal and start the second client: - -.. code-block:: shell - - $ python client.py - -Each client will have its own dataset. -You should now see how the training does in the very first terminal (the one that started the server): - -.. 
code-block:: shell - - INFO flower 2021-03-11 11:59:04,512 | app.py:76 | Flower server running (insecure, 3 rounds) - INFO flower 2021-03-11 11:59:04,512 | server.py:72 | Getting initial parameters - INFO flower 2021-03-11 11:59:09,089 | server.py:74 | Evaluating initial parameters - INFO flower 2021-03-11 11:59:09,089 | server.py:87 | [TIME] FL starting - DEBUG flower 2021-03-11 11:59:11,997 | server.py:165 | fit_round: strategy sampled 2 clients (out of 2) - DEBUG flower 2021-03-11 11:59:14,652 | server.py:177 | fit_round received 2 results and 0 failures - DEBUG flower 2021-03-11 11:59:14,656 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-03-11 11:59:14,811 | server.py:149 | evaluate received 2 results and 0 failures - DEBUG flower 2021-03-11 11:59:14,812 | server.py:165 | fit_round: strategy sampled 2 clients (out of 2) - DEBUG flower 2021-03-11 11:59:18,499 | server.py:177 | fit_round received 2 results and 0 failures - DEBUG flower 2021-03-11 11:59:18,503 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-03-11 11:59:18,784 | server.py:149 | evaluate received 2 results and 0 failures - DEBUG flower 2021-03-11 11:59:18,786 | server.py:165 | fit_round: strategy sampled 2 clients (out of 2) - DEBUG flower 2021-03-11 11:59:22,551 | server.py:177 | fit_round received 2 results and 0 failures - DEBUG flower 2021-03-11 11:59:22,555 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-03-11 11:59:22,789 | server.py:149 | evaluate received 2 results and 0 failures - INFO flower 2021-03-11 11:59:22,789 | server.py:122 | [TIME] FL finished in 13.700094900001204 - INFO flower 2021-03-11 11:59:22,790 | app.py:109 | app_fit: losses_distributed [(1, 1.5717803835868835), (2, 0.6093432009220123), (3, 0.4424773305654526)] - INFO flower 2021-03-11 11:59:22,790 | app.py:110 | app_fit: accuracies_distributed [] - INFO flower 2021-03-11 11:59:22,791 | app.py:111 | app_fit: losses_centralized [] - INFO flower 
2021-03-11 11:59:22,791 | app.py:112 | app_fit: accuracies_centralized [] - DEBUG flower 2021-03-11 11:59:22,793 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-03-11 11:59:23,111 | server.py:149 | evaluate received 2 results and 0 failures - INFO flower 2021-03-11 11:59:23,112 | app.py:121 | app_evaluate: federated loss: 0.4424773305654526 - INFO flower 2021-03-11 11:59:23,112 | app.py:125 | app_evaluate: results [('ipv4:127.0.0.1:44344', EvaluateRes(loss=0.443320095539093, num_examples=100, accuracy=0.0, metrics={'accuracy': 0.8752475247524752})), ('ipv4:127.0.0.1:44346', EvaluateRes(loss=0.44163456559181213, num_examples=100, accuracy=0.0, metrics={'accuracy': 0.8761386138613861}))] - INFO flower 2021-03-11 11:59:23,112 | app.py:127 | app_evaluate: failures [] - -Congratulations! -You've successfully built and run your first federated learning system. -The full `source code `_ for this example can be found in :code:`examples/quickstart-mxnet`. diff --git a/doc/source/tutorial-quickstart-scikitlearn.rst b/doc/source/tutorial-quickstart-scikitlearn.rst index d1d47dc37f19..93322842cc70 100644 --- a/doc/source/tutorial-quickstart-scikitlearn.rst +++ b/doc/source/tutorial-quickstart-scikitlearn.rst @@ -45,41 +45,51 @@ However, before setting up the client and server, we will define all functionali * :code:`get_model_parameters()` * Returns the parameters of a :code:`sklearn` LogisticRegression model * :code:`set_model_params()` - * Sets the parameters of a :code:`sklean` LogisticRegression model + * Sets the parameters of a :code:`sklearn` LogisticRegression model * :code:`set_initial_params()` * Initializes the model parameters that the Flower server will ask for -* :code:`load_mnist()` - * Loads the MNIST dataset using OpenML -* :code:`shuffle()` - * Shuffles data and its label -* :code:`partition()` - * Splits datasets into a number of partitions Please check out :code:`utils.py` `here `_ for more details. 
The pre-defined functions are used in the :code:`client.py` and imported. The :code:`client.py` also requires to import several packages such as Flower and scikit-learn: .. code-block:: python + import argparse import warnings - import flwr as fl - import numpy as np - + from sklearn.linear_model import LogisticRegression from sklearn.metrics import log_loss - + + import flwr as fl import utils + from flwr_datasets import FederatedDataset - -We load the MNIST dataset from `OpenML `_, a popular image classification dataset of handwritten digits for machine learning. The utility :code:`utils.load_mnist()` downloads the training and test data. The training set is split afterwards into 10 partitions with :code:`utils.partition()`. +Prior to local training, we need to load the MNIST dataset, a popular image classification dataset of handwritten digits for machine learning, and partition the dataset for FL. This can be conveniently achieved using `Flower Datasets `_. +The :code:`FederatedDataset.load_partition()` method loads the partitioned training set for each partition ID defined in the :code:`--partition-id` argument. .. 
code-block:: python if __name__ == "__main__": - - (X_train, y_train), (X_test, y_test) = utils.load_mnist() - - partition_id = np.random.choice(10) - (X_train, y_train) = utils.partition(X_train, y_train, 10)[partition_id] + N_CLIENTS = 10 + + parser = argparse.ArgumentParser(description="Flower") + parser.add_argument( + "--partition-id", + type=int, + choices=range(0, N_CLIENTS), + required=True, + help="Specifies the artificial data partition", + ) + args = parser.parse_args() + partition_id = args.partition_id + + fds = FederatedDataset(dataset="mnist", partitioners={"train": N_CLIENTS}) + + dataset = fds.load_partition(partition_id, "train").with_format("numpy") + X, y = dataset["image"].reshape((len(dataset), -1)), dataset["label"] + + X_train, X_test = X[: int(0.8 * len(X))], X[int(0.8 * len(X)) :] + y_train, y_test = y[: int(0.8 * len(y))], y[int(0.8 * len(y)) :] Next, the logistic regression model is defined and initialized with :code:`utils.set_initial_params()`. @@ -168,10 +178,13 @@ First, we import again all required libraries such as Flower and scikit-learn. from flwr.common import NDArrays, Scalar from sklearn.metrics import log_loss from sklearn.linear_model import LogisticRegression - from typing import Dict, Optional + from typing import Dict + + from flwr_datasets import FederatedDataset The number of federated learning rounds is set in :code:`fit_round()` and the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation function is called after each federated learning round and gives you information about loss and accuracy. +Note that we also make use of Flower Datasets here to load the test split of the MNIST dataset for server-side evaluation. .. 
code-block:: python @@ -183,7 +196,9 @@ The evaluation function is called after each federated learning round and gives def get_evaluate_fn(model: LogisticRegression): """Return an evaluation function for server-side evaluation.""" - _, (X_test, y_test) = utils.load_mnist() + fds = FederatedDataset(dataset="mnist", partitioners={"train": 10}) + dataset = fds.load_split("test").with_format("numpy") + X_test, y_test = dataset["image"].reshape((len(dataset), -1)), dataset["label"] def evaluate( server_round: int, parameters: NDArrays, config: Dict[str, Scalar] @@ -199,7 +214,7 @@ The :code:`main` contains the server-side parameter initialization :code:`utils. .. code-block:: python - # Start Flower server for five rounds of federated learning + # Start Flower server for three rounds of federated learning if __name__ == "__main__": model = LogisticRegression() utils.set_initial_params(model) diff --git a/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb b/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb index 2b8dd382bb79..c9d38b417a92 100644 --- a/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb +++ b/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb @@ -13,7 +13,7 @@ "\n", "> [Star Flower on GitHub](https://github.com/adap/flower) ⭐️ and join the Flower community on Slack to connect, ask questions, and get help: [Join Slack](https://flower.ai/join-slack) 🌼 We'd love to hear from you in the `#introductions` channel! And if anything is unclear, head over to the `#questions` channel.\n", "\n", - "Let's get stated!" + "Let's get started!" 
] }, { @@ -145,7 +145,7 @@ " for partition_id in range(NUM_CLIENTS):\n", " partition = fds.load_partition(partition_id, \"train\")\n", " partition = partition.with_transform(apply_transforms)\n", - " partition = partition.train_test_split(train_size=0.8)\n", + " partition = partition.train_test_split(train_size=0.8, seed=42)\n", " trainloaders.append(DataLoader(partition[\"train\"], batch_size=BATCH_SIZE))\n", " valloaders.append(DataLoader(partition[\"test\"], batch_size=BATCH_SIZE))\n", " testset = fds.load_split(\"test\").with_transform(apply_transforms)\n", diff --git a/e2e/bare-client-auth/README.md b/e2e/bare-client-auth/README.md new file mode 100644 index 000000000000..35967ebe2eb0 --- /dev/null +++ b/e2e/bare-client-auth/README.md @@ -0,0 +1,3 @@ +# Bare Flower testing + +This directory is used for testing Flower in a bare minimum scenario, that is, with a dummy model and dummy operations. This is mainly to test the core functionality of Flower independently from any framework. It can easily be extended to test more complex communication set-ups. 
diff --git a/e2e/bare-client-auth/certificate.conf b/e2e/bare-client-auth/certificate.conf new file mode 100644 index 000000000000..ea97fcbb700d --- /dev/null +++ b/e2e/bare-client-auth/certificate.conf @@ -0,0 +1,20 @@ +[req] +default_bits = 4096 +prompt = no +default_md = sha256 +req_extensions = req_ext +distinguished_name = dn + +[dn] +C = DE +ST = HH +O = Flower +CN = localhost + +[req_ext] +subjectAltName = @alt_names + +[alt_names] +DNS.1 = localhost +IP.1 = ::1 +IP.2 = 127.0.0.1 diff --git a/e2e/bare-client-auth/client.py b/e2e/bare-client-auth/client.py new file mode 100644 index 000000000000..a56ba5eca552 --- /dev/null +++ b/e2e/bare-client-auth/client.py @@ -0,0 +1,30 @@ +import flwr as fl +import numpy as np +from pathlib import Path + + +model_params = np.array([1]) +objective = 5 + +# Define Flower client +class FlowerClient(fl.client.NumPyClient): + def get_parameters(self, config): + return model_params + + def fit(self, parameters, config): + model_params = parameters + model_params = [param * (objective/np.mean(param)) for param in model_params] + return model_params, 1, {} + + def evaluate(self, parameters, config): + model_params = parameters + loss = min(np.abs(1 - np.mean(model_params)/objective), 1) + accuracy = 1 - loss + return loss, 1, {"accuracy": accuracy} + +def client_fn(cid): + return FlowerClient().to_client() + +app = fl.client.ClientApp( + client_fn=client_fn, +) diff --git a/e2e/bare-client-auth/generate.sh b/e2e/bare-client-auth/generate.sh new file mode 100755 index 000000000000..ebfdc17b80b5 --- /dev/null +++ b/e2e/bare-client-auth/generate.sh @@ -0,0 +1,72 @@ +#!/bin/bash +# This script regenerates all certificates and keys from scratch on every run + +set -e +# Change directory to the script's directory
cd "$(dirname "${BASH_SOURCE[0]}")" + +CERT_DIR=certificates + +# Create the certificates directory if it does not exist +mkdir -p $CERT_DIR + +# Clearing any existing files in the certificates directory +rm -f $CERT_DIR/* + +# Generate the root
certificate authority key and certificate based on key +openssl genrsa -out $CERT_DIR/ca.key 4096 +openssl req \ + -new \ + -x509 \ + -key $CERT_DIR/ca.key \ + -sha256 \ + -subj "/C=DE/ST=HH/O=CA, Inc." \ + -days 365 -out $CERT_DIR/ca.crt + +# Generate a new private key for the server +openssl genrsa -out $CERT_DIR/server.key 4096 + +# Create a signing CSR +openssl req \ + -new \ + -key $CERT_DIR/server.key \ + -out $CERT_DIR/server.csr \ + -config certificate.conf + +# Generate a certificate for the server +openssl x509 \ + -req \ + -in $CERT_DIR/server.csr \ + -CA $CERT_DIR/ca.crt \ + -CAkey $CERT_DIR/ca.key \ + -CAcreateserial \ + -out $CERT_DIR/server.pem \ + -days 365 \ + -sha256 \ + -extfile certificate.conf \ + -extensions req_ext + +KEY_DIR=keys + +mkdir -p $KEY_DIR + +rm -f $KEY_DIR/* + +ssh-keygen -t ecdsa -b 384 -N "" -f "${KEY_DIR}/server_credentials" -C "" + +generate_client_credentials() { + local num_clients=${1:-2} + for ((i=1; i<=num_clients; i++)) + do + ssh-keygen -t ecdsa -b 384 -N "" -f "${KEY_DIR}/client_credentials_$i" -C "" + done +} + +generate_client_credentials "$1" + +printf "%s" "$(cat "${KEY_DIR}/client_credentials_1.pub" | sed 's/.$//')" > $KEY_DIR/client_public_keys.csv +for ((i=2; i<=${1:-2}; i++)) +do + printf ",%s" "$(sed 's/.$//' < "${KEY_DIR}/client_credentials_$i.pub")" >> $KEY_DIR/client_public_keys.csv +done +printf "\n" >> $KEY_DIR/client_public_keys.csv diff --git a/examples/quickstart-mxnet/pyproject.toml b/e2e/bare-client-auth/pyproject.toml similarity index 54% rename from examples/quickstart-mxnet/pyproject.toml rename to e2e/bare-client-auth/pyproject.toml index b00b3ddfe412..693fec815474 100644 --- a/examples/quickstart-mxnet/pyproject.toml +++ b/e2e/bare-client-auth/pyproject.toml @@ -3,13 +3,11 @@ requires = ["poetry-core>=1.4.0"] build-backend = "poetry.core.masonry.api" [tool.poetry] -name = "mxnet_example" +name = "bare_client_auth_test" version = "0.1.0" -description = "MXNet example with MNIST and CNN" 
+description = "Client-auth-enabled bare Federated Learning test with Flower" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = "1.6.0" -mxnet = "1.9.1" -numpy = "1.23.1" +python = "^3.8" +flwr = { path = "../../", develop = true } diff --git a/e2e/bare-client-auth/server.py b/e2e/bare-client-auth/server.py new file mode 100644 index 000000000000..7e4f96e15fd9 --- /dev/null +++ b/e2e/bare-client-auth/server.py @@ -0,0 +1,42 @@ +import flwr as fl +from pathlib import Path + +app = fl.server.ServerApp() + + +@app.main() +def main(driver, context): + # Construct the LegacyContext + context = fl.server.LegacyContext( + state=context.state, + config=fl.server.ServerConfig(num_rounds=3), + ) + + # Create the workflow + workflow = fl.server.workflow.DefaultWorkflow() + + # Execute + workflow(driver, context) + + hist = context.history + assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 + ) + + +if __name__ == "__main__": + hist = fl.server.start_server( + server_address="127.0.0.1:8080", + config=fl.server.ServerConfig(num_rounds=3), + certificates=( + Path("certificates/ca.crt").read_bytes(), + Path("certificates/server.pem").read_bytes(), + Path("certificates/server.key").read_bytes(), + ), + ) + + assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 + ) diff --git a/e2e/bare-https/driver.py b/e2e/bare-https/driver.py deleted file mode 100644 index f7bfeb613f6a..000000000000 --- a/e2e/bare-https/driver.py +++ /dev/null @@ -1,12 +0,0 @@ -import flwr as fl -from pathlib import Path - - -# Start Flower server -hist = fl.server.start_driver( - server_address="127.0.0.1:9091", - config=fl.server.ServerConfig(num_rounds=3), - root_certificates=Path("certificates/ca.crt").read_bytes(), -) - -assert hist.losses_distributed[-1][1] == 0 diff --git a/e2e/bare-https/server.py 
b/e2e/bare-https/server.py index fcad7a3e4522..d85c0623e92c 100644 --- a/e2e/bare-https/server.py +++ b/e2e/bare-https/server.py @@ -2,14 +2,42 @@ from pathlib import Path -hist = fl.server.start_server( - server_address="127.0.0.1:8080", - config=fl.server.ServerConfig(num_rounds=3), - certificates=( - Path("certificates/ca.crt").read_bytes(), - Path("certificates/server.pem").read_bytes(), - Path("certificates/server.key").read_bytes(), +app = fl.server.ServerApp() + + +@app.main() +def main(driver, context): + # Construct the LegacyContext + context = fl.server.LegacyContext( + state=context.state, + config=fl.server.ServerConfig(num_rounds=3), + ) + + # Create the workflow + workflow = fl.server.workflow.DefaultWorkflow() + + # Execute + workflow(driver, context) + + hist = context.history + assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 ) -) -assert hist.losses_distributed[-1][1] == 0 or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 + +if __name__ == "__main__": + hist = fl.server.start_server( + server_address="127.0.0.1:8080", + config=fl.server.ServerConfig(num_rounds=3), + certificates=( + Path("certificates/ca.crt").read_bytes(), + Path("certificates/server.pem").read_bytes(), + Path("certificates/server.key").read_bytes(), + ), + ) + + assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 + ) diff --git a/e2e/bare/client.py b/e2e/bare/client.py index c291fb0963e4..5f8642e27675 100644 --- a/e2e/bare/client.py +++ b/e2e/bare/client.py @@ -6,12 +6,13 @@ from flwr.common import ConfigsRecord SUBSET_SIZE = 1000 -STATE_VAR = 'timestamp' +STATE_VAR = "timestamp" model_params = np.array([1]) objective = 5 + # Define Flower client class FlowerClient(fl.client.NumPyClient): def get_parameters(self, config): @@ -25,31 +26,41 @@ def _record_timestamp_to_state(self): value = 
self.context.state.configs_records[STATE_VAR][STATE_VAR] # type: ignore value += f",{t_stamp}" - self.context.state.configs_records[STATE_VAR] = ConfigsRecord({STATE_VAR: value}) - + self.context.state.configs_records[STATE_VAR] = ConfigsRecord( + {STATE_VAR: value} + ) + def _retrieve_timestamp_from_state(self): return self.context.state.configs_records[STATE_VAR][STATE_VAR] - + def fit(self, parameters, config): model_params = parameters - model_params = [param * (objective/np.mean(param)) for param in model_params] + model_params = [param * (objective / np.mean(param)) for param in model_params] self._record_timestamp_to_state() return model_params, 1, {STATE_VAR: self._retrieve_timestamp_from_state()} def evaluate(self, parameters, config): model_params = parameters - loss = min(np.abs(1 - np.mean(model_params)/objective), 1) + loss = min(np.abs(1 - np.mean(model_params) / objective), 1) accuracy = 1 - loss self._record_timestamp_to_state() - return loss, 1, {"accuracy": accuracy, STATE_VAR: self._retrieve_timestamp_from_state()} + return ( + loss, + 1, + {"accuracy": accuracy, STATE_VAR: self._retrieve_timestamp_from_state()}, + ) + def client_fn(cid): return FlowerClient().to_client() + app = fl.client.ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_client(server_address="127.0.0.1:8080", client=FlowerClient().to_client()) + fl.client.start_client( + server_address="127.0.0.1:8080", client=FlowerClient().to_client() + ) diff --git a/e2e/bare/driver.py b/e2e/bare/driver.py deleted file mode 100644 index defc2ad56213..000000000000 --- a/e2e/bare/driver.py +++ /dev/null @@ -1,10 +0,0 @@ -import flwr as fl - - -# Start Flower server -hist = fl.server.start_driver( - server_address="0.0.0.0:9091", - config=fl.server.ServerConfig(num_rounds=3), -) - -assert hist.losses_distributed[-1][1] == 0 diff --git a/e2e/docker/README.md b/e2e/docker/README.md new file mode 100644 index 000000000000..f87dd5ca68b5 --- 
/dev/null +++ b/e2e/docker/README.md @@ -0,0 +1,18 @@ +# Flower containers testing + +This directory is used to test Flower containers in a minimal scenario, that is, with 2 clients and without HTTPS. The FL setup uses PyTorch, the CIFAR10 dataset, and a CNN. This is mainly to test the functionalities of Flower in a containerized architecture. It can be easily extended to test more complex communication set-ups. + +It uses a subset of size 1000 for the training data and 10 data points for the testing. + +To execute locally, run the following in CLI: +``` shell +# pulls the latest supernode and serverapp nightly +docker compose build --pull +# pulls the latest superlink nightly +docker compose up -d --remove-orphans --force-recreate --pull always +``` + +To stop the containers, run: +``` shell +docker compose down +``` \ No newline at end of file diff --git a/e2e/docker/client.py b/e2e/docker/client.py new file mode 100644 index 000000000000..cea752ea5777 --- /dev/null +++ b/e2e/docker/client.py @@ -0,0 +1,132 @@ +import warnings +from collections import OrderedDict + +from flwr.client import NumPyClient, ClientApp +from flwr_datasets import FederatedDataset +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import DataLoader, Subset +from torchvision.transforms import Compose, Normalize, ToTensor + + +# ############################################################################# +# 1.
Regular PyTorch pipeline: nn.Module, train, test, and DataLoader +# ############################################################################# + +warnings.filterwarnings("ignore", category=UserWarning) +DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + +SUBSET_SIZE = 1_000 + +class Net(nn.Module): + """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')""" + + def __init__(self) -> None: + super(Net, self).__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 16 * 5 * 5) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + return self.fc3(x) + + +def train(net, trainloader, epochs): + """Train the model on the training set.""" + criterion = torch.nn.CrossEntropyLoss() + optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9) + for _ in range(epochs): + for batch in trainloader: + images = batch["img"] + labels = batch["label"] + optimizer.zero_grad() + criterion(net(images.to(DEVICE)), labels.to(DEVICE)).backward() + optimizer.step() + + +def test(net, testloader): + """Validate the model on the test set.""" + criterion = torch.nn.CrossEntropyLoss() + correct, loss = 0, 0.0 + with torch.no_grad(): + for batch in testloader: + images = batch["img"].to(DEVICE) + labels = batch["label"].to(DEVICE) + outputs = net(images) + loss += criterion(outputs, labels).item() + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + accuracy = correct / len(testloader.dataset) + return loss, accuracy + + +def load_data(partition_id): + """Load partition CIFAR10 data.""" + fds = FederatedDataset(dataset="cifar10", partitioners={"train": 2}) + partition = fds.load_partition(partition_id) + # Divide data on each 
node: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + pytorch_transforms = Compose( + [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] + ) + + def apply_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["img"] = [pytorch_transforms(img) for img in batch["img"]] + return batch + + partition_train_test = partition_train_test.with_transform(apply_transforms) + trainset = Subset(partition_train_test["train"], range(SUBSET_SIZE)) + testset = Subset(partition_train_test["test"], range(10)) + trainloader = DataLoader(trainset, batch_size=32, shuffle=True) + testloader = DataLoader(testset, batch_size=32) + return trainloader, testloader + + +# ############################################################################# +# 2. Federation of the pipeline with Flower +# ############################################################################# + +# Load model and data (simple CNN, CIFAR-10) +net = Net().to(DEVICE) +trainloader, testloader = load_data(partition_id=0) + + +# Define Flower client +class FlowerClient(NumPyClient): + def get_parameters(self, config): + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + def set_parameters(self, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) + + def fit(self, parameters, config): + self.set_parameters(parameters) + train(net, trainloader, epochs=1) + return self.get_parameters(config={}), len(trainloader.dataset), {} + + def evaluate(self, parameters, config): + self.set_parameters(parameters) + loss, accuracy = test(net, testloader) + return loss, len(testloader.dataset), {"accuracy": accuracy} + + +def client_fn(cid: str): + """Create and return an instance of Flower `Client`.""" + return FlowerClient().to_client() + + +# Flower ClientApp +app = ClientApp( + 
client_fn=client_fn, +) diff --git a/e2e/docker/compose.yaml b/e2e/docker/compose.yaml new file mode 100644 index 000000000000..073ca9f60a57 --- /dev/null +++ b/e2e/docker/compose.yaml @@ -0,0 +1,34 @@ +name: flwr-containers-e2e-test + +services: + # create a SuperLink service + superlink: + image: docker.io/flwr/superlink:nightly + ports: + - "9091:9091" + - "9092:9092" + command: [ "--insecure" ] + + # create a SuperNode service + supernode: + build: + dockerfile: supernode.Dockerfile + deploy: + # specify 2 containers with 2xCPUs (for 2 clients) + replicas: 2 + resources: + limits: + cpus: '2' + command: [ "--insecure", "--server", "superlink:9092" ] + depends_on: + - superlink + + # create a ServerApp service + serverapp: + build: + dockerfile: serverapp.Dockerfile + command: [ "--insecure", "--server", "superlink:9091" ] + # enforce dependency for graceful execution + depends_on: + - superlink + - supernode diff --git a/e2e/docker/pyproject.toml b/e2e/docker/pyproject.toml new file mode 100644 index 000000000000..955f30c7bf8d --- /dev/null +++ b/e2e/docker/pyproject.toml @@ -0,0 +1,19 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "e2e-docker" +version = "0.1.0" +description = "TOML used to define dependencies in a E2E test" +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +dependencies = [ + "flwr-datasets[vision]>=0.1.0,<1.0.0", + "torch==2.2.1", + "torchvision==0.17.1", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] diff --git a/e2e/tensorflow/driver.py b/e2e/docker/server.py similarity index 62% rename from e2e/tensorflow/driver.py rename to e2e/docker/server.py index 2ea4de69a62b..cb3490b75dca 100644 --- a/e2e/tensorflow/driver.py +++ b/e2e/docker/server.py @@ -1,6 +1,7 @@ from typing import List, Tuple -import flwr as fl +from flwr.server import ServerApp, ServerConfig +from flwr.server.strategy import FedAvg from flwr.common import Metrics @@ -15,13 +16,15 @@ 
def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: # Define strategy -strategy = fl.server.strategy.FedAvg(evaluate_metrics_aggregation_fn=weighted_average) +strategy = FedAvg(evaluate_metrics_aggregation_fn=weighted_average) -# Start Flower server -hist = fl.server.start_driver( - server_address="0.0.0.0:9091", - config=fl.server.ServerConfig(num_rounds=3), + +# Define config +config = ServerConfig(num_rounds=3) + + +# Flower ServerApp +app = ServerApp( + config=config, strategy=strategy, ) - -assert (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) > 0.98 diff --git a/e2e/docker/serverapp.Dockerfile b/e2e/docker/serverapp.Dockerfile new file mode 100644 index 000000000000..493d9e931983 --- /dev/null +++ b/e2e/docker/serverapp.Dockerfile @@ -0,0 +1,6 @@ +FROM flwr/serverapp:nightly + +WORKDIR /app + +COPY server.py ./ +ENTRYPOINT [ "flower-server-app", "server:app" ] diff --git a/e2e/docker/supernode.Dockerfile b/e2e/docker/supernode.Dockerfile new file mode 100644 index 000000000000..2770315a1b54 --- /dev/null +++ b/e2e/docker/supernode.Dockerfile @@ -0,0 +1,9 @@ +FROM flwr/supernode:nightly + +WORKDIR /app +COPY pyproject.toml ./ +RUN python -m pip install -U --no-cache-dir . 
\ + && pyenv rehash + +COPY client.py ./ +ENTRYPOINT [ "flower-client-app", "client:app" ] diff --git a/e2e/fastai/driver.py b/e2e/fastai/driver.py deleted file mode 100644 index cc452ea523ca..000000000000 --- a/e2e/fastai/driver.py +++ /dev/null @@ -1,7 +0,0 @@ -import flwr as fl - -hist = fl.server.start_driver( - server_address="0.0.0.0:9091", - config=fl.server.ServerConfig(num_rounds=3), -) -assert (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 1 diff --git a/e2e/jax/driver.py b/e2e/jax/driver.py deleted file mode 100644 index cc452ea523ca..000000000000 --- a/e2e/jax/driver.py +++ /dev/null @@ -1,7 +0,0 @@ -import flwr as fl - -hist = fl.server.start_driver( - server_address="0.0.0.0:9091", - config=fl.server.ServerConfig(num_rounds=3), -) -assert (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 1 diff --git a/e2e/opacus/driver.py b/e2e/opacus/driver.py deleted file mode 100644 index 75acd9ccea24..000000000000 --- a/e2e/opacus/driver.py +++ /dev/null @@ -1,7 +0,0 @@ -import flwr as fl - -hist = fl.server.start_driver( - server_address="0.0.0.0:9091", - config=fl.server.ServerConfig(num_rounds=3), -) -assert (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 diff --git a/e2e/pandas/driver.py b/e2e/pandas/driver.py deleted file mode 100644 index f5dc74c9f3f8..000000000000 --- a/e2e/pandas/driver.py +++ /dev/null @@ -1,34 +0,0 @@ -import flwr as fl - -from strategy import FedAnalytics - -# Start Flower server -hist = fl.server.start_driver( - server_address="0.0.0.0:9091", - config=fl.server.ServerConfig(num_rounds=1), - strategy=FedAnalytics(), -) -assert hist.metrics_centralized["Aggregated histograms"][1][1] == [ - "Length:", - "18", - "46", - "28", - "54", - "32", - "52", - "36", - "12", - "10", - "12", - "Width:", - "8", - "14", - "44", - "48", - "74", - "62", - "20", - "22", - "4", - "4", -] diff --git a/e2e/pandas/server.py b/e2e/pandas/server.py index a972b876294a..4c69ab3881d2 100644 --- 
a/e2e/pandas/server.py +++ b/e2e/pandas/server.py @@ -2,33 +2,79 @@ from strategy import FedAnalytics -hist = fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=1), - strategy=FedAnalytics(), -) - -assert hist.metrics_centralized["Aggregated histograms"][1][1] == [ - "Length:", - "18", - "46", - "28", - "54", - "32", - "52", - "36", - "12", - "10", - "12", - "Width:", - "8", - "14", - "44", - "48", - "74", - "62", - "20", - "22", - "4", - "4", -] +app = fl.server.ServerApp() + + +@app.main() +def main(driver, context): + # Construct the LegacyContext + context = fl.server.LegacyContext( + state=context.state, + config=fl.server.ServerConfig(num_rounds=3), + strategy=FedAnalytics(), + ) + + # Create the workflow + workflow = fl.server.workflow.DefaultWorkflow() + + # Execute + workflow(driver, context) + + hist = context.history + assert hist.metrics_centralized["Aggregated histograms"][1][1] == [ + "Length:", + "18", + "46", + "28", + "54", + "32", + "52", + "36", + "12", + "10", + "12", + "Width:", + "8", + "14", + "44", + "48", + "74", + "62", + "20", + "22", + "4", + "4", + ] + + +if __name__ == "__main__": + hist = fl.server.start_server( + server_address="0.0.0.0:8080", + config=fl.server.ServerConfig(num_rounds=1), + strategy=FedAnalytics(), + ) + + assert hist.metrics_centralized["Aggregated histograms"][1][1] == [ + "Length:", + "18", + "46", + "28", + "54", + "32", + "52", + "36", + "12", + "10", + "12", + "Width:", + "8", + "14", + "44", + "48", + "74", + "62", + "20", + "22", + "4", + "4", + ] diff --git a/e2e/pytorch-lightning/driver.py b/e2e/pytorch-lightning/driver.py deleted file mode 100644 index cc452ea523ca..000000000000 --- a/e2e/pytorch-lightning/driver.py +++ /dev/null @@ -1,7 +0,0 @@ -import flwr as fl - -hist = fl.server.start_driver( - server_address="0.0.0.0:9091", - config=fl.server.ServerConfig(num_rounds=3), -) -assert (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 1 
diff --git a/e2e/pytorch-lightning/pyproject.toml b/e2e/pytorch-lightning/pyproject.toml index 88cddddf500f..90d659813c28 100644 --- a/e2e/pytorch-lightning/pyproject.toml +++ b/e2e/pytorch-lightning/pyproject.toml @@ -11,5 +11,5 @@ authors = ["The Flower Authors "] [tool.poetry.dependencies] python = "^3.8" flwr = { path = "../../", develop = true, extras = ["simulation"] } -pytorch-lightning = "2.1.3" +pytorch-lightning = "2.2.4" torchvision = "0.14.1" diff --git a/e2e/pytorch/driver.py b/e2e/pytorch/driver.py deleted file mode 100644 index 2ea4de69a62b..000000000000 --- a/e2e/pytorch/driver.py +++ /dev/null @@ -1,27 +0,0 @@ -from typing import List, Tuple - -import flwr as fl -from flwr.common import Metrics - - -# Define metric aggregation function -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - # Multiply accuracy of each client by number of examples used - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) - return {"accuracy": sum(accuracies) / sum(examples)} - - -# Define strategy -strategy = fl.server.strategy.FedAvg(evaluate_metrics_aggregation_fn=weighted_average) - -# Start Flower server -hist = fl.server.start_driver( - server_address="0.0.0.0:9091", - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, -) - -assert (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) > 0.98 diff --git a/e2e/scikit-learn/driver.py b/e2e/scikit-learn/driver.py deleted file mode 100644 index 29051d02c6b6..000000000000 --- a/e2e/scikit-learn/driver.py +++ /dev/null @@ -1,44 +0,0 @@ -import flwr as fl -import utils -from sklearn.metrics import log_loss -from sklearn.linear_model import LogisticRegression -from typing import Dict - - -def fit_round(server_round: int) -> Dict: - """Send round number to client.""" - return {"server_round": server_round} - - -def get_evaluate_fn(model: 
LogisticRegression): - """Return an evaluation function for server-side evaluation.""" - - # Load test data here to avoid the overhead of doing it in `evaluate` itself - _, (X_test, y_test) = utils.load_mnist() - - # The `evaluate` function will be called after every round - def evaluate(server_round, parameters: fl.common.NDArrays, config): - # Update model with the latest parameters - utils.set_model_params(model, parameters) - loss = log_loss(y_test, model.predict_proba(X_test)) - accuracy = model.score(X_test, y_test) - return loss, {"accuracy": accuracy} - - return evaluate - - -# Start Flower server for five rounds of federated learning -if __name__ == "__main__": - model = LogisticRegression() - utils.set_initial_params(model) - strategy = fl.server.strategy.FedAvg( - min_available_clients=2, - evaluate_fn=get_evaluate_fn(model), - on_fit_config_fn=fit_round, - ) - hist = fl.server.start_driver( - server_address="0.0.0.0:9091", - strategy=strategy, - config=fl.server.ServerConfig(num_rounds=3), - ) - assert (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 1 diff --git a/e2e/server.py b/e2e/server.py index 9abfbb27fafc..3b37ac4244e9 100644 --- a/e2e/server.py +++ b/e2e/server.py @@ -1,18 +1,17 @@ -from typing import List, Tuple import numpy as np import flwr as fl -from flwr.common import Metrics -STATE_VAR = 'timestamp' + +STATE_VAR = "timestamp" # Define metric aggregation function -def record_state_metrics(metrics: List[Tuple[int, Metrics]]) -> Metrics: +def record_state_metrics(metrics): """Ensure that timestamps are monotonically increasing.""" if not metrics: return {} - + if STATE_VAR not in metrics[0][1]: # Do nothing if keyword is not present return {} @@ -20,28 +19,62 @@ def record_state_metrics(metrics: List[Tuple[int, Metrics]]) -> Metrics: states = [] for _, m in metrics: # split string and covert timestamps to float - states.append([float(tt) for tt in m[STATE_VAR].split(',')]) + states.append([float(tt) for tt in 
m[STATE_VAR].split(",")]) for client_state in states: if len(client_state) == 1: continue deltas = np.diff(client_state) - assert np.all(deltas > 0), f"Timestamps are not monotonically increasing: {client_state}" + assert np.all( + deltas > 0 + ), f"Timestamps are not monotonically increasing: {client_state}" return {STATE_VAR: states} -strategy = fl.server.strategy.FedAvg(evaluate_metrics_aggregation_fn=record_state_metrics) +app = fl.server.ServerApp() + + +@app.main() +def main(driver, context): + # Construct the LegacyContext + context = fl.server.LegacyContext( + state=context.state, + config=fl.server.ServerConfig(num_rounds=3), + ) + + # Create the workflow + workflow = fl.server.workflow.DefaultWorkflow() + + # Execute + workflow(driver, context) + + hist = context.history + assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 + ) + + +if __name__ == "__main__": + strategy = fl.server.strategy.FedAvg( + evaluate_metrics_aggregation_fn=record_state_metrics + ) -hist = fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, -) + hist = fl.server.start_server( + server_address="0.0.0.0:8080", + config=fl.server.ServerConfig(num_rounds=3), + strategy=strategy, + ) -assert hist.losses_distributed[-1][1] == 0 or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 + assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 + ) -if STATE_VAR in hist.metrics_distributed: - # The checks in record_state_metrics don't do anythinng if client's state has a single entry - state_metrics_last_round = hist.metrics_distributed[STATE_VAR][-1] - assert len(state_metrics_last_round[1][0]) == 2*state_metrics_last_round[0], f"There should be twice as many entries in the client state as rounds" + if STATE_VAR in hist.metrics_distributed: + # The checks in 
record_state_metrics don't do anythinng if client's state has a single entry + state_metrics_last_round = hist.metrics_distributed[STATE_VAR][-1] + assert ( + len(state_metrics_last_round[1][0]) == 2 * state_metrics_last_round[0] + ), f"There should be twice as many entries in the client state as rounds" diff --git a/e2e/tabnet/driver.py b/e2e/tabnet/driver.py deleted file mode 100644 index cc452ea523ca..000000000000 --- a/e2e/tabnet/driver.py +++ /dev/null @@ -1,7 +0,0 @@ -import flwr as fl - -hist = fl.server.start_driver( - server_address="0.0.0.0:9091", - config=fl.server.ServerConfig(num_rounds=3), -) -assert (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 1 diff --git a/e2e/test_driver.sh b/e2e/test_driver.sh index 3d4864a1b0fb..6d7f6ec864fd 100755 --- a/e2e/test_driver.sh +++ b/e2e/test_driver.sh @@ -2,14 +2,21 @@ set -e case "$1" in + pandas) + server_arg="--insecure" + client_arg="--insecure" + server_dir="./" + ;; bare-https) ./generate.sh server_arg="--certificates certificates/ca.crt certificates/server.pem certificates/server.key" client_arg="--root-certificates certificates/ca.crt" + server_dir="./" ;; *) server_arg="--insecure" client_arg="--insecure" + server_dir="./.." 
;; esac @@ -17,33 +24,57 @@ case "$2" in rest) rest_arg="--rest" server_address="http://localhost:9093" + server_app_address="127.0.0.1:9091" db_arg="--database :flwr-in-memory-state:" + server_auth="" + client_auth_1="" + client_auth_2="" ;; sqlite) rest_arg="" server_address="127.0.0.1:9092" + server_app_address="127.0.0.1:9091" db_arg="--database $(date +%s).db" + server_auth="" + client_auth_1="" + client_auth_2="" + ;; + client-auth) + ./generate.sh + rest_arg="" + server_address="127.0.0.1:9092" + server_app_address="127.0.0.1:9091" + db_arg="--database :flwr-in-memory-state:" + server_arg="--certificates certificates/ca.crt certificates/server.pem certificates/server.key" + client_arg="--root-certificates certificates/ca.crt" + server_auth="--require-client-authentication keys/client_public_keys.csv keys/server_credentials keys/server_credentials.pub" + client_auth_1="--authentication-keys keys/client_credentials_1 keys/client_credentials_1.pub" + client_auth_2="--authentication-keys keys/client_credentials_2 keys/client_credentials_2.pub" ;; *) rest_arg="" server_address="127.0.0.1:9092" + server_app_address="127.0.0.1:9091" db_arg="--database :flwr-in-memory-state:" + server_auth="" + client_auth_1="" + client_auth_2="" ;; esac -timeout 2m flower-superlink $server_arg $db_arg $rest_arg & +timeout 2m flower-superlink $server_arg $db_arg $rest_arg $server_auth & sl_pid=$! sleep 3 -timeout 2m flower-client-app client:app $client_arg $rest_arg --server $server_address & +timeout 2m flower-client-app client:app $client_arg $rest_arg --server $server_address $client_auth_1 & cl1_pid=$! sleep 3 -timeout 2m flower-client-app client:app $client_arg $rest_arg --server $server_address & +timeout 2m flower-client-app client:app $client_arg $rest_arg --server $server_address $client_auth_2 & cl2_pid=$! sleep 3 -timeout 2m python driver.py & +timeout 2m flower-server-app server:app $client_arg --dir $server_dir --server $server_app_address & pid=$! 
wait $pid diff --git a/examples/advanced-pytorch/client.py b/examples/advanced-pytorch/client.py index d4c8abe3d404..7c1420a2cecd 100644 --- a/examples/advanced-pytorch/client.py +++ b/examples/advanced-pytorch/client.py @@ -46,7 +46,7 @@ def fit(self, parameters, config): batch_size: int = config["batch_size"] epochs: int = config["local_epochs"] - train_valid = self.trainset.train_test_split(self.validation_split) + train_valid = self.trainset.train_test_split(self.validation_split, seed=42) trainset = train_valid["train"] valset = train_valid["test"] diff --git a/examples/advanced-pytorch/utils.py b/examples/advanced-pytorch/utils.py index fd9dab19a70d..c47b4fa38593 100644 --- a/examples/advanced-pytorch/utils.py +++ b/examples/advanced-pytorch/utils.py @@ -14,7 +14,7 @@ def load_partition(partition_id, toy: bool = False): fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) partition = fds.load_partition(partition_id) # Divide data on each node: 80% train, 20% test - partition_train_test = partition.train_test_split(test_size=0.2) + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) partition_train_test = partition_train_test.with_transform(apply_transforms) return partition_train_test["train"], partition_train_test["test"] diff --git a/examples/advanced-tensorflow/client.py b/examples/advanced-tensorflow/client.py index 17d1d2306270..b658a1f9ea04 100644 --- a/examples/advanced-tensorflow/client.py +++ b/examples/advanced-tensorflow/client.py @@ -123,7 +123,7 @@ def load_partition(idx: int): partition.set_format("numpy") # Divide data on each node: 80% train, 20% test - partition = partition.train_test_split(test_size=0.2) + partition = partition.train_test_split(test_size=0.2, seed=42) x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"] x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"] return x_train, y_train, x_test, y_test diff --git 
a/examples/app-pytorch/client.py b/examples/app-pytorch/client.py index ebbe977ecab1..eb84968bb986 100644 --- a/examples/app-pytorch/client.py +++ b/examples/app-pytorch/client.py @@ -18,7 +18,6 @@ # Define FlowerClient and client_fn class FlowerClient(NumPyClient): - def fit(self, parameters, config): set_weights(net, parameters) results = train(net, trainloader, testloader, epochs=1, device=DEVICE) diff --git a/examples/app-pytorch/pyproject.toml b/examples/app-pytorch/pyproject.toml index e47dd2db949d..c00e38aef19b 100644 --- a/examples/app-pytorch/pyproject.toml +++ b/examples/app-pytorch/pyproject.toml @@ -11,7 +11,6 @@ authors = ["The Flower Authors "] [tool.poetry.dependencies] python = "^3.8" # Mandatory dependencies -flwr-nightly = { version = "1.8.0.dev20240309", extras = ["simulation"] } -flwr-datasets = { version = "0.0.2", extras = ["vision"] } +flwr = { version = "^1.8.0", extras = ["simulation"] } torch = "2.2.1" torchvision = "0.17.1" diff --git a/examples/app-pytorch/requirements.txt b/examples/app-pytorch/requirements.txt index 016a84043cbe..117e30b2ad56 100644 --- a/examples/app-pytorch/requirements.txt +++ b/examples/app-pytorch/requirements.txt @@ -1,4 +1,3 @@ -flwr-nightly[simulation]==1.8.0.dev20240309 -flwr-datasets[vision]==0.0.2 +flwr[simulation]>=1.8.0 torch==2.2.1 torchvision==0.17.1 diff --git a/examples/app-pytorch/server_custom.py b/examples/app-pytorch/server_custom.py index ba9cdb11d694..67c1bce99c55 100644 --- a/examples/app-pytorch/server_custom.py +++ b/examples/app-pytorch/server_custom.py @@ -103,15 +103,19 @@ def main(driver: Driver, context: Context) -> None: all_replies: List[Message] = [] while True: replies = driver.pull_messages(message_ids=message_ids) - print(f"Got {len(replies)} results") + for res in replies: + print(f"Got 1 {'result' if res.has_content() else 'error'}") all_replies += replies if len(all_replies) == len(message_ids): break + print("Pulling messages...") time.sleep(3) - # Collect correct results + # 
Filter correct results all_fitres = [ - recordset_to_fitres(msg.content, keep_input=True) for msg in all_replies + recordset_to_fitres(msg.content, keep_input=True) + for msg in all_replies + if msg.has_content() ] print(f"Received {len(all_fitres)} results") @@ -128,16 +132,21 @@ def main(driver: Driver, context: Context) -> None: ) metrics_results.append((fitres.num_examples, fitres.metrics)) - # Aggregate parameters (FedAvg) - parameters_aggregated = ndarrays_to_parameters(aggregate(weights_results)) - parameters = parameters_aggregated + if len(weights_results) > 0: + # Aggregate parameters (FedAvg) + parameters_aggregated = ndarrays_to_parameters(aggregate(weights_results)) + parameters = parameters_aggregated - # Aggregate metrics - metrics_aggregated = weighted_average(metrics_results) - history.add_metrics_distributed_fit( - server_round=server_round, metrics=metrics_aggregated - ) - print("Round ", server_round, " metrics: ", metrics_aggregated) + # Aggregate metrics + metrics_aggregated = weighted_average(metrics_results) + history.add_metrics_distributed_fit( + server_round=server_round, metrics=metrics_aggregated + ) + print("Round ", server_round, " metrics: ", metrics_aggregated) + else: + print( + f"Round {server_round} got {len(weights_results)} results. Skipping aggregation..." 
+ ) # Slow down the start of the next round time.sleep(sleep_time) diff --git a/examples/app-secure-aggregation/pyproject.toml b/examples/app-secure-aggregation/pyproject.toml index 84b6502064c8..fb1f636d8c33 100644 --- a/examples/app-secure-aggregation/pyproject.toml +++ b/examples/app-secure-aggregation/pyproject.toml @@ -11,4 +11,4 @@ authors = ["The Flower Authors "] [tool.poetry.dependencies] python = "^3.8" # Mandatory dependencies -flwr-nightly = { version = "1.8.0.dev20240309", extras = ["simulation"] } +flwr = { version = "^1.8.0", extras = ["simulation"] } diff --git a/examples/app-secure-aggregation/requirements.txt b/examples/app-secure-aggregation/requirements.txt index 5bac63a0d44c..2d8be098f264 100644 --- a/examples/app-secure-aggregation/requirements.txt +++ b/examples/app-secure-aggregation/requirements.txt @@ -1 +1 @@ -flwr-nightly[simulation]==1.8.0.dev20240309 +flwr[simulation]>=1.8.0 diff --git a/examples/custom-mods/README.md b/examples/custom-mods/README.md index b0ad668c2dec..6b03abcfbfe0 100644 --- a/examples/custom-mods/README.md +++ b/examples/custom-mods/README.md @@ -288,7 +288,7 @@ $ tree . pip install -r requirements.txt ``` -For [W&B](wandb.ai) you will also need a valid account. +For [W&B](https://wandb.ai) you will also need a valid account. ### Start the long-running Flower server (SuperLink) @@ -328,7 +328,7 @@ flower-server-app server:app --insecure ### Check the results -For W&B, you will need to login to the [website](wandb.ai). +For W&B, you will need to login to the [website](https://wandb.ai). 
For TensorBoard, you will need to run the following command in your terminal: diff --git a/examples/custom-mods/client.py b/examples/custom-mods/client.py index 2b87a24da19d..614daef6bcf6 100644 --- a/examples/custom-mods/client.py +++ b/examples/custom-mods/client.py @@ -86,7 +86,6 @@ def wandb_mod(msg: Message, context: Context, app: ClientAppCallable) -> Message # if the `ClientApp` just processed a "fit" message, let's log some metrics to W&B if reply.metadata.message_type == MessageType.TRAIN and reply.has_content(): - metrics = reply.content.configs_records results_to_log = dict(metrics.get("fitres.metrics", ConfigsRecord())) diff --git a/examples/doc/source/_static/.gitignore b/examples/doc/source/_static/.gitignore index c2412a5912cc..887023baf484 100644 --- a/examples/doc/source/_static/.gitignore +++ b/examples/doc/source/_static/.gitignore @@ -3,3 +3,4 @@ !favicon.ico !flower-logo.png !tmux_jtop_view.gif +!view-gh.png diff --git a/examples/doc/source/_static/view-gh.png b/examples/doc/source/_static/view-gh.png new file mode 100644 index 000000000000..afc3f07bc2d5 Binary files /dev/null and b/examples/doc/source/_static/view-gh.png differ diff --git a/examples/doc/source/conf.py b/examples/doc/source/conf.py index bf177aa5ae24..b9c18fba2e18 100644 --- a/examples/doc/source/conf.py +++ b/examples/doc/source/conf.py @@ -24,13 +24,12 @@ import datetime - project = "Flower" copyright = f"{datetime.date.today().year} Flower Labs GmbH" author = "The Flower Authors" # The full version, including alpha/beta/rc tags -release = "1.8.0" +release = "1.9.0" # -- General configuration --------------------------------------------------- @@ -63,8 +62,10 @@ # Sphinx redirects, implemented after the doc filename changes. # To prevent 404 errors and redirect to the new pages. 
-# redirects = { -# } +redirects = { + "quickstart-mxnet": "index.html", + "mxnet-from-centralized-to-federated": "index.html", +} # -- Options for HTML output ------------------------------------------------- diff --git a/examples/embedded-devices/Dockerfile b/examples/embedded-devices/Dockerfile index a85c05c4bb7a..48602c89970a 100644 --- a/examples/embedded-devices/Dockerfile +++ b/examples/embedded-devices/Dockerfile @@ -8,7 +8,7 @@ RUN pip3 install --upgrade pip # Install flower RUN pip3 install flwr>=1.0 -RUN pip3 install flwr-datsets>=0.2 +RUN pip3 install flwr-datsets>=0.0.2 RUN pip3 install tqdm==4.65.0 WORKDIR /client diff --git a/examples/embedded-devices/client_pytorch.py b/examples/embedded-devices/client_pytorch.py index 6bd69c16567e..411052bfb1ea 100644 --- a/examples/embedded-devices/client_pytorch.py +++ b/examples/embedded-devices/client_pytorch.py @@ -108,7 +108,7 @@ def apply_transforms(batch): for partition_id in range(NUM_CLIENTS): partition = fds.load_partition(partition_id, "train") # Divide data on each node: 90% train, 10% test - partition = partition.train_test_split(test_size=0.1) + partition = partition.train_test_split(test_size=0.1, seed=42) partition = partition.with_transform(apply_transforms) trainsets.append(partition["train"]) validsets.append(partition["test"]) diff --git a/examples/embedded-devices/client_tf.py b/examples/embedded-devices/client_tf.py index 49c63ce5d9dc..3df75f76312b 100644 --- a/examples/embedded-devices/client_tf.py +++ b/examples/embedded-devices/client_tf.py @@ -44,7 +44,7 @@ def prepare_dataset(use_mnist: bool): partition = fds.load_partition(partition_id, "train") partition.set_format("numpy") # Divide data on each node: 90% train, 10% test - partition = partition.train_test_split(test_size=0.1) + partition = partition.train_test_split(test_size=0.1, seed=42) x_train, y_train = ( partition["train"][img_key] / 255.0, partition["train"]["label"], diff --git 
a/examples/embedded-devices/requirements_pytorch.txt b/examples/embedded-devices/requirements_pytorch.txt index f859c4efef17..dbad686d914e 100644 --- a/examples/embedded-devices/requirements_pytorch.txt +++ b/examples/embedded-devices/requirements_pytorch.txt @@ -2,4 +2,4 @@ flwr>=1.0, <2.0 flwr-datasets[vision]>=0.0.2, <1.0.0 torch==1.13.1 torchvision==0.14.1 -tqdm==4.65.0 +tqdm==4.66.3 diff --git a/examples/fl-dp-sa/README.md b/examples/fl-dp-sa/README.md index 99a0a7e50980..47eedb70a2b8 100644 --- a/examples/fl-dp-sa/README.md +++ b/examples/fl-dp-sa/README.md @@ -15,7 +15,7 @@ poetry install ## Run -The example uses the CIFAR-10 dataset with a total of 100 clients, with 20 clients sampled in each round. The hyperparameters for DP and SecAgg are specified in `server.py`. +The example uses the MNIST dataset with a total of 100 clients, with 20 clients sampled in each round. The hyperparameters for DP and SecAgg are specified in `server.py`. ```shell flower-simulation --server-app fl_dp_sa.server:app --client-app fl_dp_sa.client:app --num-supernodes 100 diff --git a/examples/fl-dp-sa/fl_dp_sa/task.py b/examples/fl-dp-sa/fl_dp_sa/task.py index 3d506263d5a3..6a94571a2369 100644 --- a/examples/fl-dp-sa/fl_dp_sa/task.py +++ b/examples/fl-dp-sa/fl_dp_sa/task.py @@ -38,11 +38,11 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: def load_data(partition_id): - """Load partition CIFAR10 data.""" + """Load partition MNIST data.""" fds = FederatedDataset(dataset="mnist", partitioners={"train": 100}) partition = fds.load_partition(partition_id) # Divide data on each node: 80% train, 20% test - partition_train_test = partition.train_test_split(test_size=0.2) + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) pytorch_transforms = Compose([ToTensor(), Normalize((0.5,), (0.5,))]) def apply_transforms(batch): diff --git a/examples/fl-dp-sa/pyproject.toml b/examples/fl-dp-sa/pyproject.toml index d30fa4675e34..1ca343b072d9 100644 --- 
a/examples/fl-dp-sa/pyproject.toml +++ b/examples/fl-dp-sa/pyproject.toml @@ -15,7 +15,7 @@ readme = "README.md" [tool.poetry.dependencies] python = "^3.9" # Mandatory dependencies -flwr-nightly = { version = "1.8.0.dev20240313", extras = ["simulation"] } +flwr = { version = "^1.8.0", extras = ["simulation"] } flwr-datasets = { version = "0.0.2", extras = ["vision"] } torch = "2.2.1" torchvision = "0.17.1" diff --git a/examples/fl-dp-sa/requirements.txt b/examples/fl-dp-sa/requirements.txt index ddb8a814447b..f20b9d71e339 100644 --- a/examples/fl-dp-sa/requirements.txt +++ b/examples/fl-dp-sa/requirements.txt @@ -1,4 +1,4 @@ -flwr-nightly[simulation]==1.8.0.dev20240313 +flwr[simulation]>=1.8.0 flwr-datasets[vision]==0.0.2 torch==2.2.1 torchvision==0.17.1 diff --git a/examples/flower-client-authentication/README.md b/examples/flower-client-authentication/README.md new file mode 100644 index 000000000000..7c724fc26f64 --- /dev/null +++ b/examples/flower-client-authentication/README.md @@ -0,0 +1,105 @@ +# Flower Client Authentication with PyTorch 🧪 + +> 🧪 = This example covers experimental features that might change in future versions of Flower +> Please consult the regular PyTorch code examples ([quickstart](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch), [advanced](https://github.com/adap/flower/tree/main/examples/advanced-pytorch)) to learn how to use Flower with PyTorch. + +The following steps describe how to start a long-running Flower server (SuperLink) and a long-running Flower client (SuperNode) with client authentication enabled. + +## Project Setup + +Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: + +```shell +git clone --depth=1 https://github.com/adap/flower.git _tmp && mv _tmp/examples/flower-client-authentication . 
&& rm -rf _tmp && cd flower-client-authentication +``` + +This will create a new directory called `flower-client-authentication` with the following project structure: + +```bash +$ tree . +. +├── certificate.conf # <-- configuration for OpenSSL +├── generate.sh # <-- generate certificates and keys +├── pyproject.toml # <-- project dependencies +├── client.py # <-- contains `ClientApp` +├── server.py # <-- contains `ServerApp` +└── task.py # <-- task-specific code (model, data) +``` + +## Install dependencies + +Project dependencies (such as `torch` and `flwr`) are defined in `pyproject.toml`. You can install the dependencies by invoking `pip`: + +```shell +# From a new python environment, run: +pip install . +``` + +Then, to verify that everything works correctly you can run the following command: + +```shell +python3 -c "import flwr" +``` + +If you don't see any errors you're good to go! + +## Generate public and private keys + +```bash +./generate.sh +``` + +`generate.sh` is a script that (by default) generates certificates for creating a secure TLS connection +and three private and public key pairs for one server and two clients. +You can generate more keys by specifying the number of client credentials that you wish to generate. +The script also generates a CSV file that includes each of the generated (client) public keys. + +⚠️ Note that this script should only be used for development purposes and not for creating production key pairs. + +```bash +./generate.sh {your_number_of_clients} +``` + +## Start the long-running Flower server (SuperLink) + +To start a long-running Flower server and enable client authentication is very easy; all you need to do is type +`--require-client-authentication` followed by the path to the known `client_public_keys.csv`, server's private key +`server_credentials`, and server's public key `server_credentials.pub`. Notice that you can only enable client +authentication with a secure TLS connection. 
+ +```bash +flower-superlink \ + --certificates certificates/ca.crt certificates/server.pem certificates/server.key \ + --require-client-authentication keys/client_public_keys.csv keys/server_credentials keys/server_credentials.pub +``` + +## Start the long-running Flower client (SuperNode) + +In a new terminal window, start the first long-running Flower client: + +```bash +flower-client-app client:app \ + --root-certificates certificates/ca.crt \ + --server 127.0.0.1:9092 \ + --authentication-keys keys/client_credentials_1 keys/client_credentials_1.pub +``` + +In yet another new terminal window, start the second long-running Flower client: + +```bash +flower-client-app client:app \ + --root-certificates certificates/ca.crt \ + --server 127.0.0.1:9092 \ + --authentication-keys keys/client_credentials_2 keys/client_credentials_2.pub +``` + +If you generated more than 2 client credentials, you can add more clients by opening new terminal windows and running the command +above. Don't forget to specify the correct client private and public keys for each client instance you created. 
+ +## Run the Flower App + +With both the long-running server (SuperLink) and two clients (SuperNode) up and running, we can now run the actual Flower ServerApp: + +```bash +flower-server-app server:app --root-certificates certificates/ca.crt --dir ./ --server 127.0.0.1:9091 +``` diff --git a/examples/flower-client-authentication/certificate.conf b/examples/flower-client-authentication/certificate.conf new file mode 100644 index 000000000000..ea97fcbb700d --- /dev/null +++ b/examples/flower-client-authentication/certificate.conf @@ -0,0 +1,20 @@ +[req] +default_bits = 4096 +prompt = no +default_md = sha256 +req_extensions = req_ext +distinguished_name = dn + +[dn] +C = DE +ST = HH +O = Flower +CN = localhost + +[req_ext] +subjectAltName = @alt_names + +[alt_names] +DNS.1 = localhost +IP.1 = ::1 +IP.2 = 127.0.0.1 diff --git a/examples/flower-client-authentication/client.py b/examples/flower-client-authentication/client.py new file mode 100644 index 000000000000..3c99d5a410c9 --- /dev/null +++ b/examples/flower-client-authentication/client.py @@ -0,0 +1,43 @@ +from typing import Dict +from flwr.common import NDArrays, Scalar +from flwr.client import ClientApp, NumPyClient + +from task import ( + Net, + DEVICE, + load_data, + get_parameters, + set_parameters, + train, + test, +) + + +# Load model and data (simple CNN, CIFAR-10) +net = Net().to(DEVICE) +trainloader, testloader = load_data() + + +# Define Flower client and client_fn +class FlowerClient(NumPyClient): + def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: + return get_parameters(net) + + def fit(self, parameters, config): + set_parameters(net, parameters) + results = train(net, trainloader, testloader, epochs=1, device=DEVICE) + return get_parameters(net), len(trainloader.dataset), results + + def evaluate(self, parameters, config): + set_parameters(net, parameters) + loss, accuracy = test(net, testloader) + return loss, len(testloader.dataset), {"accuracy": accuracy} + + +def client_fn(cid: 
str): + return FlowerClient().to_client() + + +app = ClientApp( + client_fn=client_fn, +) diff --git a/examples/flower-client-authentication/generate.sh b/examples/flower-client-authentication/generate.sh new file mode 100644 index 000000000000..ebfdc17b80b5 --- /dev/null +++ b/examples/flower-client-authentication/generate.sh @@ -0,0 +1,72 @@ +#!/bin/bash +# This script will generate all certificates if ca.crt does not exist + +set -e +# Change directory to the script's directory +cd "$(dirname "${BASH_SOURCE[0]}")" + +CERT_DIR=certificates + +# Generate directories if not exists +mkdir -p $CERT_DIR + +# Clearing any existing files in the certificates directory +rm -f $CERT_DIR/* + +# Generate the root certificate authority key and certificate based on key +openssl genrsa -out $CERT_DIR/ca.key 4096 +openssl req \ + -new \ + -x509 \ + -key $CERT_DIR/ca.key \ + -sha256 \ + -subj "/C=DE/ST=HH/O=CA, Inc." \ + -days 365 -out $CERT_DIR/ca.crt + +# Generate a new private key for the server +openssl genrsa -out $CERT_DIR/server.key 4096 + +# Create a signing CSR +openssl req \ + -new \ + -key $CERT_DIR/server.key \ + -out $CERT_DIR/server.csr \ + -config certificate.conf + +# Generate a certificate for the server +openssl x509 \ + -req \ + -in $CERT_DIR/server.csr \ + -CA $CERT_DIR/ca.crt \ + -CAkey $CERT_DIR/ca.key \ + -CAcreateserial \ + -out $CERT_DIR/server.pem \ + -days 365 \ + -sha256 \ + -extfile certificate.conf \ + -extensions req_ext + +KEY_DIR=keys + +mkdir -p $KEY_DIR + +rm -f $KEY_DIR/* + +ssh-keygen -t ecdsa -b 384 -N "" -f "${KEY_DIR}/server_credentials" -C "" + +generate_client_credentials() { + local num_clients=${1:-2} + for ((i=1; i<=num_clients; i++)) + do + ssh-keygen -t ecdsa -b 384 -N "" -f "${KEY_DIR}/client_credentials_$i" -C "" + done +} + +generate_client_credentials "$1" + +printf "%s" "$(cat "${KEY_DIR}/client_credentials_1.pub" | sed 's/.$//')" > $KEY_DIR/client_public_keys.csv +for ((i=2; i<=${1:-2}; i++)) +do + printf ",%s" "$(sed 's/.$//' 
< "${KEY_DIR}/client_credentials_$i.pub")" >> $KEY_DIR/client_public_keys.csv +done +printf "\n" >> $KEY_DIR/client_public_keys.csv diff --git a/examples/flower-client-authentication/pyproject.toml b/examples/flower-client-authentication/pyproject.toml new file mode 100644 index 000000000000..e80a50b1eef9 --- /dev/null +++ b/examples/flower-client-authentication/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "flower-client-authentication" +version = "0.1.0" +description = "Multi-Tenant Federated Learning with Flower and PyTorch" +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +dependencies = [ + "flwr-nightly[rest,simulation]", + "torch==1.13.1", + "torchvision==0.14.1", + "tqdm==4.66.3" +] + +[tool.hatch.build.targets.wheel] +packages = ["."] diff --git a/examples/flower-client-authentication/server.py b/examples/flower-client-authentication/server.py new file mode 100644 index 000000000000..d88dc1d1a641 --- /dev/null +++ b/examples/flower-client-authentication/server.py @@ -0,0 +1,42 @@ +from typing import List, Tuple + +import flwr as fl +from flwr.common import Metrics +from flwr.server.strategy.fedavg import FedAvg +from flwr.server import ServerApp + + +# Define metric aggregation function +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + examples = [num_examples for num_examples, _ in metrics] + + # Multiply accuracy of each client by number of examples used + train_losses = [num_examples * m["train_loss"] for num_examples, m in metrics] + train_accuracies = [ + num_examples * m["train_accuracy"] for num_examples, m in metrics + ] + val_losses = [num_examples * m["val_loss"] for num_examples, m in metrics] + val_accuracies = [num_examples * m["val_accuracy"] for num_examples, m in metrics] + + # Aggregate and return custom metric (weighted average) + return { + "train_loss": sum(train_losses) / sum(examples), + 
"train_accuracy": sum(train_accuracies) / sum(examples), + "val_loss": sum(val_losses) / sum(examples), + "val_accuracy": sum(val_accuracies) / sum(examples), + } + + +# Define strategy +strategy = FedAvg( + fraction_fit=1.0, # Select all available clients + fraction_evaluate=0.0, # Disable evaluation + min_available_clients=2, + fit_metrics_aggregation_fn=weighted_average, +) + + +app = ServerApp( + config=fl.server.ServerConfig(num_rounds=3), + strategy=strategy, +) diff --git a/examples/flower-client-authentication/task.py b/examples/flower-client-authentication/task.py new file mode 100644 index 000000000000..276aace885df --- /dev/null +++ b/examples/flower-client-authentication/task.py @@ -0,0 +1,95 @@ +import warnings +from collections import OrderedDict + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import DataLoader +from torchvision.datasets import CIFAR10 +from torchvision.transforms import Compose, Normalize, ToTensor +from tqdm import tqdm + + +warnings.filterwarnings("ignore", category=UserWarning) +DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + +class Net(nn.Module): + """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')""" + + def __init__(self) -> None: + super(Net, self).__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 16 * 5 * 5) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + return self.fc3(x) + + +def train(net, trainloader, valloader, epochs, device): + """Train the model on the training set.""" + print("Starting training...") + net.to(device) # move model to GPU if available + criterion = torch.nn.CrossEntropyLoss().to(device) + optimizer 
= torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9) + net.train() + for _ in range(epochs): + for images, labels in trainloader: + images, labels = images.to(device), labels.to(device) + optimizer.zero_grad() + loss = criterion(net(images), labels) + loss.backward() + optimizer.step() + + train_loss, train_acc = test(net, trainloader) + val_loss, val_acc = test(net, valloader) + + results = { + "train_loss": train_loss, + "train_accuracy": train_acc, + "val_loss": val_loss, + "val_accuracy": val_acc, + } + return results + + +def test(net, testloader): + """Validate the model on the test set.""" + net.to(DEVICE) + criterion = torch.nn.CrossEntropyLoss() + correct, loss = 0, 0.0 + with torch.no_grad(): + for images, labels in tqdm(testloader): + outputs = net(images.to(DEVICE)) + labels = labels.to(DEVICE) + loss += criterion(outputs, labels).item() + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + accuracy = correct / len(testloader.dataset) + return loss, accuracy + + +def load_data(): + """Load CIFAR-10 (training and test set).""" + trf = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) + trainset = CIFAR10("./data", train=True, download=True, transform=trf) + testset = CIFAR10("./data", train=False, download=True, transform=trf) + return DataLoader(trainset, batch_size=32, shuffle=True), DataLoader(testset) + + +def get_parameters(net): + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + +def set_parameters(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) diff --git a/examples/flower-in-30-minutes/tutorial.ipynb b/examples/flower-in-30-minutes/tutorial.ipynb index 0e42cff924e8..9f0c86a2507a 100644 --- a/examples/flower-in-30-minutes/tutorial.ipynb +++ b/examples/flower-in-30-minutes/tutorial.ipynb @@ -13,7 +13,7 @@ "\n", "> Star Flower on [GitHub 
⭐️](https://github.com/adap/flower) and join the Flower community on Slack to connect, ask questions, and get help: [Join Slack 🌼](https://flower.ai/join-slack/). We'd love to hear from you in the #introductions channel! And if anything is unclear, head over to the #questions channel.\n", "\n", - "Let's get stated!" + "Let's get started!" ] }, { diff --git a/examples/flower-via-docker-compose/helpers/load_data.py b/examples/flower-via-docker-compose/helpers/load_data.py index 1f2784946868..b7d6b0de26c5 100644 --- a/examples/flower-via-docker-compose/helpers/load_data.py +++ b/examples/flower-via-docker-compose/helpers/load_data.py @@ -25,7 +25,7 @@ def load_data(data_sampling_percentage=0.5, client_id=1, total_clients=2): partition.set_format("numpy") # Divide data on each client: 80% train, 20% test - partition = partition.train_test_split(test_size=0.2) + partition = partition.train_test_split(test_size=0.2, seed=42) x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"] x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"] diff --git a/examples/flower-via-docker-compose/requirements.txt b/examples/flower-via-docker-compose/requirements.txt index b93e5b1d9f2b..d08937c4d02a 100644 --- a/examples/flower-via-docker-compose/requirements.txt +++ b/examples/flower-via-docker-compose/requirements.txt @@ -1,4 +1,4 @@ -flwr==1.7.0 +flwr==1.8.0 tensorflow==2.13.1 numpy==1.24.3 prometheus_client == 0.19.0 diff --git a/examples/llm-flowertune/requirements.txt b/examples/llm-flowertune/requirements.txt index 196531c99b92..7c66612eb2a5 100644 --- a/examples/llm-flowertune/requirements.txt +++ b/examples/llm-flowertune/requirements.txt @@ -1,5 +1,5 @@ -flwr-nightly[rest,simulation] -flwr_datasets==0.0.2 +flwr[rest,simulation]>=1.8.0, <2.0 +flwr-datasets>=0.0.2 hydra-core==1.3.2 trl==0.7.2 bitsandbytes==0.41.3 diff --git a/examples/mxnet-from-centralized-to-federated/.gitignore 
b/examples/mxnet-from-centralized-to-federated/.gitignore deleted file mode 100644 index 10d00b5797e2..000000000000 --- a/examples/mxnet-from-centralized-to-federated/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.gz diff --git a/examples/mxnet-from-centralized-to-federated/README.md b/examples/mxnet-from-centralized-to-federated/README.md deleted file mode 100644 index 2c3f240d8978..000000000000 --- a/examples/mxnet-from-centralized-to-federated/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# MXNet: From Centralized To Federated - -> Note the MXNet project has ended, and is now in [Attic](https://attic.apache.org/projects/mxnet.html). The MXNet GitHub has also [been archived](https://github.com/apache/mxnet). As a result, this example won't be receiving more updates. Using MXNet is no longer recommnended. - -This example demonstrates how an already existing centralized MXNet-based machine learning project can be federated with Flower. - -This introductory example for Flower uses MXNet, but you're not required to be a MXNet expert to run the example. The example will help you to understand how Flower can be used to build federated learning use cases based on an existing MXNet project. - -## Project Setup - -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: - -```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/mxnet-from-centralized-to-federated . && rm -rf flower && cd mxnet-from-centralized-to-federated -``` - -This will create a new directory called `mxnet-from-centralized-to-federated` containing the following files: - -```shell --- pyproject.toml --- requirements.txt --- mxnet_mnist.py --- client.py --- server.py --- README.md -``` - -### Installing Dependencies - -Project dependencies (such as `mxnet` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. 
We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. - -#### Poetry - -```shell -poetry install -poetry shell -``` - -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: - -```shell -poetry run python3 -c "import flwr" -``` - -If you don't see any errors you're good to go! - -#### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. - -```shell -pip install -r requirements.txt -``` - -## Run MXNet Federated - -This MXNet example is based on the [Handwritten Digit Recognition](https://mxnet.apache.org/versions/1.7.0/api/python/docs/tutorials/packages/gluon/image/mnist.html) tutorial and uses the MNIST dataset (hand-written digits with 28x28 pixels in greyscale with 10 classes). Feel free to consult the tutorial if you want to get a better understanding of MXNet. The file `mxnet_mnist.py` contains all the steps that are described in the tutorial. It loads the dataset and a sequential model, trains the model with the training set, and evaluates the trained model on the test set. - -The only things we need are a simple Flower server (in `server.py`) and a Flower client (in `client.py`). The Flower client basically takes model and training code tells Flower how to call it. - -Start the server in a terminal as follows: - -```shell -python3 server.py -``` - -Now that the server is running and waiting for clients, we can start two clients that will participate in the federated learning process. To do so simply open two more terminal windows and run the following commands. 
- -Start client 1 in the first terminal: - -```shell -python3 client.py -``` - -Start client 2 in the second terminal: - -```shell -python3 client.py -``` - -You are now training a MXNet-based classifier on MNIST, federated across two clients. The setup is of course simplified since both clients hold the same dataset, but you can now continue with your own explorations. How about changing from a sequential model to a CNN? How about adding more clients? diff --git a/examples/mxnet-from-centralized-to-federated/client.py b/examples/mxnet-from-centralized-to-federated/client.py deleted file mode 100644 index bb666a26508e..000000000000 --- a/examples/mxnet-from-centralized-to-federated/client.py +++ /dev/null @@ -1,93 +0,0 @@ -"""Flower client example using MXNet for MNIST classification.""" - -from typing import Dict, List, Tuple - -import flwr as fl -import numpy as np -import mxnet as mx -from mxnet import nd - -import mxnet_mnist - - -# Flower Client -class MNISTClient(fl.client.NumPyClient): - """Flower client implementing MNIST classification using MXNet.""" - - def __init__( - self, - model: mxnet_mnist.model(), - train_data: mx.io.NDArrayIter, - val_data: mx.io.NDArrayIter, - device: mx.context, - ) -> None: - self.model = model - self.train_data = train_data - self.val_data = val_data - self.device = device - - def get_parameters(self, config: Dict) -> List[np.ndarray]: - # Return model parameters as a list of NumPy Arrays - param = [] - for val in self.model.collect_params(".*weight").values(): - p = val.data() - # convert parameters from NDArray to Numpy Array required by Flower Numpy Client - param.append(p.asnumpy()) - return param - - def set_parameters(self, parameters: List[np.ndarray]) -> None: - # Collect model parameters and set new weight values - params = zip(self.model.collect_params(".*weight").keys(), parameters) - for key, value in params: - self.model.collect_params().setattr(key, value) - - def fit( - self, parameters: List[np.ndarray], 
config: Dict - ) -> Tuple[List[np.ndarray], int, Dict]: - # Set model parameters, train model, return updated model parameters - self.set_parameters(parameters) - [accuracy, loss], num_examples = mxnet_mnist.train( - self.model, self.train_data, epoch=2, device=self.device - ) - results = {"accuracy": accuracy[1], "loss": loss[1]} - return self.get_parameters(config={}), num_examples, results - - def evaluate( - self, parameters: List[np.ndarray], config: Dict - ) -> Tuple[int, float, Dict]: - # Set model parameters, evaluate model on local test dataset, return result - self.set_parameters(parameters) - [accuracy, loss], num_examples = mxnet_mnist.test( - self.model, self.val_data, device=self.device - ) - print("Evaluation accuracy & loss", accuracy, loss) - return ( - float(loss[1]), - num_examples, - {"accuracy": float(accuracy[1])}, - ) - - -def main() -> None: - """Load data, start MNISTClient.""" - - # Set context to GPU or - if not available - to CPU - DEVICE = [mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()] - - # Load data - train_data, val_data = mxnet_mnist.load_data() - - # Load model (from centralized training) - model = mxnet_mnist.model() - - # Do one forward propagation to initialize parameters - init = nd.random.uniform(shape=(2, 784)) - model(init) - - # Start Flower client - client = MNISTClient(model, train_data, val_data, DEVICE) - fl.client.start_numpy_client(server_address="0.0.0.0:8080", client=client) - - -if __name__ == "__main__": - main() diff --git a/examples/mxnet-from-centralized-to-federated/mxnet_mnist.py b/examples/mxnet-from-centralized-to-federated/mxnet_mnist.py deleted file mode 100644 index 5cf39da7c9ca..000000000000 --- a/examples/mxnet-from-centralized-to-federated/mxnet_mnist.py +++ /dev/null @@ -1,144 +0,0 @@ -"""MXNet MNIST image classification. 
- -The code is generally adapted from: - -https://mxnet.apache.org/api/python/docs/tutorials/packages/gluon/image/mnist.html -""" - -from typing import List, Tuple -import mxnet as mx -from mxnet import gluon -from mxnet.gluon import nn -from mxnet import autograd as ag -import mxnet.ndarray as F -from mxnet import nd - -# Fixing the random seed -mx.random.seed(42) - - -def load_data() -> Tuple[mx.io.NDArrayIter, mx.io.NDArrayIter]: - print("Download Dataset") - # Download MNIST data - mnist = mx.test_utils.get_mnist() - batch_size = 100 - train_data = mx.io.NDArrayIter( - mnist["train_data"], mnist["train_label"], batch_size, shuffle=True - ) - val_data = mx.io.NDArrayIter(mnist["test_data"], mnist["test_label"], batch_size) - return train_data, val_data - - -def model(): - # Define simple Sequential model - net = nn.Sequential() - net.add(nn.Dense(256, activation="relu")) - net.add(nn.Dense(64, activation="relu")) - net.add(nn.Dense(10)) - net.collect_params().initialize() - return net - - -def train( - net: mx.gluon.nn, train_data: mx.io.NDArrayIter, epoch: int, device: mx.context -) -> Tuple[List[float], int]: - trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.01}) - # Use Accuracy and Cross Entropy Loss as the evaluation metric. - accuracy_metric = mx.metric.Accuracy() - loss_metric = mx.metric.CrossEntropy() - metrics = mx.metric.CompositeEvalMetric() - for child_metric in [accuracy_metric, loss_metric]: - metrics.add(child_metric) - softmax_cross_entropy_loss = gluon.loss.SoftmaxCrossEntropyLoss() - for i in range(epoch): - # Reset the train data iterator. - train_data.reset() - # Calculate number of samples - num_examples = 0 - # Loop over the train data iterator. - for batch in train_data: - # Splits train data into multiple slices along batch_axis - # and copy each slice into a context. 
- data = gluon.utils.split_and_load( - batch.data[0], ctx_list=device, batch_axis=0 - ) - # Splits train labels into multiple slices along batch_axis - # and copy each slice into a context. - label = gluon.utils.split_and_load( - batch.label[0], ctx_list=device, batch_axis=0 - ) - outputs = [] - # Inside training scope - with ag.record(): - for x, y in zip(data, label): - z = net(x) - # Computes softmax cross entropy loss. - loss = softmax_cross_entropy_loss(z, y) - # Backpropogate the error for one iteration. - loss.backward() - outputs.append(z.softmax()) - num_examples += len(x) - # Updates internal evaluation - metrics.update(label, outputs) - # Make one step of parameter update. Trainer needs to know the - # batch size of data to normalize the gradient by 1/batch_size. - trainer.step(batch.data[0].shape[0]) - # Gets the evaluation result. - trainings_metric = metrics.get_name_value() - print("Accuracy & loss at epoch %d: %s" % (i, trainings_metric)) - return trainings_metric, num_examples - - -def test( - net: mx.gluon.nn, val_data: mx.io.NDArrayIter, device: mx.context -) -> Tuple[List[float], int]: - # Use Accuracy as the evaluation metric. - accuracy_metric = mx.metric.Accuracy() - loss_metric = mx.metric.CrossEntropy() - metrics = mx.metric.CompositeEvalMetric() - for child_metric in [accuracy_metric, loss_metric]: - metrics.add(child_metric) - # Reset the validation data iterator. - val_data.reset() - # Get number of samples for val_dat - num_examples = 0 - # Loop over the validation data iterator. - for batch in val_data: - # Splits validation data into multiple slices along batch_axis - # and copy each slice into a context. - data = gluon.utils.split_and_load(batch.data[0], ctx_list=device, batch_axis=0) - # Splits validation label into multiple slices along batch_axis - # and copy each slice into a context. 
- label = gluon.utils.split_and_load( - batch.label[0], ctx_list=device, batch_axis=0 - ) - outputs = [] - for x in data: - outputs.append(net(x).softmax()) - num_examples += len(x) - # Updates internal evaluation - metrics.update(label, outputs) - return metrics.get_name_value(), num_examples - - -def main(): - # Set context to GPU or - if not available - to CPU - DEVICE = [mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()] - # Load train and validation data - train_data, val_data = load_data() - # Define sequential model - net = model() - init = nd.random.uniform(shape=(2, 784)) - net(init) - # Start model training based on training set - train(net=net, train_data=train_data, epoch=2, device=DEVICE) - # Evaluate model using loss and accuracy - eval_metric, _ = test(net=net, val_data=val_data, device=DEVICE) - acc = eval_metric[0] - loss = eval_metric[1] - print("Evaluation Loss: ", loss) - print("Evaluation Accuracy: ", acc) - - -if __name__ == "__main__": - main() diff --git a/examples/mxnet-from-centralized-to-federated/pyproject.toml b/examples/mxnet-from-centralized-to-federated/pyproject.toml deleted file mode 100644 index b00b3ddfe412..000000000000 --- a/examples/mxnet-from-centralized-to-federated/pyproject.toml +++ /dev/null @@ -1,15 +0,0 @@ -[build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" - -[tool.poetry] -name = "mxnet_example" -version = "0.1.0" -description = "MXNet example with MNIST and CNN" -authors = ["The Flower Authors "] - -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = "1.6.0" -mxnet = "1.9.1" -numpy = "1.23.1" diff --git a/examples/mxnet-from-centralized-to-federated/requirements.txt b/examples/mxnet-from-centralized-to-federated/requirements.txt deleted file mode 100644 index 8dd6f7150dfd..000000000000 --- a/examples/mxnet-from-centralized-to-federated/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -flwr==1.6.0 -mxnet==1.9.1 -numpy==1.23.1 diff --git 
a/examples/mxnet-from-centralized-to-federated/server.py b/examples/mxnet-from-centralized-to-federated/server.py deleted file mode 100644 index 871aa4e8ec99..000000000000 --- a/examples/mxnet-from-centralized-to-federated/server.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Flower server example.""" - -import flwr as fl - -if __name__ == "__main__": - fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), - ) diff --git a/examples/opacus/README.md b/examples/opacus/README.md index 39d44edcdf22..6fc0d2ff49a0 100644 --- a/examples/opacus/README.md +++ b/examples/opacus/README.md @@ -1,28 +1,60 @@ -# Differentially Private Federated Learning using Opacus, PyTorch and Flower +# Training with Sample-Level Differential Privacy using Opacus Privacy Engine -This example contains code demonstrating how to include the Opacus library for training a model using DP-SGD. The code is adapted from multiple other examples: +In this example, we demonstrate how to train a model with differential privacy (DP) using Flower. We employ PyTorch and integrate the Opacus Privacy Engine to achieve sample-level differential privacy. This setup ensures robust privacy guarantees during the client training phase. The code is adapted from the [PyTorch Quickstart example](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch). -- PyTorch Quickstart -- Simulation Quickstart -- Simulation Extended Example +For more information about DP in Flower please refer to the [tutorial](https://flower.ai/docs/framework/how-to-use-differential-privacy.html). For additional information about Opacus, visit the official [website](https://opacus.ai/). 
-## Requirements +## Environments Setup -- **Flower** nightly release (or development version from `main` branch) for the simulation, otherwise normal Flower for the client -- **PyTorch** 1.7.1 (but most likely will work with older versions) -- **Ray** 1.4.1 (just for the simulation) -- **Opacus** 0.14.0 +Start by cloning the example. We prepared a single-line command that you can copy into your shell which will checkout the example for you: -## Privacy Parameters +```shell +git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/opacus . && rm -rf flower && cd opacus +``` -The parameters can be set in `dp_cifar_main.py`. +This will create a new directory called `opacus` containing the following files: -## Running the client +```shell +-- pyproject.toml +-- client.py +-- server.py +-- README.md +``` -Run the server with `python server.py`. Then open two (or more) new terminals to start two (or more) clients with `python dp_cifar_client.py`. +### Installing dependencies -## Running the simulation +Project dependencies are defined in `pyproject.toml`. Install them with: -Note: It is not possible to see the total privacy budget used with this example since the simulation creates clients from scratch every round. +```shell +pip install . +``` -Run the simulation with `python dp_cifar_simulation.py`. +## Run Flower with Opacus and Pytorch + +### 1. Start the long-running Flower server (SuperLink) + +```bash +flower-superlink --insecure +``` + +### 2. Start the long-running Flower clients (SuperNodes) + +Start 2 Flower `SuperNodes` in 2 separate terminal windows, using: + +```bash +flower-client-app client:appA --insecure +``` + +```bash +flower-client-app client:appB --insecure +``` + +Opacus hyperparameters can be passed for each client in `ClientApp` instantiation (in `client.py`). In this example, `noise_multiplier=1.5` and `noise_multiplier=1` are used for the first and second client respectively. + +### 3. 
Run the Flower App + +With both the long-running server (SuperLink) and two clients (SuperNode) up and running, we can now run the actual Flower App: + +```bash +flower-server-app server:app --insecure +``` diff --git a/examples/opacus/client.py b/examples/opacus/client.py new file mode 100644 index 000000000000..51c1e1cfa667 --- /dev/null +++ b/examples/opacus/client.py @@ -0,0 +1,172 @@ +import argparse +import warnings +from collections import OrderedDict + +from flwr_datasets import FederatedDataset +from flwr.client import NumPyClient, ClientApp +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import DataLoader +from torchvision.transforms import Compose, Normalize, ToTensor +from tqdm import tqdm + +from opacus import PrivacyEngine + +warnings.filterwarnings("ignore", category=UserWarning) + +DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + +class Net(nn.Module): + """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')""" + + def __init__(self) -> None: + super(Net, self).__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 16 * 5 * 5) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + return self.fc3(x) + + +def train(net, train_loader, privacy_engine, optimizer, target_delta, epochs=1): + criterion = torch.nn.CrossEntropyLoss() + for _ in range(epochs): + for batch in tqdm(train_loader, "Training"): + images = batch["img"] + labels = batch["label"] + optimizer.zero_grad() + criterion(net(images.to(DEVICE)), labels.to(DEVICE)).backward() + optimizer.step() + + epsilon = privacy_engine.get_epsilon(delta=target_delta) + return epsilon + + +def test(net, test_loader): + 
criterion = torch.nn.CrossEntropyLoss() + correct, loss = 0, 0.0 + with torch.no_grad(): + for batch in tqdm(test_loader, "Testing"): + images = batch["img"].to(DEVICE) + labels = batch["label"].to(DEVICE) + outputs = net(images) + loss += criterion(outputs, labels).item() + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + accuracy = correct / len(test_loader.dataset) + return loss, accuracy + + +def load_data(partition_id): + fds = FederatedDataset(dataset="cifar10", partitioners={"train": 2}) + partition = fds.load_partition(partition_id) + # Divide data on each node: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + pytorch_transforms = Compose( + [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] + ) + + def apply_transforms(batch): + batch["img"] = [pytorch_transforms(img) for img in batch["img"]] + return batch + + partition_train_test = partition_train_test.with_transform(apply_transforms) + train_loader = DataLoader( + partition_train_test["train"], batch_size=32, shuffle=True + ) + test_loader = DataLoader(partition_train_test["test"], batch_size=32) + return train_loader, test_loader + + +class FlowerClient(NumPyClient): + def __init__( + self, + model, + train_loader, + test_loader, + target_delta, + noise_multiplier, + max_grad_norm, + ) -> None: + super().__init__() + self.test_loader = test_loader + self.optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9) + self.privacy_engine = PrivacyEngine(secure_mode=False) + self.target_delta = target_delta + ( + self.model, + self.optimizer, + self.train_loader, + ) = self.privacy_engine.make_private( + module=model, + optimizer=self.optimizer, + data_loader=train_loader, + noise_multiplier=noise_multiplier, + max_grad_norm=max_grad_norm, + ) + + def get_parameters(self, config): + return [val.cpu().numpy() for _, val in self.model.state_dict().items()] + + def set_parameters(self, parameters): + params_dict = 
zip(self.model.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + self.model.load_state_dict(state_dict, strict=True) + + def fit(self, parameters, config): + self.set_parameters(parameters) + epsilon = train( + self.model, + self.train_loader, + self.privacy_engine, + self.optimizer, + self.target_delta, + ) + + if epsilon is not None: + print(f"Epsilon value for delta={self.target_delta} is {epsilon:.2f}") + else: + print("Epsilon value not available.") + return (self.get_parameters(config={}), len(self.train_loader), {}) + + def evaluate(self, parameters, config): + self.set_parameters(parameters) + loss, accuracy = test(self.model, self.test_loader) + return loss, len(self.test_loader.dataset), {"accuracy": accuracy} + + +def client_fn_parameterized( + partition_id, target_delta=1e-5, noise_multiplier=1.3, max_grad_norm=1.0 +): + def client_fn(cid: str): + net = Net().to(DEVICE) + train_loader, test_loader = load_data(partition_id=partition_id) + return FlowerClient( + net, + train_loader, + test_loader, + target_delta, + noise_multiplier, + max_grad_norm, + ).to_client() + + return client_fn + + +appA = ClientApp( + client_fn=client_fn_parameterized(partition_id=0, noise_multiplier=1.5), +) + +appB = ClientApp( + client_fn=client_fn_parameterized(partition_id=1, noise_multiplier=1), +) diff --git a/examples/opacus/dp_cifar_client.py b/examples/opacus/dp_cifar_client.py deleted file mode 100644 index cc30e7728222..000000000000 --- a/examples/opacus/dp_cifar_client.py +++ /dev/null @@ -1,34 +0,0 @@ -import math - -import torch -import torchvision.transforms as transforms -from torch.utils.data import DataLoader -from torchvision.datasets import CIFAR10 - -import flwr as fl - -from dp_cifar_main import Net, DPCifarClient, PARAMS - -# Setup for running a single client manually (alternatively use simulation code in 'dp_cifar_simulation'). 
- - -def load_data(): - transform = transforms.Compose( - [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] - ) - data = CIFAR10("./data", train=True, download=True, transform=transform) - split = math.floor(len(data) * PARAMS["train_split"]) - trainset = torch.utils.data.Subset(data, list(range(0, split))) - trainset = torch.utils.data.Subset(data, list(range(split, len(data)))) - trainloader = DataLoader(trainset, PARAMS["batch_size"]) - testloader = DataLoader(trainset, PARAMS["batch_size"]) - sample_rate = PARAMS["batch_size"] / len(trainset) - return trainloader, testloader, sample_rate - - -model = Net() -trainloader, testloader, sample_rate = load_data() -fl.client.start_client( - server_address="127.0.0.1:8080", - client=DPCifarClient(model, trainloader, testloader).to_client(), -) diff --git a/examples/opacus/dp_cifar_main.py b/examples/opacus/dp_cifar_main.py deleted file mode 100644 index 174bd64678e5..000000000000 --- a/examples/opacus/dp_cifar_main.py +++ /dev/null @@ -1,119 +0,0 @@ -from collections import OrderedDict - -import torch -import torch.nn as nn -import torch.nn.functional as F - -import flwr as fl -from opacus import PrivacyEngine - -# Adapted from the PyTorch quickstart example. - - -# Define parameters. -PARAMS = { - "batch_size": 32, - "train_split": 0.7, - "local_epochs": 1, -} -PRIVACY_PARAMS = { - # 'target_epsilon': 5.0, - "target_delta": 1e-5, - "noise_multiplier": 0.4, - "max_grad_norm": 1.2, -} -DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - -# Define model used for training. 
-class Net(nn.Module): - def __init__(self) -> None: - super(Net, self).__init__() - self.conv1 = nn.Conv2d(3, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 5 * 5, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 10) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 5 * 5) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - x = self.fc3(x) - return x - - -def train(net, trainloader, privacy_engine, optimizer, epochs): - criterion = torch.nn.CrossEntropyLoss() - for _ in range(epochs): - for images, labels in trainloader: - images, labels = images.to(DEVICE), labels.to(DEVICE) - optimizer.zero_grad() - loss = criterion(net(images), labels) - loss.backward() - optimizer.step() - epsilon = privacy_engine.get_epsilon(delta=PRIVACY_PARAMS["target_delta"]) - return epsilon - - -def test(net, testloader): - criterion = torch.nn.CrossEntropyLoss() - correct, loss = 0, 0.0 - with torch.no_grad(): - for data in testloader: - images, labels = data[0].to(DEVICE), data[1].to(DEVICE) - outputs = net(images) - loss += criterion(outputs, labels).item() - _, predicted = torch.max(outputs.data, 1) - correct += (predicted == labels).sum().item() - accuracy = correct / len(testloader.dataset) - return loss, accuracy - - -# Define Flower client. -class DPCifarClient(fl.client.NumPyClient): - def __init__(self, model, trainloader, testloader) -> None: - super().__init__() - optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9) - self.testloader = testloader - # Create a privacy engine which will add DP and keep track of the privacy budget. 
- self.privacy_engine = PrivacyEngine() - self.model, self.optimizer, self.trainloader = self.privacy_engine.make_private( - module=model, - optimizer=optimizer, - data_loader=trainloader, - max_grad_norm=PRIVACY_PARAMS["max_grad_norm"], - noise_multiplier=PRIVACY_PARAMS["noise_multiplier"], - ) - - def get_parameters(self, config): - return [val.cpu().numpy() for _, val in self.model.state_dict().items()] - - def set_parameters(self, parameters): - params_dict = zip(self.model.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - self.model.load_state_dict(state_dict, strict=True) - - def fit(self, parameters, config): - self.set_parameters(parameters) - epsilon = train( - self.model, - self.trainloader, - self.privacy_engine, - self.optimizer, - PARAMS["local_epochs"], - ) - print(f"epsilon = {epsilon:.2f}") - return ( - self.get_parameters(config={}), - len(self.trainloader), - {"epsilon": epsilon}, - ) - - def evaluate(self, parameters, config): - self.set_parameters(parameters) - loss, accuracy = test(self.model, self.testloader) - return float(loss), len(self.testloader), {"accuracy": float(accuracy)} diff --git a/examples/opacus/dp_cifar_simulation.py b/examples/opacus/dp_cifar_simulation.py deleted file mode 100644 index d957caf8785c..000000000000 --- a/examples/opacus/dp_cifar_simulation.py +++ /dev/null @@ -1,93 +0,0 @@ -import math -from collections import OrderedDict -from typing import Callable, Dict, Optional, Tuple - -import flwr as fl -import numpy as np -import torch -import torchvision.transforms as transforms -from torch.utils.data import DataLoader -from torchvision.datasets import CIFAR10 -from flwr.common.typing import Scalar - -from dp_cifar_main import DEVICE, PARAMS, DPCifarClient, Net, test - -# Adapted from the PyTorch quickstart and ray simulation (quickstart and extended) examples. - - -# Define parameters. 
-NUM_CLIENTS = 2 - - -def client_fn(cid: str) -> fl.client.Client: - # Load model. - model = Net() - # Check model is compatible with Opacus. - - # Load data partition (divide CIFAR10 into NUM_CLIENTS distinct partitions, using 30% for validation). - transform = transforms.Compose( - [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] - ) - data = CIFAR10("./data", train=True, download=True, transform=transform) - - partitions = tuple([len(data) // NUM_CLIENTS for i in range(NUM_CLIENTS)]) - partitioned_data = torch.utils.data.random_split( - data, partitions, generator=torch.Generator().manual_seed(2) - ) - client_data = partitioned_data[int(cid)] - split = math.floor(len(client_data) * PARAMS["train_split"]) - client_trainset = torch.utils.data.Subset(client_data, list(range(0, split))) - client_testset = torch.utils.data.Subset( - client_data, list(range(split, len(client_data))) - ) - client_trainloader = DataLoader(client_trainset, PARAMS["batch_size"]) - client_testloader = DataLoader(client_testset, PARAMS["batch_size"]) - - return DPCifarClient(model, client_trainloader, client_testloader).to_client() - - -# Define an evaluation function for centralized evaluation (using whole CIFAR10 testset). -def get_evaluate_fn() -> Callable[[fl.common.NDArrays], Optional[Tuple[float, float]]]: - def evaluate( - server_round: int, parameters: fl.common.NDArrays, config: Dict[str, Scalar] - ): - transform = transforms.Compose( - [ - transforms.ToTensor(), - transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), - ] - ) - testset = CIFAR10(root="./data", train=False, transform=transform) - model = Net() - # Set weights in model. 
- state_dict = OrderedDict( - { - k: torch.tensor(np.atleast_1d(v)) - for k, v in zip(model.state_dict().keys(), parameters) - } - ) - model.load_state_dict(state_dict, strict=True) - model.to(DEVICE) - testloader = torch.utils.data.DataLoader(testset, PARAMS["batch_size"]) - loss, accuracy = test(model, testloader) - # Return metrics. - return loss, {"accuracy": accuracy} - - return evaluate - - -def main() -> None: - # Start Flower simulation - fl.simulation.start_simulation( - client_fn=client_fn, - num_clients=NUM_CLIENTS, - client_resources={"num_cpus": 1}, - config=fl.server.ServerConfig(num_rounds=3), - strategy=fl.server.strategy.FedAvg( - fraction_fit=0.1, fraction_evaluate=0.1, evaluate_fn=get_evaluate_fn() - ), - ) - - -if __name__ == "__main__": - main() diff --git a/examples/opacus/pyproject.toml b/examples/opacus/pyproject.toml index 26914fa27aa4..0aaa167d0a28 100644 --- a/examples/opacus/pyproject.toml +++ b/examples/opacus/pyproject.toml @@ -1,15 +1,22 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "flwr_opacus" +[project] +name = "opacus-fl" version = "0.1.0" -description = "Differentially Private Federated Learning with Opacus and Flower" -authors = ["The Flower Authors "] +description = "Sample Differential Privacy with Opacus in Flower" +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +dependencies = [ + "flwr>=1.8.0,<2.0", + "flwr-datasets[vision]>=0.0.2,<1.0.0", + "torch==2.1.1", + "torchvision==0.16.1", + "tqdm==4.65.0", + "opacus==v1.4.1" +] -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" -opacus = "1.4.0" -torchvision = "0.15.2" +[tool.hatch.build.targets.wheel] +packages = ["."] \ No newline at end of file diff --git a/examples/opacus/requirements.txt b/examples/opacus/requirements.txt deleted file mode 100644 index f17b78fbf311..000000000000 --- 
a/examples/opacus/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -flwr>=1.0, <2.0 -opacus==1.4.0 -torchvision==0.15.2 diff --git a/examples/opacus/server.py b/examples/opacus/server.py index 8d077f76d8b3..a206c48307e2 100644 --- a/examples/opacus/server.py +++ b/examples/opacus/server.py @@ -1,6 +1,22 @@ +from typing import List, Tuple + import flwr as fl +from flwr.server.strategy import FedAvg +from flwr.common import Metrics +from flwr.server import ServerApp, ServerConfig + + +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + return {"accuracy": sum(accuracies) / sum(examples)} + + +strategy = FedAvg(evaluate_metrics_aggregation_fn=weighted_average) + +config = ServerConfig(num_rounds=3) -fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), +app = ServerApp( + config=config, + strategy=strategy, ) diff --git a/examples/pytorch-from-centralized-to-federated/cifar.py b/examples/pytorch-from-centralized-to-federated/cifar.py index 277a21da2e70..c592b63b0042 100644 --- a/examples/pytorch-from-centralized-to-federated/cifar.py +++ b/examples/pytorch-from-centralized-to-federated/cifar.py @@ -56,7 +56,7 @@ def load_data(partition_id: int): fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) partition = fds.load_partition(partition_id) # Divide data on each node: 80% train, 20% test - partition_train_test = partition.train_test_split(test_size=0.2) + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) pytorch_transforms = Compose( [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] ) diff --git a/examples/quickstart-cpp/README.md b/examples/quickstart-cpp/README.md index d8982048793c..d6cbeebe1bc6 100644 --- a/examples/quickstart-cpp/README.md +++ b/examples/quickstart-cpp/README.md @@ -1,4 +1,4 @@ -# Flower Clients in 
C++ +# Flower Clients in C++ (under development) In this example you will train a linear model on synthetic data using C++ clients. @@ -12,7 +12,7 @@ Many thanks to the original contributors to this code: ## Install requirements -You'll need CMake and Python. +You'll need CMake and Python with `flwr` installed. ### Building the example @@ -23,16 +23,20 @@ cmake -S . -B build cmake --build build ``` -## Run the server and two clients in separate terminals +## Run the `Flower SuperLink`, the two clients, and the `Flower ServerApp` in separate terminals ```bash -python server.py +flwr-superlink --insecure ``` ```bash -build/flwr_client 0 127.0.0.1:8080 +build/flwr_client 0 127.0.0.1:9092 ``` ```bash -build/flwr_client 1 127.0.0.1:8080 +build/flwr_client 1 127.0.0.1:9092 +``` + +```bash +flower-server-app server:app --insecure ``` diff --git a/examples/quickstart-cpp/driver.py b/examples/quickstart-cpp/driver.py deleted file mode 100644 index f19cf0e9bd98..000000000000 --- a/examples/quickstart-cpp/driver.py +++ /dev/null @@ -1,10 +0,0 @@ -import flwr as fl -from fedavg_cpp import FedAvgCpp - -# Start Flower server for three rounds of federated learning -if __name__ == "__main__": - fl.server.start_driver( - server_address="0.0.0.0:9091", - config=fl.server.ServerConfig(num_rounds=3), - strategy=FedAvgCpp(), - ) diff --git a/examples/quickstart-cpp/fedavg_cpp.py b/examples/quickstart-cpp/fedavg_cpp.py index 672858fb8c48..cd62d07bb848 100644 --- a/examples/quickstart-cpp/fedavg_cpp.py +++ b/examples/quickstart-cpp/fedavg_cpp.py @@ -82,7 +82,6 @@ def aggregate_evaluate( # Do not aggregate if there are failures and failures are not accepted if not self.accept_failures and failures: return None, {} - print(results[0][1]) loss_aggregated = weighted_loss_avg( [ ( diff --git a/examples/quickstart-cpp/server.py b/examples/quickstart-cpp/server.py index aa595b498786..8ad8e0b3647c 100644 --- a/examples/quickstart-cpp/server.py +++ b/examples/quickstart-cpp/server.py @@ -2,17 
+2,15 @@ import numpy as np from fedavg_cpp import FedAvgCpp, weights_to_parameters -# Start Flower server for three rounds of federated learning -if __name__ == "__main__": - model_size = 2 - initial_weights = [ - np.array([1.0, 2.0], dtype=np.float64), - np.array([3.0], dtype=np.float64), - ] - initial_parameters = weights_to_parameters(initial_weights) - strategy = FedAvgCpp(initial_parameters=initial_parameters) - fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, - ) +model_size = 2 +initial_weights = [ + np.array([1.0, 2.0], dtype=np.float64), + np.array([3.0], dtype=np.float64), +] +initial_parameters = weights_to_parameters(initial_weights) +strategy = FedAvgCpp(initial_parameters=initial_parameters) + +app = fl.server.ServerApp( + config=fl.server.ServerConfig(num_rounds=3), + strategy=strategy, +) diff --git a/examples/quickstart-cpp/src/main.cc b/examples/quickstart-cpp/src/main.cc index f294f9d69473..f645360992c2 100644 --- a/examples/quickstart-cpp/src/main.cc +++ b/examples/quickstart-cpp/src/main.cc @@ -2,17 +2,10 @@ #include "start.h" int main(int argc, char **argv) { - if (argc != 3 && argc != 4) { - std::cout << "Client takes three mandatory arguments and one optional as " - "follows: " - << std::endl; - std::cout << "./client CLIENT_ID SERVER_URL [GRPC_MODE]" << std::endl; - std::cout - << "GRPC_MODE is optional and can be either 'bidi' (default) or 'rere'." 
- << std::endl; - std::cout << "Example: ./flwr_client 0 '127.0.0.1:8080' bidi" << std::endl; - std::cout << "This is the same as: ./flwr_client 0 '127.0.0.1:8080'" - << std::endl; + if (argc != 3) { + std::cout << "Client takes 2 mandatory arguments as follows: " << std::endl; + std::cout << "./client CLIENT_ID SERVER_URL" << std::endl; + std::cout << "Example: ./flwr_client 0 '127.0.0.1:8080'" << std::endl; return 0; } @@ -45,15 +38,8 @@ int main(int argc, char **argv) { // Define a server address std::string server_add = SERVER_URL; - if (argc == 4 && std::string(argv[3]) == "rere") { - std::cout << "Starting rere client" << std::endl; - // Start rere client - start::start_rere_client(server_add, &client); - } else { - std::cout << "Starting bidi client" << std::endl; - // Start bidi client - start::start_client(server_add, &client); - } + std::cout << "Starting rere client" << std::endl; + start::start_client(server_add, &client); return 0; } diff --git a/examples/quickstart-huggingface/client.py b/examples/quickstart-huggingface/client.py index 9be08d0cbcf4..a9d48bfa8f13 100644 --- a/examples/quickstart-huggingface/client.py +++ b/examples/quickstart-huggingface/client.py @@ -22,9 +22,9 @@ def load_data(partition_id): fds = FederatedDataset(dataset="imdb", partitioners={"train": 1_000}) partition = fds.load_partition(partition_id) # Divide data: 80% train, 20% test - partition_train_test = partition.train_test_split(test_size=0.2) + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) - tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT) + tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT, model_max_length=512) def tokenize_function(examples): return tokenizer(examples["text"], truncation=True) diff --git a/examples/quickstart-mlcube/dev/mnist.py b/examples/quickstart-mlcube/dev/mnist.py index e52e2cba85c7..55fb8fae62a7 100644 --- a/examples/quickstart-mlcube/dev/mnist.py +++ b/examples/quickstart-mlcube/dev/mnist.py @@ -36,6 +36,7 @@ 
def create_directory(path: str) -> None: def download(task_args: List[str]) -> None: """Task: download. + Input parameters: --data_dir """ @@ -81,6 +82,7 @@ def download(task_args: List[str]) -> None: def train(task_args: List[str]) -> None: """Task: train. + Input parameters: --data_dir, --log_dir, --model_dir, --parameters_file """ @@ -175,6 +177,7 @@ def train(task_args: List[str]) -> None: def evaluate(task_args: List[str]) -> None: """Task: train. + Input parameters: --data_dir, --log_dir, --model_dir, --parameters_file """ diff --git a/examples/quickstart-mlx/client.py b/examples/quickstart-mlx/client.py index faba2b94d6bd..344cfc65e42d 100644 --- a/examples/quickstart-mlx/client.py +++ b/examples/quickstart-mlx/client.py @@ -107,7 +107,7 @@ def evaluate(self, parameters, config): fds = FederatedDataset(dataset="mnist", partitioners={"train": 3}) partition = fds.load_partition(partition_id=args.partition_id) - partition_splits = partition.train_test_split(test_size=0.2) + partition_splits = partition.train_test_split(test_size=0.2, seed=42) partition_splits["train"].set_format("numpy") partition_splits["test"].set_format("numpy") diff --git a/examples/quickstart-mlx/requirements.txt b/examples/quickstart-mlx/requirements.txt index 0c3ea45ee188..b56f7a15bfb9 100644 --- a/examples/quickstart-mlx/requirements.txt +++ b/examples/quickstart-mlx/requirements.txt @@ -1,4 +1,4 @@ flwr>=1.0, <2.0 mlx==0.0.3 numpy==1.24.4 -flwr-datasets["vision"]>=0.0.2, <1.0 +flwr-datasets[vision]>=0.0.2, <1.0.0 diff --git a/examples/quickstart-monai/pyproject.toml b/examples/quickstart-monai/pyproject.toml index b1713f05f2ef..2b77a2fc061f 100644 --- a/examples/quickstart-monai/pyproject.toml +++ b/examples/quickstart-monai/pyproject.toml @@ -12,7 +12,7 @@ authors = ["The Flower Authors "] python = ">=3.8,<3.11" flwr = ">=1.0,<2.0" torch = "1.13.1" -tqdm = "4.65.0" +tqdm = "4.66.3" scikit-learn = "1.3.1" monai = { version = "1.3.0", extras=["gdown", "nibabel", "tqdm", "itk"] } numpy = 
"1.24.4" diff --git a/examples/quickstart-mxnet/.gitignore b/examples/quickstart-mxnet/.gitignore deleted file mode 100644 index 10d00b5797e2..000000000000 --- a/examples/quickstart-mxnet/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.gz diff --git a/examples/quickstart-mxnet/README.md b/examples/quickstart-mxnet/README.md deleted file mode 100644 index 37e01ef2707c..000000000000 --- a/examples/quickstart-mxnet/README.md +++ /dev/null @@ -1,78 +0,0 @@ -# Flower Example using MXNet - -> Note the MXNet project has ended, and is now in [Attic](https://attic.apache.org/projects/mxnet.html). The MXNet GitHub has also [been archived](https://github.com/apache/mxnet). As a result, this example won't be receiving more updates. Using MXNet is no longer recommnended. - -This example demonstrates how to run a MXNet machine learning project federated with Flower. - -This introductory example for Flower uses MXNet, but you're not required to be a MXNet expert to run the example. The example will help you to understand how Flower can be used to build federated learning use cases based on an existing MXNet projects. - -## Project Setup - -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: - -```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/quickstart-mxnet . && rm -rf flower && cd quickstart-mxnet -``` - -This will create a new directory called `quickstart-mxnet` containing the following files: - -```shell --- pyproject.toml --- requirements.txt --- client.py --- server.py --- README.md -``` - -### Installing Dependencies - -Project dependencies (such as `mxnet` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. 
We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. - -#### Poetry - -```shell -poetry install -poetry shell -``` - -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: - -```shell -poetry run python3 -c "import flwr" -``` - -If you don't see any errors you're good to go! - -#### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. - -```shell -pip install -r requirements.txt -``` - -## Run MXNet Federated - -This MXNet example is based on the [Handwritten Digit Recognition](https://mxnet.apache.org/versions/1.7.0/api/python/docs/tutorials/packages/gluon/image/mnist.html) tutorial and uses the MNIST dataset (hand-written digits with 28x28 pixels in greyscale with 10 classes). Feel free to consult the tutorial if you want to get a better understanding of MXNet. The file `client.py` contains all the steps that are described in the tutorial. It loads the dataset and a sequential model, trains the model with the training set, and evaluates the trained model on the test set. - -You are ready to start the Flower server as well as the clients. You can simply start the server in a terminal as follows: - -```shell -python3 server.py -``` - -Now you are ready to start the Flower clients which will participate in the learning. To do so simply open two more terminal windows and run the following commands. 
- -Start client 1 in the first terminal: - -```shell -python3 client.py -``` - -Start client 2 in the second terminal: - -```shell -python3 client.py -``` - -You are now training a MXNet-based classifier on MNIST, federated across two clients. The setup is of course simplified since both clients hold the same dataset, but you can now continue with your own explorations. How about changing from a sequential model to a CNN? How about adding more clients? diff --git a/examples/quickstart-mxnet/client.py b/examples/quickstart-mxnet/client.py deleted file mode 100644 index 6c2b2e99775d..000000000000 --- a/examples/quickstart-mxnet/client.py +++ /dev/null @@ -1,136 +0,0 @@ -"""Flower client example using MXNet for MNIST classification. - -The code is generally adapted from: - -https://mxnet.apache.org/api/python/docs/tutorials/packages/gluon/image/mnist.html -""" - -import flwr as fl -import numpy as np -import mxnet as mx -from mxnet import nd -from mxnet import gluon -from mxnet.gluon import nn -from mxnet import autograd as ag -import mxnet.ndarray as F - -# Fixing the random seed -mx.random.seed(42) - -# Setup context to GPU or CPU -DEVICE = [mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()] - - -def main(): - def model(): - net = nn.Sequential() - net.add(nn.Dense(256, activation="relu")) - net.add(nn.Dense(64, activation="relu")) - net.add(nn.Dense(10)) - net.collect_params().initialize() - return net - - train_data, val_data = load_data() - - model = model() - init = nd.random.uniform(shape=(2, 784)) - model(init) - - # Flower Client - class MNISTClient(fl.client.NumPyClient): - def get_parameters(self, config): - param = [] - for val in model.collect_params(".*weight").values(): - p = val.data() - param.append(p.asnumpy()) - return param - - def set_parameters(self, parameters): - params = zip(model.collect_params(".*weight").keys(), parameters) - for key, value in params: - model.collect_params().setattr(key, value) - - def fit(self, parameters, config): - 
self.set_parameters(parameters) - [accuracy, loss], num_examples = train(model, train_data, epoch=2) - results = {"accuracy": float(accuracy[1]), "loss": float(loss[1])} - return self.get_parameters(config={}), num_examples, results - - def evaluate(self, parameters, config): - self.set_parameters(parameters) - [accuracy, loss], num_examples = test(model, val_data) - print("Evaluation accuracy & loss", accuracy, loss) - return float(loss[1]), num_examples, {"accuracy": float(accuracy[1])} - - # Start Flower client - fl.client.start_numpy_client(server_address="0.0.0.0:8080", client=MNISTClient()) - - -def load_data(): - print("Download Dataset") - mnist = mx.test_utils.get_mnist() - batch_size = 100 - train_data = mx.io.NDArrayIter( - mnist["train_data"], mnist["train_label"], batch_size, shuffle=True - ) - val_data = mx.io.NDArrayIter(mnist["test_data"], mnist["test_label"], batch_size) - return train_data, val_data - - -def train(net, train_data, epoch): - trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.01}) - accuracy_metric = mx.metric.Accuracy() - loss_metric = mx.metric.CrossEntropy() - metrics = mx.metric.CompositeEvalMetric() - for child_metric in [accuracy_metric, loss_metric]: - metrics.add(child_metric) - softmax_cross_entropy_loss = gluon.loss.SoftmaxCrossEntropyLoss() - for i in range(epoch): - train_data.reset() - num_examples = 0 - for batch in train_data: - data = gluon.utils.split_and_load( - batch.data[0], ctx_list=DEVICE, batch_axis=0 - ) - label = gluon.utils.split_and_load( - batch.label[0], ctx_list=DEVICE, batch_axis=0 - ) - outputs = [] - with ag.record(): - for x, y in zip(data, label): - z = net(x) - loss = softmax_cross_entropy_loss(z, y) - loss.backward() - outputs.append(z.softmax()) - num_examples += len(x) - metrics.update(label, outputs) - trainer.step(batch.data[0].shape[0]) - trainings_metric = metrics.get_name_value() - print("Accuracy & loss at epoch %d: %s" % (i, trainings_metric)) - return 
trainings_metric, num_examples - - -def test(net, val_data): - accuracy_metric = mx.metric.Accuracy() - loss_metric = mx.metric.CrossEntropy() - metrics = mx.metric.CompositeEvalMetric() - for child_metric in [accuracy_metric, loss_metric]: - metrics.add(child_metric) - val_data.reset() - num_examples = 0 - for batch in val_data: - data = gluon.utils.split_and_load(batch.data[0], ctx_list=DEVICE, batch_axis=0) - label = gluon.utils.split_and_load( - batch.label[0], ctx_list=DEVICE, batch_axis=0 - ) - outputs = [] - for x in data: - outputs.append(net(x).softmax()) - num_examples += len(x) - metrics.update(label, outputs) - metrics.update(label, outputs) - return metrics.get_name_value(), num_examples - - -if __name__ == "__main__": - main() diff --git a/examples/quickstart-mxnet/requirements.txt b/examples/quickstart-mxnet/requirements.txt deleted file mode 100644 index 8dd6f7150dfd..000000000000 --- a/examples/quickstart-mxnet/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -flwr==1.6.0 -mxnet==1.9.1 -numpy==1.23.1 diff --git a/examples/quickstart-mxnet/server.py b/examples/quickstart-mxnet/server.py deleted file mode 100644 index 871aa4e8ec99..000000000000 --- a/examples/quickstart-mxnet/server.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Flower server example.""" - -import flwr as fl - -if __name__ == "__main__": - fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), - ) diff --git a/examples/quickstart-pytorch-lightning/mnist.py b/examples/quickstart-pytorch-lightning/mnist.py index 95342f4fb9b3..2f6100fe94cc 100644 --- a/examples/quickstart-pytorch-lightning/mnist.py +++ b/examples/quickstart-pytorch-lightning/mnist.py @@ -82,9 +82,11 @@ def load_data(partition): partition = partition.with_transform(apply_transforms) # 20 % for on federated evaluation - partition_full = partition.train_test_split(test_size=0.2) + partition_full = partition.train_test_split(test_size=0.2, seed=42) # 60 % for the federated train and 20 % 
for the federated validation (both in fit) - partition_train_valid = partition_full["train"].train_test_split(train_size=0.75) + partition_train_valid = partition_full["train"].train_test_split( + train_size=0.75, seed=42 + ) trainloader = DataLoader( partition_train_valid["train"], batch_size=32, diff --git a/examples/quickstart-pytorch/README.md b/examples/quickstart-pytorch/README.md index 02c9b4b38498..93d6a593f362 100644 --- a/examples/quickstart-pytorch/README.md +++ b/examples/quickstart-pytorch/README.md @@ -14,7 +14,6 @@ This will create a new directory called `quickstart-pytorch` containing the foll ```shell -- pyproject.toml --- requirements.txt -- client.py -- server.py -- README.md @@ -22,30 +21,22 @@ This will create a new directory called `quickstart-pytorch` containing the foll ### Installing Dependencies -Project dependencies (such as `torch` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. - -#### Poetry +Project dependencies (such as `torch` and `flwr`) are defined in `pyproject.toml`. You can install the dependencies by invoking `pip`: ```shell -poetry install -poetry shell +# From a new python environment, run: +pip install . ``` -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: +Then, to verify that everything works correctly you can run the following command: ```shell -poetry run python3 -c "import flwr" +python3 -c "import flwr" ``` If you don't see any errors you're good to go! 
-#### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. - -```shell -pip install -r requirements.txt -``` +______________________________________________________________________ ## Run Federated Learning with PyTorch and Flower @@ -72,3 +63,29 @@ python3 client.py --partition-id 1 ``` You will see that PyTorch is starting a federated training. Look at the [code](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch) for a detailed explanation. + +______________________________________________________________________ + +## Run Federated Learning with PyTorch and `Flower Next` + +### 1. Start the long-running Flower server (SuperLink) + +```bash +flower-superlink --insecure +``` + +### 2. Start the long-running Flower clients (SuperNodes) + +Start 2 Flower `SuperNodes` in 2 separate terminal windows, using: + +```bash +flower-client-app client:app --insecure +``` + +### 3. Run the Flower App + +With both the long-running server (SuperLink) and two clients (SuperNode) up and running, we can now run the actual Flower App: + +```bash +flower-server-app server:app --insecure +``` diff --git a/examples/quickstart-pytorch/client.py b/examples/quickstart-pytorch/client.py index e640ce111dff..2452db819e1d 100644 --- a/examples/quickstart-pytorch/client.py +++ b/examples/quickstart-pytorch/client.py @@ -2,7 +2,7 @@ import warnings from collections import OrderedDict -import flwr as fl +from flwr.client import NumPyClient, ClientApp from flwr_datasets import FederatedDataset import torch import torch.nn as nn @@ -71,10 +71,10 @@ def test(net, testloader): def load_data(partition_id): """Load partition CIFAR10 data.""" - fds = FederatedDataset(dataset="cifar10", partitioners={"train": 3}) + fds = FederatedDataset(dataset="cifar10", partitioners={"train": 2}) partition = fds.load_partition(partition_id) # Divide data on each node: 80% train, 20% test - partition_train_test = 
partition.train_test_split(test_size=0.2) + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) pytorch_transforms = Compose( [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] ) @@ -98,12 +98,12 @@ def apply_transforms(batch): parser = argparse.ArgumentParser(description="Flower") parser.add_argument( "--partition-id", - choices=[0, 1, 2], - required=True, + choices=[0, 1], + default=0, type=int, - help="Partition of the dataset divided into 3 iid partitions created artificially.", + help="Partition of the dataset divided into 2 iid partitions created artificially.", ) -partition_id = parser.parse_args().partition_id +partition_id = parser.parse_known_args()[0].partition_id # Load model and data (simple CNN, CIFAR-10) net = Net().to(DEVICE) @@ -111,7 +111,7 @@ def apply_transforms(batch): # Define Flower client -class FlowerClient(fl.client.NumPyClient): +class FlowerClient(NumPyClient): def get_parameters(self, config): return [val.cpu().numpy() for _, val in net.state_dict().items()] @@ -131,8 +131,22 @@ def evaluate(self, parameters, config): return loss, len(testloader.dataset), {"accuracy": accuracy} -# Start Flower client -fl.client.start_client( - server_address="127.0.0.1:8080", - client=FlowerClient().to_client(), +def client_fn(cid: str): + """Create and return an instance of Flower `Client`.""" + return FlowerClient().to_client() + + +# Flower ClientApp +app = ClientApp( + client_fn=client_fn, ) + + +# Legacy mode +if __name__ == "__main__": + from flwr.client import start_client + + start_client( + server_address="127.0.0.1:8080", + client=FlowerClient().to_client(), + ) diff --git a/examples/quickstart-pytorch/pyproject.toml b/examples/quickstart-pytorch/pyproject.toml index d8e1503dd8a7..89a5cd16d7de 100644 --- a/examples/quickstart-pytorch/pyproject.toml +++ b/examples/quickstart-pytorch/pyproject.toml @@ -1,17 +1,21 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires 
= ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] +[project] name = "quickstart-pytorch" version = "0.1.0" description = "PyTorch Federated Learning Quickstart with Flower" -authors = ["The Flower Authors "] +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +dependencies = [ + "flwr>=1.8.0,<2.0", + "flwr-datasets[vision]>=0.0.2,<1.0.0", + "torch==2.1.1", + "torchvision==0.16.1", + "tqdm==4.66.3" +] -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } -torch = "2.1.1" -torchvision = "0.16.1" -tqdm = "4.65.0" +[tool.hatch.build.targets.wheel] +packages = ["."] diff --git a/examples/quickstart-pytorch/requirements.txt b/examples/quickstart-pytorch/requirements.txt deleted file mode 100644 index 4e321e2cd0c2..000000000000 --- a/examples/quickstart-pytorch/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -flwr>=1.0, <2.0 -flwr-datasets[vision]>=0.0.2, <1.0.0 -torch==2.1.1 -torchvision==0.16.1 -tqdm==4.65.0 diff --git a/examples/quickstart-pytorch/run.sh b/examples/quickstart-pytorch/run.sh deleted file mode 100755 index 6ca9c8cafec9..000000000000 --- a/examples/quickstart-pytorch/run.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ - -echo "Starting server" -python server.py & -sleep 3 # Sleep for 3s to give the server enough time to start - -for i in $(seq 0 1); do - echo "Starting client $i" - python client.py --partition-id "$i" & -done - -# Enable CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/quickstart-pytorch/server.py b/examples/quickstart-pytorch/server.py index fe691a88aba0..4034703ca690 100644 --- a/examples/quickstart-pytorch/server.py +++ b/examples/quickstart-pytorch/server.py @@ -1,6 +1,7 @@ from typing import List, Tuple 
-import flwr as fl +from flwr.server import ServerApp, ServerConfig +from flwr.server.strategy import FedAvg from flwr.common import Metrics @@ -15,11 +16,26 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: # Define strategy -strategy = fl.server.strategy.FedAvg(evaluate_metrics_aggregation_fn=weighted_average) +strategy = FedAvg(evaluate_metrics_aggregation_fn=weighted_average) -# Start Flower server -fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), + +# Define config +config = ServerConfig(num_rounds=3) + + +# Flower ServerApp +app = ServerApp( + config=config, strategy=strategy, ) + + +# Legacy mode +if __name__ == "__main__": + from flwr.server import start_server + + start_server( + server_address="0.0.0.0:8080", + config=config, + strategy=strategy, + ) diff --git a/examples/quickstart-tabnet/pyproject.toml b/examples/quickstart-tabnet/pyproject.toml index 18f1979791bd..6b7311f068f0 100644 --- a/examples/quickstart-tabnet/pyproject.toml +++ b/examples/quickstart-tabnet/pyproject.toml @@ -13,5 +13,5 @@ python = ">=3.8,<3.11" flwr = ">=1.0,<2.0" tensorflow-cpu = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == \"x86_64\"" } tensorflow-macos = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "sys_platform == \"darwin\" and platform_machine == \"arm64\"" } -tensorflow_datasets = "4.8.3" +tensorflow_datasets = "4.9.2" tabnet = "0.1.6" diff --git a/examples/quickstart-tensorflow/README.md b/examples/quickstart-tensorflow/README.md index 8d5e9434b086..ae1fe19834a3 100644 --- a/examples/quickstart-tensorflow/README.md +++ b/examples/quickstart-tensorflow/README.md @@ -15,7 +15,6 @@ This will create a new directory called `quickstart-tensorflow` containing the f ```shell -- pyproject.toml --- requirements.txt -- client.py -- server.py -- README.md @@ -23,51 +22,63 @@ This will create a new directory called `quickstart-tensorflow` containing the f ### Installing 
Dependencies -Project dependencies (such as `tensorflow` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. - -#### Poetry +Project dependencies (such as `tensorflow` and `flwr`) are defined in `pyproject.toml`. You can install the dependencies by invoking `pip`: ```shell -poetry install -poetry shell +# From a new python environment, run: +pip install . ``` -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: +Then, to verify that everything works correctly you can run the following command: ```shell -poetry run python3 -c "import flwr" +python3 -c "import flwr" ``` If you don't see any errors you're good to go! -#### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. - -```shell -pip install -r requirements.txt -``` - ## Run Federated Learning with TensorFlow/Keras and Flower Afterward, you are ready to start the Flower server as well as the clients. You can simply start the server in a terminal as follows: ```shell -poetry run python3 server.py +python3 server.py ``` Now you are ready to start the Flower clients which will participate in the learning. 
To do so simply open two more terminals and run the following command in each: ```shell -poetry run python3 client.py +python3 client.py --partition-id 0 ``` -Alternatively, you can run all of it in one shell as follows: +Start client 2 in the second terminal: ```shell -poetry run python3 server.py & -poetry run python3 client.py & -poetry run python3 client.py +python3 client.py --partition-id 1 ``` You will see that Keras is starting a federated training. Have a look at the [code](https://github.com/adap/flower/tree/main/examples/quickstart-tensorflow) for a detailed explanation. You can add `steps_per_epoch=3` to `model.fit()` if you just want to evaluate that everything works without having to wait for the client-side training to finish (this will save you a lot of time during development). + +## Run Federated Learning with TensorFlow/Keras and `Flower Next` + +### 1. Start the long-running Flower server (SuperLink) + +```bash +flower-superlink --insecure +``` + +### 2. Start the long-running Flower clients (SuperNodes) + +Start 2 Flower `SuperNodes` in 2 separate terminal windows, using: + +```bash +flower-client-app client:app --insecure +``` + +### 3. Run the Flower App + +With both the long-running server (SuperLink) and two clients (SuperNode) up and running, we can now run the actual Flower App, using: + +```bash +flower-server-app server:app --insecure +``` diff --git a/examples/quickstart-tensorflow/client.py b/examples/quickstart-tensorflow/client.py index 3e2035c09311..6b2bd6639ce0 100644 --- a/examples/quickstart-tensorflow/client.py +++ b/examples/quickstart-tensorflow/client.py @@ -1,7 +1,7 @@ import argparse import os -import flwr as fl +from flwr.client import ClientApp, NumPyClient import tensorflow as tf from flwr_datasets import FederatedDataset @@ -14,11 +14,11 @@ "--partition-id", type=int, choices=[0, 1, 2], - required=True, - help="Partition of the dataset (0,1 or 2). " + default=0, + help="Partition of the dataset (0, 1 or 2). 
" "The dataset is divided into 3 partitions created artificially.", ) -args = parser.parse_args() +args, _ = parser.parse_known_args() # Load model and data (MobileNetV2, CIFAR-10) model = tf.keras.applications.MobileNetV2((32, 32, 3), classes=10, weights=None) @@ -30,13 +30,13 @@ partition.set_format("numpy") # Divide data on each node: 80% train, 20% test -partition = partition.train_test_split(test_size=0.2) +partition = partition.train_test_split(test_size=0.2, seed=42) x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"] x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"] # Define Flower client -class CifarClient(fl.client.NumPyClient): +class FlowerClient(NumPyClient): def get_parameters(self, config): return model.get_weights() @@ -51,7 +51,22 @@ def evaluate(self, parameters, config): return loss, len(x_test), {"accuracy": accuracy} -# Start Flower client -fl.client.start_client( - server_address="127.0.0.1:8080", client=CifarClient().to_client() +def client_fn(cid: str): + """Create and return an instance of Flower `Client`.""" + return FlowerClient().to_client() + + +# Flower ClientApp +app = ClientApp( + client_fn=client_fn, ) + + +# Legacy mode +if __name__ == "__main__": + from flwr.client import start_client + + start_client( + server_address="127.0.0.1:8080", + client=FlowerClient().to_client(), + ) diff --git a/examples/quickstart-tensorflow/pyproject.toml b/examples/quickstart-tensorflow/pyproject.toml index 98aeb932cab9..c0f71344b2fb 100644 --- a/examples/quickstart-tensorflow/pyproject.toml +++ b/examples/quickstart-tensorflow/pyproject.toml @@ -1,16 +1,20 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] +[project] name = "quickstart-tensorflow" version = "0.1.0" description = "Keras Federated Learning Quickstart with Flower" -authors = ["The Flower Authors "] +authors = 
[ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +dependencies = [ + "flwr>=1.8.0,<2.0", + "flwr-datasets[vision]>=0.0.2,<1.0.0", + "tensorflow-cpu>=2.9.1, != 2.11.1 ; platform_machine == \"x86_64\"", + "tensorflow-macos>=2.9.1, != 2.11.1 ; sys_platform == \"darwin\" and platform_machine == \"arm64\"" +] -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } -tensorflow-cpu = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == \"x86_64\"" } -tensorflow-macos = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "sys_platform == \"darwin\" and platform_machine == \"arm64\"" } +[tool.hatch.build.targets.wheel] +packages = ["."] diff --git a/examples/quickstart-tensorflow/requirements.txt b/examples/quickstart-tensorflow/requirements.txt deleted file mode 100644 index 7f025975cae9..000000000000 --- a/examples/quickstart-tensorflow/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr>=1.0, <2.0 -flwr-datasets[vision]>=0.0.2, <1.0.0 -tensorflow-macos>=2.9.1, != 2.11.1 ; sys_platform == "darwin" and platform_machine == "arm64" -tensorflow-cpu>=2.9.1, != 2.11.1 ; platform_machine == "x86_64" diff --git a/examples/quickstart-tensorflow/run.sh b/examples/quickstart-tensorflow/run.sh deleted file mode 100755 index 76188f197e3e..000000000000 --- a/examples/quickstart-tensorflow/run.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -echo "Starting server" -python server.py & -sleep 3 # Sleep for 3s to give the server enough time to start - -for i in `seq 0 1`; do - echo "Starting client $i" - python client.py --partition-id $i & -done - -# This will allow you to use CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/quickstart-tensorflow/server.py b/examples/quickstart-tensorflow/server.py index fe691a88aba0..4034703ca690 100644 --- 
a/examples/quickstart-tensorflow/server.py +++ b/examples/quickstart-tensorflow/server.py @@ -1,6 +1,7 @@ from typing import List, Tuple -import flwr as fl +from flwr.server import ServerApp, ServerConfig +from flwr.server.strategy import FedAvg from flwr.common import Metrics @@ -15,11 +16,26 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: # Define strategy -strategy = fl.server.strategy.FedAvg(evaluate_metrics_aggregation_fn=weighted_average) +strategy = FedAvg(evaluate_metrics_aggregation_fn=weighted_average) -# Start Flower server -fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), + +# Define config +config = ServerConfig(num_rounds=3) + + +# Flower ServerApp +app = ServerApp( + config=config, strategy=strategy, ) + + +# Legacy mode +if __name__ == "__main__": + from flwr.server import start_server + + start_server( + server_address="0.0.0.0:8080", + config=config, + strategy=strategy, + ) diff --git a/examples/simulation-pytorch/sim.ipynb b/examples/simulation-pytorch/sim.ipynb index 6dda1ef9319d..d225069cb444 100644 --- a/examples/simulation-pytorch/sim.ipynb +++ b/examples/simulation-pytorch/sim.ipynb @@ -497,7 +497,7 @@ " client_dataset = dataset.load_partition(int(cid), \"train\")\n", "\n", " # Now let's split it into train (90%) and validation (10%)\n", - " client_dataset_splits = client_dataset.train_test_split(test_size=0.1)\n", + " client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42)\n", "\n", " trainset = client_dataset_splits[\"train\"]\n", " valset = client_dataset_splits[\"test\"]\n", diff --git a/examples/simulation-pytorch/sim.py b/examples/simulation-pytorch/sim.py index 6fb750f2e59c..db68e75653fc 100644 --- a/examples/simulation-pytorch/sim.py +++ b/examples/simulation-pytorch/sim.py @@ -94,7 +94,7 @@ def client_fn(cid: str) -> fl.client.Client: client_dataset = dataset.load_partition(int(cid), "train") # Now let's split it into train (90%) 
and validation (10%) - client_dataset_splits = client_dataset.train_test_split(test_size=0.1) + client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42) trainset = client_dataset_splits["train"] valset = client_dataset_splits["test"] diff --git a/examples/simulation-tensorflow/sim.ipynb b/examples/simulation-tensorflow/sim.ipynb index 797e2dcc603e..26b7260b5f1c 100644 --- a/examples/simulation-tensorflow/sim.ipynb +++ b/examples/simulation-tensorflow/sim.ipynb @@ -179,7 +179,7 @@ " client_dataset = dataset.load_partition(int(cid), \"train\")\n", "\n", " # Now let's split it into train (90%) and validation (10%)\n", - " client_dataset_splits = client_dataset.train_test_split(test_size=0.1)\n", + " client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42)\n", "\n", " trainset = client_dataset_splits[\"train\"].to_tf_dataset(\n", " columns=\"image\", label_cols=\"label\", batch_size=32\n", diff --git a/examples/simulation-tensorflow/sim.py b/examples/simulation-tensorflow/sim.py index e94e5ec96850..4014e3c6be72 100644 --- a/examples/simulation-tensorflow/sim.py +++ b/examples/simulation-tensorflow/sim.py @@ -83,7 +83,7 @@ def client_fn(cid: str) -> fl.client.Client: client_dataset = dataset.load_partition(int(cid), "train") # Now let's split it into train (90%) and validation (10%) - client_dataset_splits = client_dataset.train_test_split(test_size=0.1) + client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42) trainset = client_dataset_splits["train"].to_tf_dataset( columns="image", label_cols="label", batch_size=32 diff --git a/examples/vertical-fl/README.md b/examples/vertical-fl/README.md index 78588180d3d6..d8c599d617c4 100644 --- a/examples/vertical-fl/README.md +++ b/examples/vertical-fl/README.md @@ -123,7 +123,7 @@ In `task.py`, you'll find the preprocessing functions we'll apply to our data: 'Adult' for ages between 11 and 40, and 'Elderly' for those over 40. 
If the age isn't listed, we'll label it as 'Unknown'. - ```python3 + ```python def _bin_age(age_series): bins = [-np.inf, 10, 40, np.inf] labels = ["Child", "Adult", "Elderly"] @@ -138,7 +138,7 @@ In `task.py`, you'll find the preprocessing functions we'll apply to our data: understand social status and family roles, simplifying rare titles into a single 'Rare' category and converting any French titles to their English equivalents. - ```python3 + ```python def _extract_title(name_series): titles = name_series.str.extract(" ([A-Za-z]+)\.", expand=False) rare_titles = { @@ -170,7 +170,7 @@ In `task.py`, you'll find the preprocessing functions we'll apply to our data: 'Pclass', 'Embarked', 'Title', 'Cabin', and the binned 'Age' into One-Hot encodings. - ```python3 + ```python def _create_features(df): # Convert 'Age' to numeric, coercing errors to NaN df["Age"] = pd.to_numeric(df["Age"], errors="coerce") @@ -190,7 +190,7 @@ In `task.py`, you'll find the preprocessing functions we'll apply to our data: In `task.py`, we also partition our data for our 3 clients to mirror real-life collaborations where different organizations hold different feature sets: -```python3 +```python def _partition_data(df, all_keywords): partitions = [] keywords_sets = [{"Parch", "Cabin", "Pclass"}, {"Sex", "Title"}] @@ -236,7 +236,7 @@ collective intelligence without sharing sensitive information. Note that our final data processing function looks like that: -```python3 +```python def get_partitions_and_label(): df = pd.read_csv("_static/data/train.csv") processed_df = df.dropna(subset=["Embarked", "Fare"]).copy() @@ -259,7 +259,7 @@ Each client's model is a neural network designed to operate on a distinct subset of features held by a client. In this example we will use simple linear regression models. 
-```python3 +```python class ClientModel(nn.Module): def __init__(self, input_size): super(ClientModel, self).__init__() @@ -281,7 +281,7 @@ The server's model acts as the central aggregator in the VFL system. It's also a neural network but with a slightly different architecture tailored to its role in aggregating the client models' outputs. -```python3 +```python class ServerModel(nn.Module): def __init__(self): super(ServerModel, self).__init__() @@ -305,7 +305,7 @@ a probability score indicative of the likelihood of survival. The strategy we will write to perform the aggregation will inherit from `FedAvg` and set the following additional attributes: -```python3 +```python self.model = ServerModel(12) self.initial_parameters = ndarrays_to_parameters( [val.cpu().numpy() for _, val in self.model.state_dict().items()] @@ -319,7 +319,7 @@ With `labels` given as an argument to the strategy. We then redefine the `aggregate_fit` method: -```python3 +```python def aggregate_fit( self, rnd, @@ -406,7 +406,7 @@ The last thing we have to do is to redefine the `aggregate_evaluate` function to disable distributed evaluation (as the clients do not hold any labels to test their local models). -```python3 +```python def aggregate_evaluate( self, rnd, @@ -420,7 +420,7 @@ def aggregate_evaluate( Our `FlowerClient` class is going to be quite straight forward. -```python3 +```python class FlowerClient(fl.client.NumPyClient): def __init__(self, cid, data): self.cid = cid @@ -487,7 +487,7 @@ the `aggregate_evaluate` function of the strategy. 
Putting everything together, to start our simulation we use the following function: -```python3 +```python hist = fl.simulation.start_simulation( client_fn=client_fn, num_clients=3, diff --git a/examples/vit-finetune/client.py b/examples/vit-finetune/client.py index 68d98926feeb..bf91fa0c4328 100644 --- a/examples/vit-finetune/client.py +++ b/examples/vit-finetune/client.py @@ -8,9 +8,7 @@ class FedViTClient(NumPyClient): - def __init__(self, trainset): - self.trainset = trainset self.model = get_model() diff --git a/examples/vit-finetune/main.py b/examples/vit-finetune/main.py index 1257246304a1..c629a6f68980 100644 --- a/examples/vit-finetune/main.py +++ b/examples/vit-finetune/main.py @@ -19,7 +19,6 @@ def main(): - args = parser.parse_args() # To control the degree of parallelism diff --git a/examples/whisper-federated-finetuning/utils.py b/examples/whisper-federated-finetuning/utils.py index 21fe0309151c..117cf7100ddd 100644 --- a/examples/whisper-federated-finetuning/utils.py +++ b/examples/whisper-federated-finetuning/utils.py @@ -107,10 +107,10 @@ def prepare_silences_dataset(train_dataset, ratio_silence: float = 0.1) -> Datas """Generate silences for the train set. One of the classes in the SpeechCommands datatset is `silence`. However, the dataset - does not include clips of silence. It does however include 5 long files with different - background sounds. The taks of this function is to extract several (defined by `ratio_silence`) - one-second long clips from those background audio files. Later, those audio clips will be - included into the training set. + does not include clips of silence. It does however include 5 long files with + different background sounds. The task of this function is to extract several + (defined by `ratio_silence`) one-second long clips from those background audio + files. Later, those audio clips will be included into the training set. 
""" # retrieve original silence audio clips silences = [d for d in train_dataset if d["label"] == 35] @@ -138,9 +138,9 @@ def prepare_silences_dataset(train_dataset, ratio_silence: float = 0.1) -> Datas def construct_client_mapping(full_trainset, num_clients: int = 100): """Create a mapping to partition the dataset into `num_client` buckets. - These buckets contain the same number of `spekaer_id` but likely different - number of training exampes since each `speaker_id` in SpeechCommands does - provide different amounts of data to the dataset. + These buckets contain the same number of `speaker_id` but likely different number of + training examples since each `speaker_id` in SpeechCommands does provide different + amounts of data to the dataset. """ client_ids = list(set(full_trainset["speaker_id"])) client_ids.remove( @@ -191,7 +191,7 @@ def set_params(model: torch.nn.ModuleList, params: List[fl.common.NDArrays]): def get_model(device, num_classes, compile: bool = True): - """Create model: Whisper-tiny Encoder + classification head""" + """Create model: Whisper-tiny Encoder + classification head.""" encoder = WhisperForConditionalGeneration.from_pretrained( "openai/whisper-tiny" ).get_encoder() diff --git a/examples/xgboost-comprehensive/pyproject.toml b/examples/xgboost-comprehensive/pyproject.toml index b257801cb420..2d44c06d6e3f 100644 --- a/examples/xgboost-comprehensive/pyproject.toml +++ b/examples/xgboost-comprehensive/pyproject.toml @@ -10,6 +10,6 @@ authors = ["The Flower Authors "] [tool.poetry.dependencies] python = ">=3.8,<3.11" -flwr-nightly = { extras = ["simulation"], version = ">=1.7.0,<2.0" } -flwr-datasets = ">=0.0.2,<1.0.0" +flwr = { extras = ["simulation"], version = ">=1.7.0,<2.0" } +flwr-datasets = ">=0.1.0,<1.0.0" xgboost = ">=2.0.0,<3.0.0" diff --git a/examples/xgboost-comprehensive/requirements.txt b/examples/xgboost-comprehensive/requirements.txt index b5b1d83bcdd1..16eb78f484e3 100644 --- a/examples/xgboost-comprehensive/requirements.txt
+++ b/examples/xgboost-comprehensive/requirements.txt @@ -1,3 +1,3 @@ flwr[simulation]>=1.7.0, <2.0 -flwr-datasets>=0.0.2, <1.0.0 +flwr-datasets>=0.1.0, <1.0.0 xgboost>=2.0.0, <3.0.0 diff --git a/examples/xgboost-quickstart/README.md b/examples/xgboost-quickstart/README.md index 72dde5706e8d..b196520d37e6 100644 --- a/examples/xgboost-quickstart/README.md +++ b/examples/xgboost-quickstart/README.md @@ -4,7 +4,7 @@ This example demonstrates how to perform EXtreme Gradient Boosting (XGBoost) wit We use [HIGGS](https://archive.ics.uci.edu/dataset/280/higgs) dataset for this example to perform a binary classification task. Tree-based with bagging method is used for aggregation on the server. -This project provides a minimal code example to enable you to get stated quickly. For a more comprehensive code example, take a look at [xgboost-comprehensive](https://github.com/adap/flower/tree/main/examples/xgboost-comprehensive). +This project provides a minimal code example to enable you to get started quickly. For a more comprehensive code example, take a look at [xgboost-comprehensive](https://github.com/adap/flower/tree/main/examples/xgboost-comprehensive). 
## Project Setup diff --git a/pyproject.toml b/pyproject.toml index dc8b293bc880..d117a9501b58 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "flwr" -version = "1.8.0" +version = "1.9.0" description = "Flower: A Friendly Federated Learning Framework" license = "Apache-2.0" authors = ["The Flower Authors "] @@ -56,9 +56,10 @@ flwr = "flwr.cli.app:app" flower-driver-api = "flwr.server:run_driver_api" flower-fleet-api = "flwr.server:run_fleet_api" flower-superlink = "flwr.server:run_superlink" +flower-supernode = "flwr.client:run_supernode" flower-client-app = "flwr.client:run_client_app" flower-server-app = "flwr.server:run_server_app" -flower-simulation = "flwr.simulation:run_simulation_from_cli" +flower-simulation = "flwr.simulation.run_simulation:run_simulation_from_cli" [tool.poetry.dependencies] python = "^3.8" @@ -71,9 +72,9 @@ pycryptodome = "^3.18.0" iterators = "^0.0.2" typer = { version = "^0.9.0", extras=["all"] } tomli = "^2.0.1" +pathspec = "^0.12.1" # Optional dependencies (Simulation Engine) -ray = { version = "==2.6.3", optional = true } -pydantic = { version = "<2.0.0", optional = true } +ray = { version = "==2.6.3", optional = true, python = ">=3.8,<3.12" } # Optional dependencies (REST transport layer) requests = { version = "^2.31.0", optional = true } starlette = { version = "^0.31.0", optional = true } @@ -127,6 +128,7 @@ check-wheel-contents = "==0.4.0" GitPython = "==3.1.32" PyGithub = "==2.1.1" licensecheck = "==2024" +pre-commit = "==3.5.0" [tool.isort] line_length = 88 @@ -135,7 +137,7 @@ multi_line_output = 3 include_trailing_comma = true force_grid_wrap = 0 use_parentheses = true -known_first_party = ["flwr", "flwr_experimental", "flwr_tool"] +known_first_party = ["flwr", "flwr_tool"] [tool.black] line-length = 88 @@ -169,12 +171,6 @@ plugins = [ ignore_missing_imports = true strict = true -[[tool.mypy.overrides]] -module = [ - "flwr_experimental.*", -] 
-ignore_errors = true - [[tool.mypy.overrides]] module = [ "importlib.metadata.*", diff --git a/src/cc/flwr/.gitignore b/src/cc/flwr/.gitignore index bd834005883d..1909b6136f64 100644 --- a/src/cc/flwr/.gitignore +++ b/src/cc/flwr/.gitignore @@ -1,2 +1,3 @@ build/ +.clangd *.bak diff --git a/src/cc/flwr/CMakeLists.txt b/src/cc/flwr/CMakeLists.txt index c242f52b237b..9955d21e84ad 100644 --- a/src/cc/flwr/CMakeLists.txt +++ b/src/cc/flwr/CMakeLists.txt @@ -73,6 +73,8 @@ GENERATE_AND_COPY(transport) GENERATE_AND_COPY(node) GENERATE_AND_COPY(task) GENERATE_AND_COPY(fleet) +GENERATE_AND_COPY(error) +GENERATE_AND_COPY(recordset) add_library(flwr_grpc_proto STATIC ${ALL_PROTO_FILES}) diff --git a/src/cc/flwr/include/communicator.h b/src/cc/flwr/include/communicator.h new file mode 100644 index 000000000000..ace4821ab6af --- /dev/null +++ b/src/cc/flwr/include/communicator.h @@ -0,0 +1,30 @@ +#ifndef COMMUNICATOR_H +#define COMMUNICATOR_H + +#include "flwr/proto/fleet.pb.h" +#include +#include + +class Communicator { +public: + virtual bool send_create_node(flwr::proto::CreateNodeRequest request, + flwr::proto::CreateNodeResponse *response) = 0; + + virtual bool send_delete_node(flwr::proto::DeleteNodeRequest request, + flwr::proto::DeleteNodeResponse *response) = 0; + + virtual bool + send_pull_task_ins(flwr::proto::PullTaskInsRequest request, + flwr::proto::PullTaskInsResponse *response) = 0; + + virtual bool + send_push_task_res(flwr::proto::PushTaskResRequest request, + flwr::proto::PushTaskResResponse *response) = 0; +}; + +void create_node(Communicator *communicator); +void delete_node(Communicator *communicator); +void send(Communicator *communicator, flwr::proto::TaskRes task_res); +std::optional receive(Communicator *communicator); + +#endif diff --git a/src/cc/flwr/include/flwr/proto/error.grpc.pb.cc b/src/cc/flwr/include/flwr/proto/error.grpc.pb.cc new file mode 100644 index 000000000000..dbe668508d80 --- /dev/null +++ 
b/src/cc/flwr/include/flwr/proto/error.grpc.pb.cc @@ -0,0 +1,27 @@ +// Generated by the gRPC C++ plugin. +// If you make any local change, they will be lost. +// source: flwr/proto/error.proto + +#include "flwr/proto/error.pb.h" +#include "flwr/proto/error.grpc.pb.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +namespace flwr { +namespace proto { + +} // namespace flwr +} // namespace proto + diff --git a/src/cc/flwr/include/flwr/proto/error.grpc.pb.h b/src/cc/flwr/include/flwr/proto/error.grpc.pb.h new file mode 100644 index 000000000000..df31ee174acf --- /dev/null +++ b/src/cc/flwr/include/flwr/proto/error.grpc.pb.h @@ -0,0 +1,51 @@ +// Generated by the gRPC C++ plugin. +// If you make any local change, they will be lost. +// source: flwr/proto/error.proto +// Original file comments: +// Copyright 2024 Flower Labs GmbH. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// ============================================================================== +// +#ifndef GRPC_flwr_2fproto_2ferror_2eproto__INCLUDED +#define GRPC_flwr_2fproto_2ferror_2eproto__INCLUDED + +#include "flwr/proto/error.pb.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace flwr { +namespace proto { + +} // namespace proto +} // namespace flwr + + +#endif // GRPC_flwr_2fproto_2ferror_2eproto__INCLUDED diff --git a/src/cc/flwr/include/flwr/proto/error.pb.cc b/src/cc/flwr/include/flwr/proto/error.pb.cc new file mode 100644 index 000000000000..c086fa941954 --- /dev/null +++ b/src/cc/flwr/include/flwr/proto/error.pb.cc @@ -0,0 +1,312 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: flwr/proto/error.proto + +#include "flwr/proto/error.pb.h" + +#include + +#include +#include +#include +#include +#include +#include +#include +// @@protoc_insertion_point(includes) +#include + +PROTOBUF_PRAGMA_INIT_SEG +namespace flwr { +namespace proto { +constexpr Error::Error( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : reason_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) + , code_(int64_t{0}){} +struct ErrorDefaultTypeInternal { + constexpr ErrorDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~ErrorDefaultTypeInternal() {} + union { + Error _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT ErrorDefaultTypeInternal _Error_default_instance_; +} // namespace proto +} // namespace flwr +static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_flwr_2fproto_2ferror_2eproto[1]; +static constexpr ::PROTOBUF_NAMESPACE_ID::EnumDescriptor const** file_level_enum_descriptors_flwr_2fproto_2ferror_2eproto = nullptr; +static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** 
file_level_service_descriptors_flwr_2fproto_2ferror_2eproto = nullptr; + +const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_flwr_2fproto_2ferror_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::Error, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::Error, code_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::Error, reason_), +}; +static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { + { 0, -1, -1, sizeof(::flwr::proto::Error)}, +}; + +static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = { + reinterpret_cast(&::flwr::proto::_Error_default_instance_), +}; + +const char descriptor_table_protodef_flwr_2fproto_2ferror_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = + "\n\026flwr/proto/error.proto\022\nflwr.proto\"%\n\005" + "Error\022\014\n\004code\030\001 \001(\022\022\016\n\006reason\030\002 \001(\tb\006pro" + "to3" + ; +static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_flwr_2fproto_2ferror_2eproto_once; +const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_flwr_2fproto_2ferror_2eproto = { + false, false, 83, descriptor_table_protodef_flwr_2fproto_2ferror_2eproto, "flwr/proto/error.proto", + &descriptor_table_flwr_2fproto_2ferror_2eproto_once, nullptr, 0, 1, + schemas, file_default_instances, TableStruct_flwr_2fproto_2ferror_2eproto::offsets, + file_level_metadata_flwr_2fproto_2ferror_2eproto, file_level_enum_descriptors_flwr_2fproto_2ferror_2eproto, file_level_service_descriptors_flwr_2fproto_2ferror_2eproto, +}; +PROTOBUF_ATTRIBUTE_WEAK const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable* descriptor_table_flwr_2fproto_2ferror_2eproto_getter() { + return &descriptor_table_flwr_2fproto_2ferror_2eproto; +} + +// Force running 
AddDescriptors() at dynamic initialization time. +PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_flwr_2fproto_2ferror_2eproto(&descriptor_table_flwr_2fproto_2ferror_2eproto); +namespace flwr { +namespace proto { + +// =================================================================== + +class Error::_Internal { + public: +}; + +Error::Error(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.Error) +} +Error::Error(const Error& from) + : ::PROTOBUF_NAMESPACE_ID::Message() { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + reason_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from._internal_reason().empty()) { + reason_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_reason(), + GetArenaForAllocation()); + } + code_ = from.code_; + // @@protoc_insertion_point(copy_constructor:flwr.proto.Error) +} + +void Error::SharedCtor() { +reason_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +code_ = int64_t{0}; +} + +Error::~Error() { + // @@protoc_insertion_point(destructor:flwr.proto.Error) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void Error::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); + reason_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} + +void Error::ArenaDtor(void* object) { + Error* _this = reinterpret_cast< Error* >(object); + (void)_this; +} +void Error::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void Error::SetCachedSize(int size) 
const { + _cached_size_.Set(size); +} + +void Error::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.Error) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + reason_.ClearToEmpty(); + code_ = int64_t{0}; + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* Error::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // sint64 code = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) { + code_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // string reason = 2; + case 2: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { + auto str = _internal_mutable_reason(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.Error.reason")); + CHK_(ptr); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* Error::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // 
@@protoc_insertion_point(serialize_to_array_start:flwr.proto.Error) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // sint64 code = 1; + if (this->_internal_code() != 0) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteSInt64ToArray(1, this->_internal_code(), target); + } + + // string reason = 2; + if (!this->_internal_reason().empty()) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->_internal_reason().data(), static_cast(this->_internal_reason().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.Error.reason"); + target = stream->WriteStringMaybeAliased( + 2, this->_internal_reason(), target); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.Error) + return target; +} + +size_t Error::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.Error) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // string reason = 2; + if (!this->_internal_reason().empty()) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->_internal_reason()); + } + + // sint64 code = 1; + if (this->_internal_code() != 0) { + total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SInt64SizePlusOne(this->_internal_code()); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData Error::_class_data_ = { 
+ ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + Error::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*Error::GetClassData() const { return &_class_data_; } + +void Error::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void Error::MergeFrom(const Error& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.Error) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + if (!from._internal_reason().empty()) { + _internal_set_reason(from._internal_reason()); + } + if (from._internal_code() != 0) { + _internal_set_code(from._internal_code()); + } + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void Error::CopyFrom(const Error& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.Error) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool Error::IsInitialized() const { + return true; +} + +void Error::InternalSwap(Error* other) { + using std::swap; + auto* lhs_arena = GetArenaForAllocation(); + auto* rhs_arena = other->GetArenaForAllocation(); + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap( + &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + &reason_, lhs_arena, + &other->reason_, rhs_arena + ); + swap(code_, other->code_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata Error::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2ferror_2eproto_getter, &descriptor_table_flwr_2fproto_2ferror_2eproto_once, + file_level_metadata_flwr_2fproto_2ferror_2eproto[0]); +} + +// @@protoc_insertion_point(namespace_scope) +} // namespace proto +} // namespace flwr +PROTOBUF_NAMESPACE_OPEN +template<> 
PROTOBUF_NOINLINE ::flwr::proto::Error* Arena::CreateMaybeMessage< ::flwr::proto::Error >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::Error >(arena); +} +PROTOBUF_NAMESPACE_CLOSE + +// @@protoc_insertion_point(global_scope) +#include diff --git a/src/cc/flwr/include/flwr/proto/error.pb.h b/src/cc/flwr/include/flwr/proto/error.pb.h new file mode 100644 index 000000000000..483e5575d1ce --- /dev/null +++ b/src/cc/flwr/include/flwr/proto/error.pb.h @@ -0,0 +1,317 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: flwr/proto/error.proto + +#ifndef GOOGLE_PROTOBUF_INCLUDED_flwr_2fproto_2ferror_2eproto +#define GOOGLE_PROTOBUF_INCLUDED_flwr_2fproto_2ferror_2eproto + +#include +#include + +#include +#if PROTOBUF_VERSION < 3018000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. +#endif +#if 3018001 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include +// @@protoc_insertion_point(includes) +#include +#define PROTOBUF_INTERNAL_EXPORT_flwr_2fproto_2ferror_2eproto +PROTOBUF_NAMESPACE_OPEN +namespace internal { +class AnyMetadata; +} // namespace internal +PROTOBUF_NAMESPACE_CLOSE + +// Internal implementation detail -- do not use these members. 
+struct TableStruct_flwr_2fproto_2ferror_2eproto { + static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTableField entries[] + PROTOBUF_SECTION_VARIABLE(protodesc_cold); + static const ::PROTOBUF_NAMESPACE_ID::internal::AuxiliaryParseTableField aux[] + PROTOBUF_SECTION_VARIABLE(protodesc_cold); + static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[1] + PROTOBUF_SECTION_VARIABLE(protodesc_cold); + static const ::PROTOBUF_NAMESPACE_ID::internal::FieldMetadata field_metadata[]; + static const ::PROTOBUF_NAMESPACE_ID::internal::SerializationTable serialization_table[]; + static const ::PROTOBUF_NAMESPACE_ID::uint32 offsets[]; +}; +extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_flwr_2fproto_2ferror_2eproto; +namespace flwr { +namespace proto { +class Error; +struct ErrorDefaultTypeInternal; +extern ErrorDefaultTypeInternal _Error_default_instance_; +} // namespace proto +} // namespace flwr +PROTOBUF_NAMESPACE_OPEN +template<> ::flwr::proto::Error* Arena::CreateMaybeMessage<::flwr::proto::Error>(Arena*); +PROTOBUF_NAMESPACE_CLOSE +namespace flwr { +namespace proto { + +// =================================================================== + +class Error final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.Error) */ { + public: + inline Error() : Error(nullptr) {} + ~Error() override; + explicit constexpr Error(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + Error(const Error& from); + Error(Error&& from) noexcept + : Error() { + *this = ::std::move(from); + } + + inline Error& operator=(const Error& from) { + CopyFrom(from); + return *this; + } + inline Error& operator=(Error&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return 
*this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const Error& default_instance() { + return *internal_default_instance(); + } + static inline const Error* internal_default_instance() { + return reinterpret_cast( + &_Error_default_instance_); + } + static constexpr int kIndexInFileMessages = + 0; + + friend void swap(Error& a, Error& b) { + a.Swap(&b); + } + inline void Swap(Error* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(Error* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline Error* New() const final { + return new Error(); + } + + Error* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const Error& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const Error& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* 
stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(Error* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.Error"; + } + protected: + explicit Error(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kReasonFieldNumber = 2, + kCodeFieldNumber = 1, + }; + // string reason = 2; + void clear_reason(); + const std::string& reason() const; + template + void set_reason(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_reason(); + PROTOBUF_MUST_USE_RESULT std::string* release_reason(); + void set_allocated_reason(std::string* reason); + private: + const std::string& _internal_reason() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_reason(const std::string& value); + std::string* _internal_mutable_reason(); + public: + + // sint64 code = 1; + void clear_code(); + ::PROTOBUF_NAMESPACE_ID::int64 code() const; + void set_code(::PROTOBUF_NAMESPACE_ID::int64 value); + private: + ::PROTOBUF_NAMESPACE_ID::int64 _internal_code() const; + void _internal_set_code(::PROTOBUF_NAMESPACE_ID::int64 value); + public: + + // @@protoc_insertion_point(class_scope:flwr.proto.Error) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr reason_; + ::PROTOBUF_NAMESPACE_ID::int64 code_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2ferror_2eproto; +}; +// =================================================================== + + +// =================================================================== + +#ifdef __GNUC__ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif // __GNUC__ +// Error + +// sint64 code = 1; +inline void Error::clear_code() { + code_ = int64_t{0}; +} +inline ::PROTOBUF_NAMESPACE_ID::int64 Error::_internal_code() const { + return code_; +} +inline ::PROTOBUF_NAMESPACE_ID::int64 Error::code() const { + // @@protoc_insertion_point(field_get:flwr.proto.Error.code) + return _internal_code(); +} +inline void Error::_internal_set_code(::PROTOBUF_NAMESPACE_ID::int64 value) { + + code_ = value; +} +inline void Error::set_code(::PROTOBUF_NAMESPACE_ID::int64 value) { + _internal_set_code(value); + // @@protoc_insertion_point(field_set:flwr.proto.Error.code) +} + +// 
string reason = 2; +inline void Error::clear_reason() { + reason_.ClearToEmpty(); +} +inline const std::string& Error::reason() const { + // @@protoc_insertion_point(field_get:flwr.proto.Error.reason) + return _internal_reason(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void Error::set_reason(ArgT0&& arg0, ArgT... args) { + + reason_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:flwr.proto.Error.reason) +} +inline std::string* Error::mutable_reason() { + std::string* _s = _internal_mutable_reason(); + // @@protoc_insertion_point(field_mutable:flwr.proto.Error.reason) + return _s; +} +inline const std::string& Error::_internal_reason() const { + return reason_.Get(); +} +inline void Error::_internal_set_reason(const std::string& value) { + + reason_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); +} +inline std::string* Error::_internal_mutable_reason() { + + return reason_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); +} +inline std::string* Error::release_reason() { + // @@protoc_insertion_point(field_release:flwr.proto.Error.reason) + return reason_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); +} +inline void Error::set_allocated_reason(std::string* reason) { + if (reason != nullptr) { + + } else { + + } + reason_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), reason, + GetArenaForAllocation()); + // @@protoc_insertion_point(field_set_allocated:flwr.proto.Error.reason) +} + +#ifdef __GNUC__ + #pragma GCC diagnostic pop +#endif // __GNUC__ + +// @@protoc_insertion_point(namespace_scope) + +} // namespace proto +} // namespace flwr + +// @@protoc_insertion_point(global_scope) + +#include +#endif // 
GOOGLE_PROTOBUF_INCLUDED_GOOGLE_PROTOBUF_INCLUDED_flwr_2fproto_2ferror_2eproto diff --git a/src/cc/flwr/include/flwr/proto/fleet.grpc.pb.cc b/src/cc/flwr/include/flwr/proto/fleet.grpc.pb.cc index c71a6a3e1c45..0e6c69ad14ac 100644 --- a/src/cc/flwr/include/flwr/proto/fleet.grpc.pb.cc +++ b/src/cc/flwr/include/flwr/proto/fleet.grpc.pb.cc @@ -25,8 +25,10 @@ namespace proto { static const char* Fleet_method_names[] = { "/flwr.proto.Fleet/CreateNode", "/flwr.proto.Fleet/DeleteNode", + "/flwr.proto.Fleet/Ping", "/flwr.proto.Fleet/PullTaskIns", "/flwr.proto.Fleet/PushTaskRes", + "/flwr.proto.Fleet/GetRun", }; std::unique_ptr< Fleet::Stub> Fleet::NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options) { @@ -38,8 +40,10 @@ std::unique_ptr< Fleet::Stub> Fleet::NewStub(const std::shared_ptr< ::grpc::Chan Fleet::Stub::Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options) : channel_(channel), rpcmethod_CreateNode_(Fleet_method_names[0], options.suffix_for_stats(),::grpc::internal::RpcMethod::NORMAL_RPC, channel) , rpcmethod_DeleteNode_(Fleet_method_names[1], options.suffix_for_stats(),::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_PullTaskIns_(Fleet_method_names[2], options.suffix_for_stats(),::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_PushTaskRes_(Fleet_method_names[3], options.suffix_for_stats(),::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_Ping_(Fleet_method_names[2], options.suffix_for_stats(),::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_PullTaskIns_(Fleet_method_names[3], options.suffix_for_stats(),::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_PushTaskRes_(Fleet_method_names[4], options.suffix_for_stats(),::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_GetRun_(Fleet_method_names[5], options.suffix_for_stats(),::grpc::internal::RpcMethod::NORMAL_RPC, channel) {} ::grpc::Status 
Fleet::Stub::CreateNode(::grpc::ClientContext* context, const ::flwr::proto::CreateNodeRequest& request, ::flwr::proto::CreateNodeResponse* response) { @@ -88,6 +92,29 @@ ::grpc::ClientAsyncResponseReader< ::flwr::proto::DeleteNodeResponse>* Fleet::St return result; } +::grpc::Status Fleet::Stub::Ping(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::flwr::proto::PingResponse* response) { + return ::grpc::internal::BlockingUnaryCall< ::flwr::proto::PingRequest, ::flwr::proto::PingResponse, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(channel_.get(), rpcmethod_Ping_, context, request, response); +} + +void Fleet::Stub::async::Ping(::grpc::ClientContext* context, const ::flwr::proto::PingRequest* request, ::flwr::proto::PingResponse* response, std::function f) { + ::grpc::internal::CallbackUnaryCall< ::flwr::proto::PingRequest, ::flwr::proto::PingResponse, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(stub_->channel_.get(), stub_->rpcmethod_Ping_, context, request, response, std::move(f)); +} + +void Fleet::Stub::async::Ping(::grpc::ClientContext* context, const ::flwr::proto::PingRequest* request, ::flwr::proto::PingResponse* response, ::grpc::ClientUnaryReactor* reactor) { + ::grpc::internal::ClientCallbackUnaryFactory::Create< ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(stub_->channel_.get(), stub_->rpcmethod_Ping_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::flwr::proto::PingResponse>* Fleet::Stub::PrepareAsyncPingRaw(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc::internal::ClientAsyncResponseReaderHelper::Create< ::flwr::proto::PingResponse, ::flwr::proto::PingRequest, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(channel_.get(), cq, rpcmethod_Ping_, context, request); +} + +::grpc::ClientAsyncResponseReader< ::flwr::proto::PingResponse>* 
Fleet::Stub::AsyncPingRaw(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::grpc::CompletionQueue* cq) { + auto* result = + this->PrepareAsyncPingRaw(context, request, cq); + result->StartCall(); + return result; +} + ::grpc::Status Fleet::Stub::PullTaskIns(::grpc::ClientContext* context, const ::flwr::proto::PullTaskInsRequest& request, ::flwr::proto::PullTaskInsResponse* response) { return ::grpc::internal::BlockingUnaryCall< ::flwr::proto::PullTaskInsRequest, ::flwr::proto::PullTaskInsResponse, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(channel_.get(), rpcmethod_PullTaskIns_, context, request, response); } @@ -134,6 +161,29 @@ ::grpc::ClientAsyncResponseReader< ::flwr::proto::PushTaskResResponse>* Fleet::S return result; } +::grpc::Status Fleet::Stub::GetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::flwr::proto::GetRunResponse* response) { + return ::grpc::internal::BlockingUnaryCall< ::flwr::proto::GetRunRequest, ::flwr::proto::GetRunResponse, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(channel_.get(), rpcmethod_GetRun_, context, request, response); +} + +void Fleet::Stub::async::GetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest* request, ::flwr::proto::GetRunResponse* response, std::function f) { + ::grpc::internal::CallbackUnaryCall< ::flwr::proto::GetRunRequest, ::flwr::proto::GetRunResponse, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(stub_->channel_.get(), stub_->rpcmethod_GetRun_, context, request, response, std::move(f)); +} + +void Fleet::Stub::async::GetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest* request, ::flwr::proto::GetRunResponse* response, ::grpc::ClientUnaryReactor* reactor) { + ::grpc::internal::ClientCallbackUnaryFactory::Create< ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(stub_->channel_.get(), stub_->rpcmethod_GetRun_, context, request, response, reactor); 
+} + +::grpc::ClientAsyncResponseReader< ::flwr::proto::GetRunResponse>* Fleet::Stub::PrepareAsyncGetRunRaw(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc::internal::ClientAsyncResponseReaderHelper::Create< ::flwr::proto::GetRunResponse, ::flwr::proto::GetRunRequest, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(channel_.get(), cq, rpcmethod_GetRun_, context, request); +} + +::grpc::ClientAsyncResponseReader< ::flwr::proto::GetRunResponse>* Fleet::Stub::AsyncGetRunRaw(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::grpc::CompletionQueue* cq) { + auto* result = + this->PrepareAsyncGetRunRaw(context, request, cq); + result->StartCall(); + return result; +} + Fleet::Service::Service() { AddMethod(new ::grpc::internal::RpcServiceMethod( Fleet_method_names[0], @@ -158,6 +208,16 @@ Fleet::Service::Service() { AddMethod(new ::grpc::internal::RpcServiceMethod( Fleet_method_names[2], ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< Fleet::Service, ::flwr::proto::PingRequest, ::flwr::proto::PingResponse, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>( + [](Fleet::Service* service, + ::grpc::ServerContext* ctx, + const ::flwr::proto::PingRequest* req, + ::flwr::proto::PingResponse* resp) { + return service->Ping(ctx, req, resp); + }, this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + Fleet_method_names[3], + ::grpc::internal::RpcMethod::NORMAL_RPC, new ::grpc::internal::RpcMethodHandler< Fleet::Service, ::flwr::proto::PullTaskInsRequest, ::flwr::proto::PullTaskInsResponse, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>( [](Fleet::Service* service, ::grpc::ServerContext* ctx, @@ -166,7 +226,7 @@ Fleet::Service::Service() { return service->PullTaskIns(ctx, req, resp); }, this))); AddMethod(new ::grpc::internal::RpcServiceMethod( - Fleet_method_names[3], + Fleet_method_names[4], 
::grpc::internal::RpcMethod::NORMAL_RPC, new ::grpc::internal::RpcMethodHandler< Fleet::Service, ::flwr::proto::PushTaskResRequest, ::flwr::proto::PushTaskResResponse, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>( [](Fleet::Service* service, @@ -175,6 +235,16 @@ Fleet::Service::Service() { ::flwr::proto::PushTaskResResponse* resp) { return service->PushTaskRes(ctx, req, resp); }, this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + Fleet_method_names[5], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< Fleet::Service, ::flwr::proto::GetRunRequest, ::flwr::proto::GetRunResponse, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>( + [](Fleet::Service* service, + ::grpc::ServerContext* ctx, + const ::flwr::proto::GetRunRequest* req, + ::flwr::proto::GetRunResponse* resp) { + return service->GetRun(ctx, req, resp); + }, this))); } Fleet::Service::~Service() { @@ -194,6 +264,13 @@ ::grpc::Status Fleet::Service::DeleteNode(::grpc::ServerContext* context, const return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } +::grpc::Status Fleet::Service::Ping(::grpc::ServerContext* context, const ::flwr::proto::PingRequest* request, ::flwr::proto::PingResponse* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + ::grpc::Status Fleet::Service::PullTaskIns(::grpc::ServerContext* context, const ::flwr::proto::PullTaskInsRequest* request, ::flwr::proto::PullTaskInsResponse* response) { (void) context; (void) request; @@ -208,6 +285,13 @@ ::grpc::Status Fleet::Service::PushTaskRes(::grpc::ServerContext* context, const return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } +::grpc::Status Fleet::Service::GetRun(::grpc::ServerContext* context, const ::flwr::proto::GetRunRequest* request, ::flwr::proto::GetRunResponse* response) { + (void) context; + (void) request; + (void) response; + return 
::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + } // namespace flwr } // namespace proto diff --git a/src/cc/flwr/include/flwr/proto/fleet.grpc.pb.h b/src/cc/flwr/include/flwr/proto/fleet.grpc.pb.h index 03d445142c37..fb1e4bf7b6c4 100644 --- a/src/cc/flwr/include/flwr/proto/fleet.grpc.pb.h +++ b/src/cc/flwr/include/flwr/proto/fleet.grpc.pb.h @@ -66,6 +66,13 @@ class Fleet final { std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::DeleteNodeResponse>> PrepareAsyncDeleteNode(::grpc::ClientContext* context, const ::flwr::proto::DeleteNodeRequest& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::DeleteNodeResponse>>(PrepareAsyncDeleteNodeRaw(context, request, cq)); } + virtual ::grpc::Status Ping(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::flwr::proto::PingResponse* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PingResponse>> AsyncPing(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PingResponse>>(AsyncPingRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PingResponse>> PrepareAsyncPing(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PingResponse>>(PrepareAsyncPingRaw(context, request, cq)); + } // Retrieve one or more tasks, if possible // // HTTP API path: /api/v1/fleet/pull-task-ins @@ -86,6 +93,13 @@ class Fleet final { std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PushTaskResResponse>> PrepareAsyncPushTaskRes(::grpc::ClientContext* context, const ::flwr::proto::PushTaskResRequest& request, 
::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PushTaskResResponse>>(PrepareAsyncPushTaskResRaw(context, request, cq)); } + virtual ::grpc::Status GetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::flwr::proto::GetRunResponse* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::GetRunResponse>> AsyncGetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::GetRunResponse>>(AsyncGetRunRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::GetRunResponse>> PrepareAsyncGetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::GetRunResponse>>(PrepareAsyncGetRunRaw(context, request, cq)); + } class async_interface { public: virtual ~async_interface() {} @@ -93,6 +107,8 @@ class Fleet final { virtual void CreateNode(::grpc::ClientContext* context, const ::flwr::proto::CreateNodeRequest* request, ::flwr::proto::CreateNodeResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; virtual void DeleteNode(::grpc::ClientContext* context, const ::flwr::proto::DeleteNodeRequest* request, ::flwr::proto::DeleteNodeResponse* response, std::function) = 0; virtual void DeleteNode(::grpc::ClientContext* context, const ::flwr::proto::DeleteNodeRequest* request, ::flwr::proto::DeleteNodeResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + virtual void Ping(::grpc::ClientContext* context, const ::flwr::proto::PingRequest* request, ::flwr::proto::PingResponse* response, std::function) = 0; + virtual void Ping(::grpc::ClientContext* context, const ::flwr::proto::PingRequest* request, 
::flwr::proto::PingResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; // Retrieve one or more tasks, if possible // // HTTP API path: /api/v1/fleet/pull-task-ins @@ -103,6 +119,8 @@ class Fleet final { // HTTP API path: /api/v1/fleet/push-task-res virtual void PushTaskRes(::grpc::ClientContext* context, const ::flwr::proto::PushTaskResRequest* request, ::flwr::proto::PushTaskResResponse* response, std::function) = 0; virtual void PushTaskRes(::grpc::ClientContext* context, const ::flwr::proto::PushTaskResRequest* request, ::flwr::proto::PushTaskResResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + virtual void GetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest* request, ::flwr::proto::GetRunResponse* response, std::function) = 0; + virtual void GetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest* request, ::flwr::proto::GetRunResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; }; typedef class async_interface experimental_async_interface; virtual class async_interface* async() { return nullptr; } @@ -112,10 +130,14 @@ class Fleet final { virtual ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::CreateNodeResponse>* PrepareAsyncCreateNodeRaw(::grpc::ClientContext* context, const ::flwr::proto::CreateNodeRequest& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::DeleteNodeResponse>* AsyncDeleteNodeRaw(::grpc::ClientContext* context, const ::flwr::proto::DeleteNodeRequest& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::DeleteNodeResponse>* PrepareAsyncDeleteNodeRaw(::grpc::ClientContext* context, const ::flwr::proto::DeleteNodeRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PingResponse>* AsyncPingRaw(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, 
::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PingResponse>* PrepareAsyncPingRaw(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PullTaskInsResponse>* AsyncPullTaskInsRaw(::grpc::ClientContext* context, const ::flwr::proto::PullTaskInsRequest& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PullTaskInsResponse>* PrepareAsyncPullTaskInsRaw(::grpc::ClientContext* context, const ::flwr::proto::PullTaskInsRequest& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PushTaskResResponse>* AsyncPushTaskResRaw(::grpc::ClientContext* context, const ::flwr::proto::PushTaskResRequest& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PushTaskResResponse>* PrepareAsyncPushTaskResRaw(::grpc::ClientContext* context, const ::flwr::proto::PushTaskResRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::GetRunResponse>* AsyncGetRunRaw(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::GetRunResponse>* PrepareAsyncGetRunRaw(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::grpc::CompletionQueue* cq) = 0; }; class Stub final : public StubInterface { public: @@ -134,6 +156,13 @@ class Fleet final { std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::DeleteNodeResponse>> PrepareAsyncDeleteNode(::grpc::ClientContext* context, const ::flwr::proto::DeleteNodeRequest& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReader< 
::flwr::proto::DeleteNodeResponse>>(PrepareAsyncDeleteNodeRaw(context, request, cq)); } + ::grpc::Status Ping(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::flwr::proto::PingResponse* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::PingResponse>> AsyncPing(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::PingResponse>>(AsyncPingRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::PingResponse>> PrepareAsyncPing(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::PingResponse>>(PrepareAsyncPingRaw(context, request, cq)); + } ::grpc::Status PullTaskIns(::grpc::ClientContext* context, const ::flwr::proto::PullTaskInsRequest& request, ::flwr::proto::PullTaskInsResponse* response) override; std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::PullTaskInsResponse>> AsyncPullTaskIns(::grpc::ClientContext* context, const ::flwr::proto::PullTaskInsRequest& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::PullTaskInsResponse>>(AsyncPullTaskInsRaw(context, request, cq)); @@ -148,6 +177,13 @@ class Fleet final { std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::PushTaskResResponse>> PrepareAsyncPushTaskRes(::grpc::ClientContext* context, const ::flwr::proto::PushTaskResRequest& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::PushTaskResResponse>>(PrepareAsyncPushTaskResRaw(context, request, cq)); } + ::grpc::Status GetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::flwr::proto::GetRunResponse* response) 
override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::GetRunResponse>> AsyncGetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::GetRunResponse>>(AsyncGetRunRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::GetRunResponse>> PrepareAsyncGetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::GetRunResponse>>(PrepareAsyncGetRunRaw(context, request, cq)); + } class async final : public StubInterface::async_interface { public: @@ -155,10 +191,14 @@ class Fleet final { void CreateNode(::grpc::ClientContext* context, const ::flwr::proto::CreateNodeRequest* request, ::flwr::proto::CreateNodeResponse* response, ::grpc::ClientUnaryReactor* reactor) override; void DeleteNode(::grpc::ClientContext* context, const ::flwr::proto::DeleteNodeRequest* request, ::flwr::proto::DeleteNodeResponse* response, std::function) override; void DeleteNode(::grpc::ClientContext* context, const ::flwr::proto::DeleteNodeRequest* request, ::flwr::proto::DeleteNodeResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + void Ping(::grpc::ClientContext* context, const ::flwr::proto::PingRequest* request, ::flwr::proto::PingResponse* response, std::function) override; + void Ping(::grpc::ClientContext* context, const ::flwr::proto::PingRequest* request, ::flwr::proto::PingResponse* response, ::grpc::ClientUnaryReactor* reactor) override; void PullTaskIns(::grpc::ClientContext* context, const ::flwr::proto::PullTaskInsRequest* request, ::flwr::proto::PullTaskInsResponse* response, std::function) override; void PullTaskIns(::grpc::ClientContext* context, const ::flwr::proto::PullTaskInsRequest* request, ::flwr::proto::PullTaskInsResponse* 
response, ::grpc::ClientUnaryReactor* reactor) override; void PushTaskRes(::grpc::ClientContext* context, const ::flwr::proto::PushTaskResRequest* request, ::flwr::proto::PushTaskResResponse* response, std::function) override; void PushTaskRes(::grpc::ClientContext* context, const ::flwr::proto::PushTaskResRequest* request, ::flwr::proto::PushTaskResResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + void GetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest* request, ::flwr::proto::GetRunResponse* response, std::function) override; + void GetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest* request, ::flwr::proto::GetRunResponse* response, ::grpc::ClientUnaryReactor* reactor) override; private: friend class Stub; explicit async(Stub* stub): stub_(stub) { } @@ -174,14 +214,20 @@ class Fleet final { ::grpc::ClientAsyncResponseReader< ::flwr::proto::CreateNodeResponse>* PrepareAsyncCreateNodeRaw(::grpc::ClientContext* context, const ::flwr::proto::CreateNodeRequest& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::flwr::proto::DeleteNodeResponse>* AsyncDeleteNodeRaw(::grpc::ClientContext* context, const ::flwr::proto::DeleteNodeRequest& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::flwr::proto::DeleteNodeResponse>* PrepareAsyncDeleteNodeRaw(::grpc::ClientContext* context, const ::flwr::proto::DeleteNodeRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::flwr::proto::PingResponse>* AsyncPingRaw(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::flwr::proto::PingResponse>* PrepareAsyncPingRaw(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::flwr::proto::PullTaskInsResponse>* 
AsyncPullTaskInsRaw(::grpc::ClientContext* context, const ::flwr::proto::PullTaskInsRequest& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::flwr::proto::PullTaskInsResponse>* PrepareAsyncPullTaskInsRaw(::grpc::ClientContext* context, const ::flwr::proto::PullTaskInsRequest& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::flwr::proto::PushTaskResResponse>* AsyncPushTaskResRaw(::grpc::ClientContext* context, const ::flwr::proto::PushTaskResRequest& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::flwr::proto::PushTaskResResponse>* PrepareAsyncPushTaskResRaw(::grpc::ClientContext* context, const ::flwr::proto::PushTaskResRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::flwr::proto::GetRunResponse>* AsyncGetRunRaw(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::flwr::proto::GetRunResponse>* PrepareAsyncGetRunRaw(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::grpc::CompletionQueue* cq) override; const ::grpc::internal::RpcMethod rpcmethod_CreateNode_; const ::grpc::internal::RpcMethod rpcmethod_DeleteNode_; + const ::grpc::internal::RpcMethod rpcmethod_Ping_; const ::grpc::internal::RpcMethod rpcmethod_PullTaskIns_; const ::grpc::internal::RpcMethod rpcmethod_PushTaskRes_; + const ::grpc::internal::RpcMethod rpcmethod_GetRun_; }; static std::unique_ptr NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options = ::grpc::StubOptions()); @@ -191,6 +237,7 @@ class Fleet final { virtual ~Service(); virtual ::grpc::Status CreateNode(::grpc::ServerContext* context, const ::flwr::proto::CreateNodeRequest* request, ::flwr::proto::CreateNodeResponse* response); virtual ::grpc::Status DeleteNode(::grpc::ServerContext* context, const 
::flwr::proto::DeleteNodeRequest* request, ::flwr::proto::DeleteNodeResponse* response); + virtual ::grpc::Status Ping(::grpc::ServerContext* context, const ::flwr::proto::PingRequest* request, ::flwr::proto::PingResponse* response); // Retrieve one or more tasks, if possible // // HTTP API path: /api/v1/fleet/pull-task-ins @@ -199,6 +246,7 @@ class Fleet final { // // HTTP API path: /api/v1/fleet/push-task-res virtual ::grpc::Status PushTaskRes(::grpc::ServerContext* context, const ::flwr::proto::PushTaskResRequest* request, ::flwr::proto::PushTaskResResponse* response); + virtual ::grpc::Status GetRun(::grpc::ServerContext* context, const ::flwr::proto::GetRunRequest* request, ::flwr::proto::GetRunResponse* response); }; template class WithAsyncMethod_CreateNode : public BaseClass { @@ -241,12 +289,32 @@ class Fleet final { } }; template + class WithAsyncMethod_Ping : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_Ping() { + ::grpc::Service::MarkMethodAsync(2); + } + ~WithAsyncMethod_Ping() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Ping(::grpc::ServerContext* /*context*/, const ::flwr::proto::PingRequest* /*request*/, ::flwr::proto::PingResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestPing(::grpc::ServerContext* context, ::flwr::proto::PingRequest* request, ::grpc::ServerAsyncResponseWriter< ::flwr::proto::PingResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(2, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template class WithAsyncMethod_PullTaskIns : public BaseClass { private: void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithAsyncMethod_PullTaskIns() { - 
::grpc::Service::MarkMethodAsync(2); + ::grpc::Service::MarkMethodAsync(3); } ~WithAsyncMethod_PullTaskIns() override { BaseClassMustBeDerivedFromService(this); @@ -257,7 +325,7 @@ class Fleet final { return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestPullTaskIns(::grpc::ServerContext* context, ::flwr::proto::PullTaskInsRequest* request, ::grpc::ServerAsyncResponseWriter< ::flwr::proto::PullTaskInsResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(2, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(3, context, request, response, new_call_cq, notification_cq, tag); } }; template @@ -266,7 +334,7 @@ class Fleet final { void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithAsyncMethod_PushTaskRes() { - ::grpc::Service::MarkMethodAsync(3); + ::grpc::Service::MarkMethodAsync(4); } ~WithAsyncMethod_PushTaskRes() override { BaseClassMustBeDerivedFromService(this); @@ -277,10 +345,30 @@ class Fleet final { return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestPushTaskRes(::grpc::ServerContext* context, ::flwr::proto::PushTaskResRequest* request, ::grpc::ServerAsyncResponseWriter< ::flwr::proto::PushTaskResResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(3, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(4, context, request, response, new_call_cq, notification_cq, tag); } }; - typedef WithAsyncMethod_CreateNode > > > AsyncService; + template + class WithAsyncMethod_GetRun : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_GetRun() { + ::grpc::Service::MarkMethodAsync(5); + } + ~WithAsyncMethod_GetRun() 
override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status GetRun(::grpc::ServerContext* /*context*/, const ::flwr::proto::GetRunRequest* /*request*/, ::flwr::proto::GetRunResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestGetRun(::grpc::ServerContext* context, ::flwr::proto::GetRunRequest* request, ::grpc::ServerAsyncResponseWriter< ::flwr::proto::GetRunResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(5, context, request, response, new_call_cq, notification_cq, tag); + } + }; + typedef WithAsyncMethod_CreateNode > > > > > AsyncService; template class WithCallbackMethod_CreateNode : public BaseClass { private: @@ -336,18 +424,45 @@ class Fleet final { ::grpc::CallbackServerContext* /*context*/, const ::flwr::proto::DeleteNodeRequest* /*request*/, ::flwr::proto::DeleteNodeResponse* /*response*/) { return nullptr; } }; template + class WithCallbackMethod_Ping : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithCallbackMethod_Ping() { + ::grpc::Service::MarkMethodCallback(2, + new ::grpc::internal::CallbackUnaryHandler< ::flwr::proto::PingRequest, ::flwr::proto::PingResponse>( + [this]( + ::grpc::CallbackServerContext* context, const ::flwr::proto::PingRequest* request, ::flwr::proto::PingResponse* response) { return this->Ping(context, request, response); }));} + void SetMessageAllocatorFor_Ping( + ::grpc::MessageAllocator< ::flwr::proto::PingRequest, ::flwr::proto::PingResponse>* allocator) { + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(2); + static_cast<::grpc::internal::CallbackUnaryHandler< ::flwr::proto::PingRequest, ::flwr::proto::PingResponse>*>(handler) + ->SetMessageAllocator(allocator); + } + 
~WithCallbackMethod_Ping() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Ping(::grpc::ServerContext* /*context*/, const ::flwr::proto::PingRequest* /*request*/, ::flwr::proto::PingResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual ::grpc::ServerUnaryReactor* Ping( + ::grpc::CallbackServerContext* /*context*/, const ::flwr::proto::PingRequest* /*request*/, ::flwr::proto::PingResponse* /*response*/) { return nullptr; } + }; + template class WithCallbackMethod_PullTaskIns : public BaseClass { private: void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithCallbackMethod_PullTaskIns() { - ::grpc::Service::MarkMethodCallback(2, + ::grpc::Service::MarkMethodCallback(3, new ::grpc::internal::CallbackUnaryHandler< ::flwr::proto::PullTaskInsRequest, ::flwr::proto::PullTaskInsResponse>( [this]( ::grpc::CallbackServerContext* context, const ::flwr::proto::PullTaskInsRequest* request, ::flwr::proto::PullTaskInsResponse* response) { return this->PullTaskIns(context, request, response); }));} void SetMessageAllocatorFor_PullTaskIns( ::grpc::MessageAllocator< ::flwr::proto::PullTaskInsRequest, ::flwr::proto::PullTaskInsResponse>* allocator) { - ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(2); + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(3); static_cast<::grpc::internal::CallbackUnaryHandler< ::flwr::proto::PullTaskInsRequest, ::flwr::proto::PullTaskInsResponse>*>(handler) ->SetMessageAllocator(allocator); } @@ -368,13 +483,13 @@ class Fleet final { void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithCallbackMethod_PushTaskRes() { - ::grpc::Service::MarkMethodCallback(3, + ::grpc::Service::MarkMethodCallback(4, new ::grpc::internal::CallbackUnaryHandler< ::flwr::proto::PushTaskResRequest, 
::flwr::proto::PushTaskResResponse>( [this]( ::grpc::CallbackServerContext* context, const ::flwr::proto::PushTaskResRequest* request, ::flwr::proto::PushTaskResResponse* response) { return this->PushTaskRes(context, request, response); }));} void SetMessageAllocatorFor_PushTaskRes( ::grpc::MessageAllocator< ::flwr::proto::PushTaskResRequest, ::flwr::proto::PushTaskResResponse>* allocator) { - ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(3); + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(4); static_cast<::grpc::internal::CallbackUnaryHandler< ::flwr::proto::PushTaskResRequest, ::flwr::proto::PushTaskResResponse>*>(handler) ->SetMessageAllocator(allocator); } @@ -389,7 +504,34 @@ class Fleet final { virtual ::grpc::ServerUnaryReactor* PushTaskRes( ::grpc::CallbackServerContext* /*context*/, const ::flwr::proto::PushTaskResRequest* /*request*/, ::flwr::proto::PushTaskResResponse* /*response*/) { return nullptr; } }; - typedef WithCallbackMethod_CreateNode > > > CallbackService; + template + class WithCallbackMethod_GetRun : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithCallbackMethod_GetRun() { + ::grpc::Service::MarkMethodCallback(5, + new ::grpc::internal::CallbackUnaryHandler< ::flwr::proto::GetRunRequest, ::flwr::proto::GetRunResponse>( + [this]( + ::grpc::CallbackServerContext* context, const ::flwr::proto::GetRunRequest* request, ::flwr::proto::GetRunResponse* response) { return this->GetRun(context, request, response); }));} + void SetMessageAllocatorFor_GetRun( + ::grpc::MessageAllocator< ::flwr::proto::GetRunRequest, ::flwr::proto::GetRunResponse>* allocator) { + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(5); + static_cast<::grpc::internal::CallbackUnaryHandler< ::flwr::proto::GetRunRequest, ::flwr::proto::GetRunResponse>*>(handler) + ->SetMessageAllocator(allocator); + } + 
~WithCallbackMethod_GetRun() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status GetRun(::grpc::ServerContext* /*context*/, const ::flwr::proto::GetRunRequest* /*request*/, ::flwr::proto::GetRunResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual ::grpc::ServerUnaryReactor* GetRun( + ::grpc::CallbackServerContext* /*context*/, const ::flwr::proto::GetRunRequest* /*request*/, ::flwr::proto::GetRunResponse* /*response*/) { return nullptr; } + }; + typedef WithCallbackMethod_CreateNode > > > > > CallbackService; typedef CallbackService ExperimentalCallbackService; template class WithGenericMethod_CreateNode : public BaseClass { @@ -426,12 +568,29 @@ class Fleet final { } }; template + class WithGenericMethod_Ping : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_Ping() { + ::grpc::Service::MarkMethodGeneric(2); + } + ~WithGenericMethod_Ping() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Ping(::grpc::ServerContext* /*context*/, const ::flwr::proto::PingRequest* /*request*/, ::flwr::proto::PingResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template class WithGenericMethod_PullTaskIns : public BaseClass { private: void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithGenericMethod_PullTaskIns() { - ::grpc::Service::MarkMethodGeneric(2); + ::grpc::Service::MarkMethodGeneric(3); } ~WithGenericMethod_PullTaskIns() override { BaseClassMustBeDerivedFromService(this); @@ -448,7 +607,7 @@ class Fleet final { void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithGenericMethod_PushTaskRes() { - ::grpc::Service::MarkMethodGeneric(3); + 
::grpc::Service::MarkMethodGeneric(4); } ~WithGenericMethod_PushTaskRes() override { BaseClassMustBeDerivedFromService(this); @@ -460,6 +619,23 @@ class Fleet final { } }; template + class WithGenericMethod_GetRun : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_GetRun() { + ::grpc::Service::MarkMethodGeneric(5); + } + ~WithGenericMethod_GetRun() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status GetRun(::grpc::ServerContext* /*context*/, const ::flwr::proto::GetRunRequest* /*request*/, ::flwr::proto::GetRunResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template class WithRawMethod_CreateNode : public BaseClass { private: void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} @@ -500,12 +676,32 @@ class Fleet final { } }; template + class WithRawMethod_Ping : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_Ping() { + ::grpc::Service::MarkMethodRaw(2); + } + ~WithRawMethod_Ping() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Ping(::grpc::ServerContext* /*context*/, const ::flwr::proto::PingRequest* /*request*/, ::flwr::proto::PingResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestPing(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(2, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template class WithRawMethod_PullTaskIns : public BaseClass { private: void 
BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithRawMethod_PullTaskIns() { - ::grpc::Service::MarkMethodRaw(2); + ::grpc::Service::MarkMethodRaw(3); } ~WithRawMethod_PullTaskIns() override { BaseClassMustBeDerivedFromService(this); @@ -516,7 +712,7 @@ class Fleet final { return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestPullTaskIns(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(2, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(3, context, request, response, new_call_cq, notification_cq, tag); } }; template @@ -525,7 +721,7 @@ class Fleet final { void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithRawMethod_PushTaskRes() { - ::grpc::Service::MarkMethodRaw(3); + ::grpc::Service::MarkMethodRaw(4); } ~WithRawMethod_PushTaskRes() override { BaseClassMustBeDerivedFromService(this); @@ -536,7 +732,27 @@ class Fleet final { return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestPushTaskRes(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(3, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(4, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_GetRun : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_GetRun() { + ::grpc::Service::MarkMethodRaw(5); + } + ~WithRawMethod_GetRun() override { + 
BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status GetRun(::grpc::ServerContext* /*context*/, const ::flwr::proto::GetRunRequest* /*request*/, ::flwr::proto::GetRunResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestGetRun(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(5, context, request, response, new_call_cq, notification_cq, tag); } }; template @@ -584,12 +800,34 @@ class Fleet final { ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) { return nullptr; } }; template + class WithRawCallbackMethod_Ping : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawCallbackMethod_Ping() { + ::grpc::Service::MarkMethodRawCallback(2, + new ::grpc::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this]( + ::grpc::CallbackServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->Ping(context, request, response); })); + } + ~WithRawCallbackMethod_Ping() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Ping(::grpc::ServerContext* /*context*/, const ::flwr::proto::PingRequest* /*request*/, ::flwr::proto::PingResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual ::grpc::ServerUnaryReactor* Ping( + ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) { return nullptr; } + }; + template class WithRawCallbackMethod_PullTaskIns : 
public BaseClass { private: void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithRawCallbackMethod_PullTaskIns() { - ::grpc::Service::MarkMethodRawCallback(2, + ::grpc::Service::MarkMethodRawCallback(3, new ::grpc::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( [this]( ::grpc::CallbackServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->PullTaskIns(context, request, response); })); @@ -611,7 +849,7 @@ class Fleet final { void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithRawCallbackMethod_PushTaskRes() { - ::grpc::Service::MarkMethodRawCallback(3, + ::grpc::Service::MarkMethodRawCallback(4, new ::grpc::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( [this]( ::grpc::CallbackServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->PushTaskRes(context, request, response); })); @@ -628,6 +866,28 @@ class Fleet final { ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) { return nullptr; } }; template + class WithRawCallbackMethod_GetRun : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawCallbackMethod_GetRun() { + ::grpc::Service::MarkMethodRawCallback(5, + new ::grpc::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this]( + ::grpc::CallbackServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->GetRun(context, request, response); })); + } + ~WithRawCallbackMethod_GetRun() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status GetRun(::grpc::ServerContext* /*context*/, const ::flwr::proto::GetRunRequest* /*request*/, ::flwr::proto::GetRunResponse* /*response*/) override { + abort(); + 
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual ::grpc::ServerUnaryReactor* GetRun( + ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) { return nullptr; } + }; + template class WithStreamedUnaryMethod_CreateNode : public BaseClass { private: void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} @@ -682,12 +942,39 @@ class Fleet final { virtual ::grpc::Status StreamedDeleteNode(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::flwr::proto::DeleteNodeRequest,::flwr::proto::DeleteNodeResponse>* server_unary_streamer) = 0; }; template + class WithStreamedUnaryMethod_Ping : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_Ping() { + ::grpc::Service::MarkMethodStreamed(2, + new ::grpc::internal::StreamedUnaryHandler< + ::flwr::proto::PingRequest, ::flwr::proto::PingResponse>( + [this](::grpc::ServerContext* context, + ::grpc::ServerUnaryStreamer< + ::flwr::proto::PingRequest, ::flwr::proto::PingResponse>* streamer) { + return this->StreamedPing(context, + streamer); + })); + } + ~WithStreamedUnaryMethod_Ping() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status Ping(::grpc::ServerContext* /*context*/, const ::flwr::proto::PingRequest* /*request*/, ::flwr::proto::PingResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedPing(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::flwr::proto::PingRequest,::flwr::proto::PingResponse>* server_unary_streamer) = 0; + }; + template class WithStreamedUnaryMethod_PullTaskIns : public BaseClass { private: void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: 
WithStreamedUnaryMethod_PullTaskIns() { - ::grpc::Service::MarkMethodStreamed(2, + ::grpc::Service::MarkMethodStreamed(3, new ::grpc::internal::StreamedUnaryHandler< ::flwr::proto::PullTaskInsRequest, ::flwr::proto::PullTaskInsResponse>( [this](::grpc::ServerContext* context, @@ -714,7 +1001,7 @@ class Fleet final { void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithStreamedUnaryMethod_PushTaskRes() { - ::grpc::Service::MarkMethodStreamed(3, + ::grpc::Service::MarkMethodStreamed(4, new ::grpc::internal::StreamedUnaryHandler< ::flwr::proto::PushTaskResRequest, ::flwr::proto::PushTaskResResponse>( [this](::grpc::ServerContext* context, @@ -735,9 +1022,36 @@ class Fleet final { // replace default version of method with streamed unary virtual ::grpc::Status StreamedPushTaskRes(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::flwr::proto::PushTaskResRequest,::flwr::proto::PushTaskResResponse>* server_unary_streamer) = 0; }; - typedef WithStreamedUnaryMethod_CreateNode > > > StreamedUnaryService; + template + class WithStreamedUnaryMethod_GetRun : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_GetRun() { + ::grpc::Service::MarkMethodStreamed(5, + new ::grpc::internal::StreamedUnaryHandler< + ::flwr::proto::GetRunRequest, ::flwr::proto::GetRunResponse>( + [this](::grpc::ServerContext* context, + ::grpc::ServerUnaryStreamer< + ::flwr::proto::GetRunRequest, ::flwr::proto::GetRunResponse>* streamer) { + return this->StreamedGetRun(context, + streamer); + })); + } + ~WithStreamedUnaryMethod_GetRun() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status GetRun(::grpc::ServerContext* /*context*/, const ::flwr::proto::GetRunRequest* /*request*/, ::flwr::proto::GetRunResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + 
// replace default version of method with streamed unary + virtual ::grpc::Status StreamedGetRun(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::flwr::proto::GetRunRequest,::flwr::proto::GetRunResponse>* server_unary_streamer) = 0; + }; + typedef WithStreamedUnaryMethod_CreateNode > > > > > StreamedUnaryService; typedef Service SplitStreamedService; - typedef WithStreamedUnaryMethod_CreateNode > > > StreamedService; + typedef WithStreamedUnaryMethod_CreateNode > > > > > StreamedService; }; } // namespace proto diff --git a/src/cc/flwr/include/flwr/proto/fleet.pb.cc b/src/cc/flwr/include/flwr/proto/fleet.pb.cc index 302331374db1..d221658623c3 100644 --- a/src/cc/flwr/include/flwr/proto/fleet.pb.cc +++ b/src/cc/flwr/include/flwr/proto/fleet.pb.cc @@ -19,7 +19,8 @@ PROTOBUF_PRAGMA_INIT_SEG namespace flwr { namespace proto { constexpr CreateNodeRequest::CreateNodeRequest( - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized){} + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : ping_interval_(0){} struct CreateNodeRequestDefaultTypeInternal { constexpr CreateNodeRequestDefaultTypeInternal() : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} @@ -64,6 +65,31 @@ struct DeleteNodeResponseDefaultTypeInternal { }; }; PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT DeleteNodeResponseDefaultTypeInternal _DeleteNodeResponse_default_instance_; +constexpr PingRequest::PingRequest( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : node_(nullptr) + , ping_interval_(0){} +struct PingRequestDefaultTypeInternal { + constexpr PingRequestDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~PingRequestDefaultTypeInternal() {} + union { + PingRequest _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PingRequestDefaultTypeInternal _PingRequest_default_instance_; +constexpr PingResponse::PingResponse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : 
success_(false){} +struct PingResponseDefaultTypeInternal { + constexpr PingResponseDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~PingResponseDefaultTypeInternal() {} + union { + PingResponse _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PingResponseDefaultTypeInternal _PingResponse_default_instance_; constexpr PullTaskInsRequest::PullTaskInsRequest( ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) : task_ids_() @@ -126,6 +152,44 @@ struct PushTaskResResponseDefaultTypeInternal { }; }; PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PushTaskResResponseDefaultTypeInternal _PushTaskResResponse_default_instance_; +constexpr Run::Run( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : fab_id_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) + , fab_version_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) + , run_id_(int64_t{0}){} +struct RunDefaultTypeInternal { + constexpr RunDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~RunDefaultTypeInternal() {} + union { + Run _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT RunDefaultTypeInternal _Run_default_instance_; +constexpr GetRunRequest::GetRunRequest( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : run_id_(int64_t{0}){} +struct GetRunRequestDefaultTypeInternal { + constexpr GetRunRequestDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~GetRunRequestDefaultTypeInternal() {} + union { + GetRunRequest _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT GetRunRequestDefaultTypeInternal _GetRunRequest_default_instance_; +constexpr GetRunResponse::GetRunResponse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : run_(nullptr){} +struct GetRunResponseDefaultTypeInternal { + constexpr GetRunResponseDefaultTypeInternal() + : 
_instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~GetRunResponseDefaultTypeInternal() {} + union { + GetRunResponse _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT GetRunResponseDefaultTypeInternal _GetRunResponse_default_instance_; constexpr Reconnect::Reconnect( ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) : reconnect_(uint64_t{0u}){} @@ -140,7 +204,7 @@ struct ReconnectDefaultTypeInternal { PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT ReconnectDefaultTypeInternal _Reconnect_default_instance_; } // namespace proto } // namespace flwr -static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_flwr_2fproto_2ffleet_2eproto[10]; +static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_flwr_2fproto_2ffleet_2eproto[15]; static constexpr ::PROTOBUF_NAMESPACE_ID::EnumDescriptor const** file_level_enum_descriptors_flwr_2fproto_2ffleet_2eproto = nullptr; static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** file_level_service_descriptors_flwr_2fproto_2ffleet_2eproto = nullptr; @@ -151,6 +215,7 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_flwr_2fproto_2ffleet_2eproto:: ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::CreateNodeRequest, ping_interval_), ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::flwr::proto::CreateNodeResponse, _internal_metadata_), ~0u, // no _extensions_ @@ -172,6 +237,21 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_flwr_2fproto_2ffleet_2eproto:: ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::PingRequest, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::PingRequest, node_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::PingRequest, ping_interval_), + ~0u, // no _has_bits_ + 
PROTOBUF_FIELD_OFFSET(::flwr::proto::PingResponse, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::PingResponse, success_), + ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::flwr::proto::PullTaskInsRequest, _internal_metadata_), ~0u, // no _extensions_ ~0u, // no _oneof_case_ @@ -213,6 +293,29 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_flwr_2fproto_2ffleet_2eproto:: PROTOBUF_FIELD_OFFSET(::flwr::proto::PushTaskResResponse, reconnect_), PROTOBUF_FIELD_OFFSET(::flwr::proto::PushTaskResResponse, results_), ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::Run, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::Run, run_id_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::Run, fab_id_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::Run, fab_version_), + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::GetRunRequest, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::GetRunRequest, run_id_), + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::GetRunResponse, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::GetRunResponse, run_), + ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::flwr::proto::Reconnect, _internal_metadata_), ~0u, // no _extensions_ ~0u, // no _oneof_case_ @@ -222,15 +325,20 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_flwr_2fproto_2ffleet_2eproto:: }; static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { { 0, -1, -1, 
sizeof(::flwr::proto::CreateNodeRequest)}, - { 6, -1, -1, sizeof(::flwr::proto::CreateNodeResponse)}, - { 13, -1, -1, sizeof(::flwr::proto::DeleteNodeRequest)}, - { 20, -1, -1, sizeof(::flwr::proto::DeleteNodeResponse)}, - { 26, -1, -1, sizeof(::flwr::proto::PullTaskInsRequest)}, - { 34, -1, -1, sizeof(::flwr::proto::PullTaskInsResponse)}, - { 42, -1, -1, sizeof(::flwr::proto::PushTaskResRequest)}, - { 49, 57, -1, sizeof(::flwr::proto::PushTaskResResponse_ResultsEntry_DoNotUse)}, - { 59, -1, -1, sizeof(::flwr::proto::PushTaskResResponse)}, - { 67, -1, -1, sizeof(::flwr::proto::Reconnect)}, + { 7, -1, -1, sizeof(::flwr::proto::CreateNodeResponse)}, + { 14, -1, -1, sizeof(::flwr::proto::DeleteNodeRequest)}, + { 21, -1, -1, sizeof(::flwr::proto::DeleteNodeResponse)}, + { 27, -1, -1, sizeof(::flwr::proto::PingRequest)}, + { 35, -1, -1, sizeof(::flwr::proto::PingResponse)}, + { 42, -1, -1, sizeof(::flwr::proto::PullTaskInsRequest)}, + { 50, -1, -1, sizeof(::flwr::proto::PullTaskInsResponse)}, + { 58, -1, -1, sizeof(::flwr::proto::PushTaskResRequest)}, + { 65, 73, -1, sizeof(::flwr::proto::PushTaskResResponse_ResultsEntry_DoNotUse)}, + { 75, -1, -1, sizeof(::flwr::proto::PushTaskResResponse)}, + { 83, -1, -1, sizeof(::flwr::proto::Run)}, + { 92, -1, -1, sizeof(::flwr::proto::GetRunRequest)}, + { 99, -1, -1, sizeof(::flwr::proto::GetRunResponse)}, + { 106, -1, -1, sizeof(::flwr::proto::Reconnect)}, }; static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = { @@ -238,41 +346,56 @@ static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = reinterpret_cast(&::flwr::proto::_CreateNodeResponse_default_instance_), reinterpret_cast(&::flwr::proto::_DeleteNodeRequest_default_instance_), reinterpret_cast(&::flwr::proto::_DeleteNodeResponse_default_instance_), + reinterpret_cast(&::flwr::proto::_PingRequest_default_instance_), + reinterpret_cast(&::flwr::proto::_PingResponse_default_instance_), 
reinterpret_cast(&::flwr::proto::_PullTaskInsRequest_default_instance_), reinterpret_cast(&::flwr::proto::_PullTaskInsResponse_default_instance_), reinterpret_cast(&::flwr::proto::_PushTaskResRequest_default_instance_), reinterpret_cast(&::flwr::proto::_PushTaskResResponse_ResultsEntry_DoNotUse_default_instance_), reinterpret_cast(&::flwr::proto::_PushTaskResResponse_default_instance_), + reinterpret_cast(&::flwr::proto::_Run_default_instance_), + reinterpret_cast(&::flwr::proto::_GetRunRequest_default_instance_), + reinterpret_cast(&::flwr::proto::_GetRunResponse_default_instance_), reinterpret_cast(&::flwr::proto::_Reconnect_default_instance_), }; const char descriptor_table_protodef_flwr_2fproto_2ffleet_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = "\n\026flwr/proto/fleet.proto\022\nflwr.proto\032\025fl" "wr/proto/node.proto\032\025flwr/proto/task.pro" - "to\"\023\n\021CreateNodeRequest\"4\n\022CreateNodeRes" - "ponse\022\036\n\004node\030\001 \001(\0132\020.flwr.proto.Node\"3\n" - "\021DeleteNodeRequest\022\036\n\004node\030\001 \001(\0132\020.flwr." - "proto.Node\"\024\n\022DeleteNodeResponse\"F\n\022Pull" - "TaskInsRequest\022\036\n\004node\030\001 \001(\0132\020.flwr.prot" - "o.Node\022\020\n\010task_ids\030\002 \003(\t\"k\n\023PullTaskInsR" - "esponse\022(\n\treconnect\030\001 \001(\0132\025.flwr.proto." - "Reconnect\022*\n\rtask_ins_list\030\002 \003(\0132\023.flwr." - "proto.TaskIns\"@\n\022PushTaskResRequest\022*\n\rt" - "ask_res_list\030\001 \003(\0132\023.flwr.proto.TaskRes\"" - "\256\001\n\023PushTaskResResponse\022(\n\treconnect\030\001 \001" - "(\0132\025.flwr.proto.Reconnect\022=\n\007results\030\002 \003" - "(\0132,.flwr.proto.PushTaskResResponse.Resu" - "ltsEntry\032.\n\014ResultsEntry\022\013\n\003key\030\001 \001(\t\022\r\n" - "\005value\030\002 \001(\r:\0028\001\"\036\n\tReconnect\022\021\n\treconne" - "ct\030\001 \001(\0042\311\002\n\005Fleet\022M\n\nCreateNode\022\035.flwr." 
- "proto.CreateNodeRequest\032\036.flwr.proto.Cre" - "ateNodeResponse\"\000\022M\n\nDeleteNode\022\035.flwr.p" - "roto.DeleteNodeRequest\032\036.flwr.proto.Dele" - "teNodeResponse\"\000\022P\n\013PullTaskIns\022\036.flwr.p" - "roto.PullTaskInsRequest\032\037.flwr.proto.Pul" - "lTaskInsResponse\"\000\022P\n\013PushTaskRes\022\036.flwr" - ".proto.PushTaskResRequest\032\037.flwr.proto.P" - "ushTaskResResponse\"\000b\006proto3" + "to\"*\n\021CreateNodeRequest\022\025\n\rping_interval" + "\030\001 \001(\001\"4\n\022CreateNodeResponse\022\036\n\004node\030\001 \001" + "(\0132\020.flwr.proto.Node\"3\n\021DeleteNodeReques" + "t\022\036\n\004node\030\001 \001(\0132\020.flwr.proto.Node\"\024\n\022Del" + "eteNodeResponse\"D\n\013PingRequest\022\036\n\004node\030\001" + " \001(\0132\020.flwr.proto.Node\022\025\n\rping_interval\030" + "\002 \001(\001\"\037\n\014PingResponse\022\017\n\007success\030\001 \001(\010\"F" + "\n\022PullTaskInsRequest\022\036\n\004node\030\001 \001(\0132\020.flw" + "r.proto.Node\022\020\n\010task_ids\030\002 \003(\t\"k\n\023PullTa" + "skInsResponse\022(\n\treconnect\030\001 \001(\0132\025.flwr." + "proto.Reconnect\022*\n\rtask_ins_list\030\002 \003(\0132\023" + ".flwr.proto.TaskIns\"@\n\022PushTaskResReques" + "t\022*\n\rtask_res_list\030\001 \003(\0132\023.flwr.proto.Ta" + "skRes\"\256\001\n\023PushTaskResResponse\022(\n\treconne" + "ct\030\001 \001(\0132\025.flwr.proto.Reconnect\022=\n\007resul" + "ts\030\002 \003(\0132,.flwr.proto.PushTaskResRespons" + "e.ResultsEntry\032.\n\014ResultsEntry\022\013\n\003key\030\001 " + "\001(\t\022\r\n\005value\030\002 \001(\r:\0028\001\":\n\003Run\022\016\n\006run_id\030" + "\001 \001(\022\022\016\n\006fab_id\030\002 \001(\t\022\023\n\013fab_version\030\003 \001" + "(\t\"\037\n\rGetRunRequest\022\016\n\006run_id\030\001 \001(\022\".\n\016G" + "etRunResponse\022\034\n\003run\030\001 \001(\0132\017.flwr.proto." 
+ "Run\"\036\n\tReconnect\022\021\n\treconnect\030\001 \001(\0042\311\003\n\005" + "Fleet\022M\n\nCreateNode\022\035.flwr.proto.CreateN" + "odeRequest\032\036.flwr.proto.CreateNodeRespon" + "se\"\000\022M\n\nDeleteNode\022\035.flwr.proto.DeleteNo" + "deRequest\032\036.flwr.proto.DeleteNodeRespons" + "e\"\000\022;\n\004Ping\022\027.flwr.proto.PingRequest\032\030.f" + "lwr.proto.PingResponse\"\000\022P\n\013PullTaskIns\022" + "\036.flwr.proto.PullTaskInsRequest\032\037.flwr.p" + "roto.PullTaskInsResponse\"\000\022P\n\013PushTaskRe" + "s\022\036.flwr.proto.PushTaskResRequest\032\037.flwr" + ".proto.PushTaskResResponse\"\000\022A\n\006GetRun\022\031" + ".flwr.proto.GetRunRequest\032\032.flwr.proto.G" + "etRunResponse\"\000b\006proto3" ; static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_flwr_2fproto_2ffleet_2eproto_deps[2] = { &::descriptor_table_flwr_2fproto_2fnode_2eproto, @@ -280,8 +403,8 @@ static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor }; static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_flwr_2fproto_2ffleet_2eproto_once; const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_flwr_2fproto_2ffleet_2eproto = { - false, false, 1028, descriptor_table_protodef_flwr_2fproto_2ffleet_2eproto, "flwr/proto/fleet.proto", - &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, descriptor_table_flwr_2fproto_2ffleet_2eproto_deps, 2, 10, + false, false, 1423, descriptor_table_protodef_flwr_2fproto_2ffleet_2eproto, "flwr/proto/fleet.proto", + &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, descriptor_table_flwr_2fproto_2ffleet_2eproto_deps, 2, 15, schemas, file_default_instances, TableStruct_flwr_2fproto_2ffleet_2eproto::offsets, file_level_metadata_flwr_2fproto_2ffleet_2eproto, file_level_enum_descriptors_flwr_2fproto_2ffleet_2eproto, file_level_service_descriptors_flwr_2fproto_2ffleet_2eproto, }; @@ -302,30 +425,169 @@ class CreateNodeRequest::_Internal { 
CreateNodeRequest::CreateNodeRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase(arena, is_message_owned) { + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } // @@protoc_insertion_point(arena_constructor:flwr.proto.CreateNodeRequest) } CreateNodeRequest::CreateNodeRequest(const CreateNodeRequest& from) - : ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase() { + : ::PROTOBUF_NAMESPACE_ID::Message() { _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + ping_interval_ = from.ping_interval_; // @@protoc_insertion_point(copy_constructor:flwr.proto.CreateNodeRequest) } +void CreateNodeRequest::SharedCtor() { +ping_interval_ = 0; +} + +CreateNodeRequest::~CreateNodeRequest() { + // @@protoc_insertion_point(destructor:flwr.proto.CreateNodeRequest) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void CreateNodeRequest::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void CreateNodeRequest::ArenaDtor(void* object) { + CreateNodeRequest* _this = reinterpret_cast< CreateNodeRequest* >(object); + (void)_this; +} +void CreateNodeRequest::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void CreateNodeRequest::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void CreateNodeRequest::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.CreateNodeRequest) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + ping_interval_ = 0; + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* CreateNodeRequest::_InternalParse(const char* ptr, 
::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // double ping_interval = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 9)) { + ping_interval_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr); + ptr += sizeof(double); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* CreateNodeRequest::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.CreateNodeRequest) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // double ping_interval = 1; + if (!(this->_internal_ping_interval() <= 0 && this->_internal_ping_interval() >= 0)) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteDoubleToArray(1, this->_internal_ping_interval(), target); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // 
@@protoc_insertion_point(serialize_to_array_end:flwr.proto.CreateNodeRequest) + return target; +} + +size_t CreateNodeRequest::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.CreateNodeRequest) + size_t total_size = 0; + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + // double ping_interval = 1; + if (!(this->_internal_ping_interval() <= 0 && this->_internal_ping_interval() >= 0)) { + total_size += 1 + 8; + } + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} const ::PROTOBUF_NAMESPACE_ID::Message::ClassData CreateNodeRequest::_class_data_ = { - ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyImpl, - ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeImpl, + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + CreateNodeRequest::MergeImpl }; const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*CreateNodeRequest::GetClassData() const { return &_class_data_; } +void CreateNodeRequest::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + +void CreateNodeRequest::MergeFrom(const CreateNodeRequest& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.CreateNodeRequest) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + if (!(from._internal_ping_interval() <= 0 && from._internal_ping_interval() >= 0)) { + _internal_set_ping_interval(from._internal_ping_interval()); + } + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} +void CreateNodeRequest::CopyFrom(const CreateNodeRequest& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.CreateNodeRequest) + if (&from == this) return; + Clear(); + MergeFrom(from); +} +bool 
CreateNodeRequest::IsInitialized() const { + return true; +} +void CreateNodeRequest::InternalSwap(CreateNodeRequest* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + swap(ping_interval_, other->ping_interval_); +} ::PROTOBUF_NAMESPACE_ID::Metadata CreateNodeRequest::GetMetadata() const { return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( @@ -776,84 +1038,86 @@ ::PROTOBUF_NAMESPACE_ID::Metadata DeleteNodeResponse::GetMetadata() const { // =================================================================== -class PullTaskInsRequest::_Internal { +class PingRequest::_Internal { public: - static const ::flwr::proto::Node& node(const PullTaskInsRequest* msg); + static const ::flwr::proto::Node& node(const PingRequest* msg); }; const ::flwr::proto::Node& -PullTaskInsRequest::_Internal::node(const PullTaskInsRequest* msg) { +PingRequest::_Internal::node(const PingRequest* msg) { return *msg->node_; } -void PullTaskInsRequest::clear_node() { +void PingRequest::clear_node() { if (GetArenaForAllocation() == nullptr && node_ != nullptr) { delete node_; } node_ = nullptr; } -PullTaskInsRequest::PullTaskInsRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, +PingRequest::PingRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), - task_ids_(arena) { + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { SharedCtor(); if (!is_message_owned) { RegisterArenaDtor(arena); } - // @@protoc_insertion_point(arena_constructor:flwr.proto.PullTaskInsRequest) + // @@protoc_insertion_point(arena_constructor:flwr.proto.PingRequest) } -PullTaskInsRequest::PullTaskInsRequest(const PullTaskInsRequest& from) - : ::PROTOBUF_NAMESPACE_ID::Message(), - task_ids_(from.task_ids_) { +PingRequest::PingRequest(const PingRequest& from) + : ::PROTOBUF_NAMESPACE_ID::Message() { 
_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); if (from._internal_has_node()) { node_ = new ::flwr::proto::Node(*from.node_); } else { node_ = nullptr; } - // @@protoc_insertion_point(copy_constructor:flwr.proto.PullTaskInsRequest) + ping_interval_ = from.ping_interval_; + // @@protoc_insertion_point(copy_constructor:flwr.proto.PingRequest) } -void PullTaskInsRequest::SharedCtor() { -node_ = nullptr; +void PingRequest::SharedCtor() { +::memset(reinterpret_cast(this) + static_cast( + reinterpret_cast(&node_) - reinterpret_cast(this)), + 0, static_cast(reinterpret_cast(&ping_interval_) - + reinterpret_cast(&node_)) + sizeof(ping_interval_)); } -PullTaskInsRequest::~PullTaskInsRequest() { - // @@protoc_insertion_point(destructor:flwr.proto.PullTaskInsRequest) +PingRequest::~PingRequest() { + // @@protoc_insertion_point(destructor:flwr.proto.PingRequest) if (GetArenaForAllocation() != nullptr) return; SharedDtor(); _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } -inline void PullTaskInsRequest::SharedDtor() { +inline void PingRequest::SharedDtor() { GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); if (this != internal_default_instance()) delete node_; } -void PullTaskInsRequest::ArenaDtor(void* object) { - PullTaskInsRequest* _this = reinterpret_cast< PullTaskInsRequest* >(object); +void PingRequest::ArenaDtor(void* object) { + PingRequest* _this = reinterpret_cast< PingRequest* >(object); (void)_this; } -void PullTaskInsRequest::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +void PingRequest::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { } -void PullTaskInsRequest::SetCachedSize(int size) const { +void PingRequest::SetCachedSize(int size) const { _cached_size_.Set(size); } -void PullTaskInsRequest::Clear() { -// @@protoc_insertion_point(message_clear_start:flwr.proto.PullTaskInsRequest) +void PingRequest::Clear() { +// 
@@protoc_insertion_point(message_clear_start:flwr.proto.PingRequest) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; - task_ids_.Clear(); if (GetArenaForAllocation() == nullptr && node_ != nullptr) { delete node_; } node_ = nullptr; + ping_interval_ = 0; _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } -const char* PullTaskInsRequest::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +const char* PingRequest::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure while (!ctx->Done(&ptr)) { ::PROTOBUF_NAMESPACE_ID::uint32 tag; @@ -867,18 +1131,11 @@ const char* PullTaskInsRequest::_InternalParse(const char* ptr, ::PROTOBUF_NAMES } else goto handle_unusual; continue; - // repeated string task_ids = 2; + // double ping_interval = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { - ptr -= 1; - do { - ptr += 1; - auto str = _internal_add_task_ids(); - ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); - CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.PullTaskInsRequest.task_ids")); - CHK_(ptr); - if (!ctx->DataAvailable(ptr)) break; - } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr)); + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 17)) { + ping_interval_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr); + ptr += sizeof(double); } else goto handle_unusual; continue; @@ -905,9 +1162,9 @@ const char* PullTaskInsRequest::_InternalParse(const char* ptr, ::PROTOBUF_NAMES #undef CHK_ } -::PROTOBUF_NAMESPACE_ID::uint8* PullTaskInsRequest::_InternalSerialize( +::PROTOBUF_NAMESPACE_ID::uint8* PingRequest::_InternalSerialize( ::PROTOBUF_NAMESPACE_ID::uint8* target, 
::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { - // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.PullTaskInsRequest) + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.PingRequest) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; (void) cached_has_bits; @@ -919,40 +1176,28 @@ ::PROTOBUF_NAMESPACE_ID::uint8* PullTaskInsRequest::_InternalSerialize( 1, _Internal::node(this), target, stream); } - // repeated string task_ids = 2; - for (int i = 0, n = this->_internal_task_ids_size(); i < n; i++) { - const auto& s = this->_internal_task_ids(i); - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( - s.data(), static_cast(s.length()), - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, - "flwr.proto.PullTaskInsRequest.task_ids"); - target = stream->WriteString(2, s, target); + // double ping_interval = 2; + if (!(this->_internal_ping_interval() <= 0 && this->_internal_ping_interval() >= 0)) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteDoubleToArray(2, this->_internal_ping_interval(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); } - // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.PullTaskInsRequest) + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.PingRequest) return target; } -size_t PullTaskInsRequest::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:flwr.proto.PullTaskInsRequest) +size_t PingRequest::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.PingRequest) size_t total_size = 0; ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; // Prevent 
compiler warnings about cached_has_bits being unused (void) cached_has_bits; - // repeated string task_ids = 2; - total_size += 1 * - ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(task_ids_.size()); - for (int i = 0, n = task_ids_.size(); i < n; i++) { - total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( - task_ids_.Get(i)); - } - // .flwr.proto.Node node = 1; if (this->_internal_has_node()) { total_size += 1 + @@ -960,54 +1205,65 @@ size_t PullTaskInsRequest::ByteSizeLong() const { *node_); } + // double ping_interval = 2; + if (!(this->_internal_ping_interval() <= 0 && this->_internal_ping_interval() >= 0)) { + total_size += 1 + 8; + } + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); } -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData PullTaskInsRequest::_class_data_ = { +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData PingRequest::_class_data_ = { ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, - PullTaskInsRequest::MergeImpl + PingRequest::MergeImpl }; -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*PullTaskInsRequest::GetClassData() const { return &_class_data_; } +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*PingRequest::GetClassData() const { return &_class_data_; } -void PullTaskInsRequest::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, +void PingRequest::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from) { - static_cast(to)->MergeFrom( - static_cast(from)); + static_cast(to)->MergeFrom( + static_cast(from)); } -void PullTaskInsRequest::MergeFrom(const PullTaskInsRequest& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.PullTaskInsRequest) +void PingRequest::MergeFrom(const PingRequest& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.PingRequest) GOOGLE_DCHECK_NE(&from, this); ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; (void) cached_has_bits; - task_ids_.MergeFrom(from.task_ids_); 
if (from._internal_has_node()) { _internal_mutable_node()->::flwr::proto::Node::MergeFrom(from._internal_node()); } + if (!(from._internal_ping_interval() <= 0 && from._internal_ping_interval() >= 0)) { + _internal_set_ping_interval(from._internal_ping_interval()); + } _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); } -void PullTaskInsRequest::CopyFrom(const PullTaskInsRequest& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.PullTaskInsRequest) +void PingRequest::CopyFrom(const PingRequest& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.PingRequest) if (&from == this) return; Clear(); MergeFrom(from); } -bool PullTaskInsRequest::IsInitialized() const { +bool PingRequest::IsInitialized() const { return true; } -void PullTaskInsRequest::InternalSwap(PullTaskInsRequest* other) { +void PingRequest::InternalSwap(PingRequest* other) { using std::swap; _internal_metadata_.InternalSwap(&other->_internal_metadata_); - task_ids_.InternalSwap(&other->task_ids_); - swap(node_, other->node_); + ::PROTOBUF_NAMESPACE_ID::internal::memswap< + PROTOBUF_FIELD_OFFSET(PingRequest, ping_interval_) + + sizeof(PingRequest::ping_interval_) + - PROTOBUF_FIELD_OFFSET(PingRequest, node_)>( + reinterpret_cast(&node_), + reinterpret_cast(&other->node_)); } -::PROTOBUF_NAMESPACE_ID::Metadata PullTaskInsRequest::GetMetadata() const { +::PROTOBUF_NAMESPACE_ID::Metadata PingRequest::GetMetadata() const { return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, file_level_metadata_flwr_2fproto_2ffleet_2eproto[4]); @@ -1015,101 +1271,284 @@ ::PROTOBUF_NAMESPACE_ID::Metadata PullTaskInsRequest::GetMetadata() const { // =================================================================== -class PullTaskInsResponse::_Internal { +class PingResponse::_Internal { public: - static 
const ::flwr::proto::Reconnect& reconnect(const PullTaskInsResponse* msg); }; -const ::flwr::proto::Reconnect& -PullTaskInsResponse::_Internal::reconnect(const PullTaskInsResponse* msg) { - return *msg->reconnect_; -} -void PullTaskInsResponse::clear_task_ins_list() { - task_ins_list_.Clear(); -} -PullTaskInsResponse::PullTaskInsResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, +PingResponse::PingResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), - task_ins_list_(arena) { + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { SharedCtor(); if (!is_message_owned) { RegisterArenaDtor(arena); } - // @@protoc_insertion_point(arena_constructor:flwr.proto.PullTaskInsResponse) + // @@protoc_insertion_point(arena_constructor:flwr.proto.PingResponse) } -PullTaskInsResponse::PullTaskInsResponse(const PullTaskInsResponse& from) - : ::PROTOBUF_NAMESPACE_ID::Message(), - task_ins_list_(from.task_ins_list_) { +PingResponse::PingResponse(const PingResponse& from) + : ::PROTOBUF_NAMESPACE_ID::Message() { _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - if (from._internal_has_reconnect()) { - reconnect_ = new ::flwr::proto::Reconnect(*from.reconnect_); + success_ = from.success_; + // @@protoc_insertion_point(copy_constructor:flwr.proto.PingResponse) +} + +void PingResponse::SharedCtor() { +success_ = false; +} + +PingResponse::~PingResponse() { + // @@protoc_insertion_point(destructor:flwr.proto.PingResponse) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void PingResponse::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void PingResponse::ArenaDtor(void* object) { + PingResponse* _this = reinterpret_cast< PingResponse* >(object); + (void)_this; +} +void 
PingResponse::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void PingResponse::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void PingResponse::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.PingResponse) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + success_ = false; + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* PingResponse::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // bool success = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) { + success_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* PingResponse::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.PingResponse) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // bool success = 1; + if (this->_internal_success() != 0) { + target = stream->EnsureSpace(target); + target = 
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(1, this->_internal_success(), target); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.PingResponse) + return target; +} + +size_t PingResponse::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.PingResponse) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // bool success = 1; + if (this->_internal_success() != 0) { + total_size += 1 + 1; + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData PingResponse::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + PingResponse::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*PingResponse::GetClassData() const { return &_class_data_; } + +void PingResponse::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void PingResponse::MergeFrom(const PingResponse& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.PingResponse) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + if (from._internal_success() != 0) { + _internal_set_success(from._internal_success()); + } + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void PingResponse::CopyFrom(const PingResponse& from) { +// 
@@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.PingResponse) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool PingResponse::IsInitialized() const { + return true; +} + +void PingResponse::InternalSwap(PingResponse* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + swap(success_, other->success_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata PingResponse::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, + file_level_metadata_flwr_2fproto_2ffleet_2eproto[5]); +} + +// =================================================================== + +class PullTaskInsRequest::_Internal { + public: + static const ::flwr::proto::Node& node(const PullTaskInsRequest* msg); +}; + +const ::flwr::proto::Node& +PullTaskInsRequest::_Internal::node(const PullTaskInsRequest* msg) { + return *msg->node_; +} +void PullTaskInsRequest::clear_node() { + if (GetArenaForAllocation() == nullptr && node_ != nullptr) { + delete node_; + } + node_ = nullptr; +} +PullTaskInsRequest::PullTaskInsRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + task_ids_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.PullTaskInsRequest) +} +PullTaskInsRequest::PullTaskInsRequest(const PullTaskInsRequest& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + task_ids_(from.task_ids_) { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + if (from._internal_has_node()) { + node_ = new ::flwr::proto::Node(*from.node_); } else { - reconnect_ = nullptr; + node_ = nullptr; } - // @@protoc_insertion_point(copy_constructor:flwr.proto.PullTaskInsResponse) + // 
@@protoc_insertion_point(copy_constructor:flwr.proto.PullTaskInsRequest) } -void PullTaskInsResponse::SharedCtor() { -reconnect_ = nullptr; +void PullTaskInsRequest::SharedCtor() { +node_ = nullptr; } -PullTaskInsResponse::~PullTaskInsResponse() { - // @@protoc_insertion_point(destructor:flwr.proto.PullTaskInsResponse) +PullTaskInsRequest::~PullTaskInsRequest() { + // @@protoc_insertion_point(destructor:flwr.proto.PullTaskInsRequest) if (GetArenaForAllocation() != nullptr) return; SharedDtor(); _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } -inline void PullTaskInsResponse::SharedDtor() { +inline void PullTaskInsRequest::SharedDtor() { GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); - if (this != internal_default_instance()) delete reconnect_; + if (this != internal_default_instance()) delete node_; } -void PullTaskInsResponse::ArenaDtor(void* object) { - PullTaskInsResponse* _this = reinterpret_cast< PullTaskInsResponse* >(object); +void PullTaskInsRequest::ArenaDtor(void* object) { + PullTaskInsRequest* _this = reinterpret_cast< PullTaskInsRequest* >(object); (void)_this; } -void PullTaskInsResponse::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +void PullTaskInsRequest::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { } -void PullTaskInsResponse::SetCachedSize(int size) const { +void PullTaskInsRequest::SetCachedSize(int size) const { _cached_size_.Set(size); } -void PullTaskInsResponse::Clear() { -// @@protoc_insertion_point(message_clear_start:flwr.proto.PullTaskInsResponse) +void PullTaskInsRequest::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.PullTaskInsRequest) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; - task_ins_list_.Clear(); - if (GetArenaForAllocation() == nullptr && reconnect_ != nullptr) { - delete reconnect_; + task_ids_.Clear(); + if (GetArenaForAllocation() == nullptr && node_ != 
nullptr) { + delete node_; } - reconnect_ = nullptr; + node_ = nullptr; _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } -const char* PullTaskInsResponse::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +const char* PullTaskInsRequest::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure while (!ctx->Done(&ptr)) { ::PROTOBUF_NAMESPACE_ID::uint32 tag; ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); switch (tag >> 3) { - // .flwr.proto.Reconnect reconnect = 1; + // .flwr.proto.Node node = 1; case 1: if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { - ptr = ctx->ParseMessage(_internal_mutable_reconnect(), ptr); + ptr = ctx->ParseMessage(_internal_mutable_node(), ptr); CHK_(ptr); } else goto handle_unusual; continue; - // repeated .flwr.proto.TaskIns task_ins_list = 2; + // repeated string task_ids = 2; case 2: if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { ptr -= 1; do { ptr += 1; - ptr = ctx->ParseMessage(_internal_add_task_ins_list(), ptr); + auto str = _internal_add_task_ids(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.PullTaskInsRequest.task_ids")); CHK_(ptr); if (!ctx->DataAvailable(ptr)) break; } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr)); @@ -1139,187 +1578,1149 @@ const char* PullTaskInsResponse::_InternalParse(const char* ptr, ::PROTOBUF_NAME #undef CHK_ } -::PROTOBUF_NAMESPACE_ID::uint8* PullTaskInsResponse::_InternalSerialize( +::PROTOBUF_NAMESPACE_ID::uint8* PullTaskInsRequest::_InternalSerialize( ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { - // 
@@protoc_insertion_point(serialize_to_array_start:flwr.proto.PullTaskInsResponse) + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.PullTaskInsRequest) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; (void) cached_has_bits; - // .flwr.proto.Reconnect reconnect = 1; - if (this->_internal_has_reconnect()) { + // .flwr.proto.Node node = 1; + if (this->_internal_has_node()) { target = stream->EnsureSpace(target); target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: InternalWriteMessage( - 1, _Internal::reconnect(this), target, stream); + 1, _Internal::node(this), target, stream); } - // repeated .flwr.proto.TaskIns task_ins_list = 2; - for (unsigned int i = 0, - n = static_cast(this->_internal_task_ins_list_size()); i < n; i++) { - target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage(2, this->_internal_task_ins_list(i), target, stream); + // repeated string task_ids = 2; + for (int i = 0, n = this->_internal_task_ids_size(); i < n; i++) { + const auto& s = this->_internal_task_ids(i); + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + s.data(), static_cast(s.length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.PullTaskInsRequest.task_ids"); + target = stream->WriteString(2, s, target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); } - // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.PullTaskInsResponse) + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.PullTaskInsRequest) return target; } -size_t PullTaskInsResponse::ByteSizeLong() const { -// 
@@protoc_insertion_point(message_byte_size_start:flwr.proto.PullTaskInsResponse) +size_t PullTaskInsRequest::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.PullTaskInsRequest) size_t total_size = 0; ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; - // repeated .flwr.proto.TaskIns task_ins_list = 2; - total_size += 1UL * this->_internal_task_ins_list_size(); - for (const auto& msg : this->task_ins_list_) { - total_size += - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg); + // repeated string task_ids = 2; + total_size += 1 * + ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(task_ids_.size()); + for (int i = 0, n = task_ids_.size(); i < n; i++) { + total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + task_ids_.Get(i)); + } + + // .flwr.proto.Node node = 1; + if (this->_internal_has_node()) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + *node_); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData PullTaskInsRequest::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + PullTaskInsRequest::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*PullTaskInsRequest::GetClassData() const { return &_class_data_; } + +void PullTaskInsRequest::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void PullTaskInsRequest::MergeFrom(const PullTaskInsRequest& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.PullTaskInsRequest) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + task_ids_.MergeFrom(from.task_ids_); + if (from._internal_has_node()) { + 
_internal_mutable_node()->::flwr::proto::Node::MergeFrom(from._internal_node()); + } + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void PullTaskInsRequest::CopyFrom(const PullTaskInsRequest& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.PullTaskInsRequest) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool PullTaskInsRequest::IsInitialized() const { + return true; +} + +void PullTaskInsRequest::InternalSwap(PullTaskInsRequest* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + task_ids_.InternalSwap(&other->task_ids_); + swap(node_, other->node_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata PullTaskInsRequest::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, + file_level_metadata_flwr_2fproto_2ffleet_2eproto[6]); +} + +// =================================================================== + +class PullTaskInsResponse::_Internal { + public: + static const ::flwr::proto::Reconnect& reconnect(const PullTaskInsResponse* msg); +}; + +const ::flwr::proto::Reconnect& +PullTaskInsResponse::_Internal::reconnect(const PullTaskInsResponse* msg) { + return *msg->reconnect_; +} +void PullTaskInsResponse::clear_task_ins_list() { + task_ins_list_.Clear(); +} +PullTaskInsResponse::PullTaskInsResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + task_ins_list_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.PullTaskInsResponse) +} +PullTaskInsResponse::PullTaskInsResponse(const PullTaskInsResponse& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + task_ins_list_(from.task_ins_list_) { + 
_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + if (from._internal_has_reconnect()) { + reconnect_ = new ::flwr::proto::Reconnect(*from.reconnect_); + } else { + reconnect_ = nullptr; + } + // @@protoc_insertion_point(copy_constructor:flwr.proto.PullTaskInsResponse) +} + +void PullTaskInsResponse::SharedCtor() { +reconnect_ = nullptr; +} + +PullTaskInsResponse::~PullTaskInsResponse() { + // @@protoc_insertion_point(destructor:flwr.proto.PullTaskInsResponse) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void PullTaskInsResponse::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); + if (this != internal_default_instance()) delete reconnect_; +} + +void PullTaskInsResponse::ArenaDtor(void* object) { + PullTaskInsResponse* _this = reinterpret_cast< PullTaskInsResponse* >(object); + (void)_this; +} +void PullTaskInsResponse::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void PullTaskInsResponse::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void PullTaskInsResponse::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.PullTaskInsResponse) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + task_ins_list_.Clear(); + if (GetArenaForAllocation() == nullptr && reconnect_ != nullptr) { + delete reconnect_; + } + reconnect_ = nullptr; + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* PullTaskInsResponse::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // 
.flwr.proto.Reconnect reconnect = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr = ctx->ParseMessage(_internal_mutable_reconnect(), ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // repeated .flwr.proto.TaskIns task_ins_list = 2; + case 2: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { + ptr -= 1; + do { + ptr += 1; + ptr = ctx->ParseMessage(_internal_add_task_ins_list(), ptr); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr)); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* PullTaskInsResponse::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.PullTaskInsResponse) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // .flwr.proto.Reconnect reconnect = 1; + if (this->_internal_has_reconnect()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage( + 1, _Internal::reconnect(this), target, stream); + } + + // repeated .flwr.proto.TaskIns task_ins_list = 2; + for (unsigned int i = 0, + n = static_cast(this->_internal_task_ins_list_size()); i < n; i++) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage(2, 
this->_internal_task_ins_list(i), target, stream); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.PullTaskInsResponse) + return target; +} + +size_t PullTaskInsResponse::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.PullTaskInsResponse) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // repeated .flwr.proto.TaskIns task_ins_list = 2; + total_size += 1UL * this->_internal_task_ins_list_size(); + for (const auto& msg : this->task_ins_list_) { + total_size += + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg); + } + + // .flwr.proto.Reconnect reconnect = 1; + if (this->_internal_has_reconnect()) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + *reconnect_); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData PullTaskInsResponse::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + PullTaskInsResponse::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*PullTaskInsResponse::GetClassData() const { return &_class_data_; } + +void PullTaskInsResponse::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void PullTaskInsResponse::MergeFrom(const PullTaskInsResponse& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.PullTaskInsResponse) + 
GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + task_ins_list_.MergeFrom(from.task_ins_list_); + if (from._internal_has_reconnect()) { + _internal_mutable_reconnect()->::flwr::proto::Reconnect::MergeFrom(from._internal_reconnect()); + } + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void PullTaskInsResponse::CopyFrom(const PullTaskInsResponse& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.PullTaskInsResponse) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool PullTaskInsResponse::IsInitialized() const { + return true; +} + +void PullTaskInsResponse::InternalSwap(PullTaskInsResponse* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + task_ins_list_.InternalSwap(&other->task_ins_list_); + swap(reconnect_, other->reconnect_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata PullTaskInsResponse::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, + file_level_metadata_flwr_2fproto_2ffleet_2eproto[7]); +} + +// =================================================================== + +class PushTaskResRequest::_Internal { + public: +}; + +void PushTaskResRequest::clear_task_res_list() { + task_res_list_.Clear(); +} +PushTaskResRequest::PushTaskResRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + task_res_list_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.PushTaskResRequest) +} +PushTaskResRequest::PushTaskResRequest(const PushTaskResRequest& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + task_res_list_(from.task_res_list_) { + 
_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + // @@protoc_insertion_point(copy_constructor:flwr.proto.PushTaskResRequest) +} + +void PushTaskResRequest::SharedCtor() { +} + +PushTaskResRequest::~PushTaskResRequest() { + // @@protoc_insertion_point(destructor:flwr.proto.PushTaskResRequest) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void PushTaskResRequest::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void PushTaskResRequest::ArenaDtor(void* object) { + PushTaskResRequest* _this = reinterpret_cast< PushTaskResRequest* >(object); + (void)_this; +} +void PushTaskResRequest::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void PushTaskResRequest::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void PushTaskResRequest::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.PushTaskResRequest) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + task_res_list_.Clear(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* PushTaskResRequest::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // repeated .flwr.proto.TaskRes task_res_list = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr -= 1; + do { + ptr += 1; + ptr = ctx->ParseMessage(_internal_add_task_res_list(), ptr); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); + } else + 
goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* PushTaskResRequest::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.PushTaskResRequest) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // repeated .flwr.proto.TaskRes task_res_list = 1; + for (unsigned int i = 0, + n = static_cast(this->_internal_task_res_list_size()); i < n; i++) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage(1, this->_internal_task_res_list(i), target, stream); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.PushTaskResRequest) + return target; +} + +size_t PushTaskResRequest::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.PushTaskResRequest) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // repeated .flwr.proto.TaskRes task_res_list = 1; + total_size += 1UL * 
this->_internal_task_res_list_size(); + for (const auto& msg : this->task_res_list_) { + total_size += + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData PushTaskResRequest::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + PushTaskResRequest::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*PushTaskResRequest::GetClassData() const { return &_class_data_; } + +void PushTaskResRequest::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void PushTaskResRequest::MergeFrom(const PushTaskResRequest& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.PushTaskResRequest) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + task_res_list_.MergeFrom(from.task_res_list_); + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void PushTaskResRequest::CopyFrom(const PushTaskResRequest& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.PushTaskResRequest) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool PushTaskResRequest::IsInitialized() const { + return true; +} + +void PushTaskResRequest::InternalSwap(PushTaskResRequest* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + task_res_list_.InternalSwap(&other->task_res_list_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata PushTaskResRequest::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, + file_level_metadata_flwr_2fproto_2ffleet_2eproto[8]); +} + +// 
=================================================================== + +PushTaskResResponse_ResultsEntry_DoNotUse::PushTaskResResponse_ResultsEntry_DoNotUse() {} +PushTaskResResponse_ResultsEntry_DoNotUse::PushTaskResResponse_ResultsEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena) + : SuperType(arena) {} +void PushTaskResResponse_ResultsEntry_DoNotUse::MergeFrom(const PushTaskResResponse_ResultsEntry_DoNotUse& other) { + MergeFromInternal(other); +} +::PROTOBUF_NAMESPACE_ID::Metadata PushTaskResResponse_ResultsEntry_DoNotUse::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, + file_level_metadata_flwr_2fproto_2ffleet_2eproto[9]); +} + +// =================================================================== + +class PushTaskResResponse::_Internal { + public: + static const ::flwr::proto::Reconnect& reconnect(const PushTaskResResponse* msg); +}; + +const ::flwr::proto::Reconnect& +PushTaskResResponse::_Internal::reconnect(const PushTaskResResponse* msg) { + return *msg->reconnect_; +} +PushTaskResResponse::PushTaskResResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + results_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.PushTaskResResponse) +} +PushTaskResResponse::PushTaskResResponse(const PushTaskResResponse& from) + : ::PROTOBUF_NAMESPACE_ID::Message() { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + results_.MergeFrom(from.results_); + if (from._internal_has_reconnect()) { + reconnect_ = new ::flwr::proto::Reconnect(*from.reconnect_); + } else { + reconnect_ = nullptr; + } + // @@protoc_insertion_point(copy_constructor:flwr.proto.PushTaskResResponse) +} + +void 
PushTaskResResponse::SharedCtor() { +reconnect_ = nullptr; +} + +PushTaskResResponse::~PushTaskResResponse() { + // @@protoc_insertion_point(destructor:flwr.proto.PushTaskResResponse) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void PushTaskResResponse::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); + if (this != internal_default_instance()) delete reconnect_; +} + +void PushTaskResResponse::ArenaDtor(void* object) { + PushTaskResResponse* _this = reinterpret_cast< PushTaskResResponse* >(object); + (void)_this; + _this->results_. ~MapField(); +} +inline void PushTaskResResponse::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena) { + if (arena != nullptr) { + arena->OwnCustomDestructor(this, &PushTaskResResponse::ArenaDtor); + } +} +void PushTaskResResponse::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void PushTaskResResponse::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.PushTaskResResponse) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + results_.Clear(); + if (GetArenaForAllocation() == nullptr && reconnect_ != nullptr) { + delete reconnect_; + } + reconnect_ = nullptr; + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* PushTaskResResponse::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // .flwr.proto.Reconnect reconnect = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr = ctx->ParseMessage(_internal_mutable_reconnect(), ptr); + 
CHK_(ptr); + } else + goto handle_unusual; + continue; + // map results = 2; + case 2: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { + ptr -= 1; + do { + ptr += 1; + ptr = ctx->ParseMessage(&results_, ptr); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr)); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* PushTaskResResponse::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.PushTaskResResponse) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // .flwr.proto.Reconnect reconnect = 1; + if (this->_internal_has_reconnect()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage( + 1, _Internal::reconnect(this), target, stream); + } + + // map results = 2; + if (!this->_internal_results().empty()) { + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >::const_pointer + ConstPtr; + typedef ConstPtr SortItem; + typedef ::PROTOBUF_NAMESPACE_ID::internal::CompareByDerefFirst Less; + struct Utf8Check { + static void Check(ConstPtr p) { + (void)p; + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + p->first.data(), static_cast(p->first.length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + 
"flwr.proto.PushTaskResResponse.ResultsEntry.key"); + } + }; + + if (stream->IsSerializationDeterministic() && + this->_internal_results().size() > 1) { + ::std::unique_ptr items( + new SortItem[this->_internal_results().size()]); + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >::size_type size_type; + size_type n = 0; + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >::const_iterator + it = this->_internal_results().begin(); + it != this->_internal_results().end(); ++it, ++n) { + items[static_cast(n)] = SortItem(&*it); + } + ::std::sort(&items[0], &items[static_cast(n)], Less()); + for (size_type i = 0; i < n; i++) { + target = PushTaskResResponse_ResultsEntry_DoNotUse::Funcs::InternalSerialize(2, items[static_cast(i)]->first, items[static_cast(i)]->second, target, stream); + Utf8Check::Check(&(*items[static_cast(i)])); + } + } else { + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >::const_iterator + it = this->_internal_results().begin(); + it != this->_internal_results().end(); ++it) { + target = PushTaskResResponse_ResultsEntry_DoNotUse::Funcs::InternalSerialize(2, it->first, it->second, target, stream); + Utf8Check::Check(&(*it)); + } + } + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.PushTaskResResponse) + return target; +} + +size_t PushTaskResResponse::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.PushTaskResResponse) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused 
+ (void) cached_has_bits; + + // map results = 2; + total_size += 1 * + ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_results_size()); + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >::const_iterator + it = this->_internal_results().begin(); + it != this->_internal_results().end(); ++it) { + total_size += PushTaskResResponse_ResultsEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second); + } + + // .flwr.proto.Reconnect reconnect = 1; + if (this->_internal_has_reconnect()) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + *reconnect_); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData PushTaskResResponse::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + PushTaskResResponse::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*PushTaskResResponse::GetClassData() const { return &_class_data_; } + +void PushTaskResResponse::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void PushTaskResResponse::MergeFrom(const PushTaskResResponse& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.PushTaskResResponse) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + results_.MergeFrom(from.results_); + if (from._internal_has_reconnect()) { + _internal_mutable_reconnect()->::flwr::proto::Reconnect::MergeFrom(from._internal_reconnect()); + } + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void PushTaskResResponse::CopyFrom(const PushTaskResResponse& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.PushTaskResResponse) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + 
+bool PushTaskResResponse::IsInitialized() const { + return true; +} + +void PushTaskResResponse::InternalSwap(PushTaskResResponse* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + results_.InternalSwap(&other->results_); + swap(reconnect_, other->reconnect_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata PushTaskResResponse::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, + file_level_metadata_flwr_2fproto_2ffleet_2eproto[10]); +} + +// =================================================================== + +class Run::_Internal { + public: +}; + +Run::Run(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.Run) +} +Run::Run(const Run& from) + : ::PROTOBUF_NAMESPACE_ID::Message() { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + fab_id_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from._internal_fab_id().empty()) { + fab_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_fab_id(), + GetArenaForAllocation()); + } + fab_version_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from._internal_fab_version().empty()) { + fab_version_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_fab_version(), + GetArenaForAllocation()); + } + run_id_ = from.run_id_; + // @@protoc_insertion_point(copy_constructor:flwr.proto.Run) +} + +void Run::SharedCtor() { +fab_id_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); 
+fab_version_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +run_id_ = int64_t{0}; +} + +Run::~Run() { + // @@protoc_insertion_point(destructor:flwr.proto.Run) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void Run::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); + fab_id_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + fab_version_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} + +void Run::ArenaDtor(void* object) { + Run* _this = reinterpret_cast< Run* >(object); + (void)_this; +} +void Run::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void Run::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void Run::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.Run) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + fab_id_.ClearToEmpty(); + fab_version_.ClearToEmpty(); + run_id_ = int64_t{0}; + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* Run::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // sint64 run_id = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) { + run_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // string fab_id = 2; + case 2: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { + auto str = 
_internal_mutable_fab_id(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.Run.fab_id")); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // string fab_version = 3; + case 3: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) { + auto str = _internal_mutable_fab_version(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.Run.fab_version")); + CHK_(ptr); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* Run::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.Run) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // sint64 run_id = 1; + if (this->_internal_run_id() != 0) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteSInt64ToArray(1, this->_internal_run_id(), target); + } + + // string fab_id = 2; + if (!this->_internal_fab_id().empty()) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->_internal_fab_id().data(), static_cast(this->_internal_fab_id().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.Run.fab_id"); + target = stream->WriteStringMaybeAliased( 
+ 2, this->_internal_fab_id(), target); + } + + // string fab_version = 3; + if (!this->_internal_fab_version().empty()) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->_internal_fab_version().data(), static_cast(this->_internal_fab_version().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.Run.fab_version"); + target = stream->WriteStringMaybeAliased( + 3, this->_internal_fab_version(), target); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.Run) + return target; +} + +size_t Run::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.Run) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // string fab_id = 2; + if (!this->_internal_fab_id().empty()) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->_internal_fab_id()); } - // .flwr.proto.Reconnect reconnect = 1; - if (this->_internal_has_reconnect()) { + // string fab_version = 3; + if (!this->_internal_fab_version().empty()) { total_size += 1 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( - *reconnect_); + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->_internal_fab_version()); + } + + // sint64 run_id = 1; + if (this->_internal_run_id() != 0) { + total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SInt64SizePlusOne(this->_internal_run_id()); } return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); } -const 
::PROTOBUF_NAMESPACE_ID::Message::ClassData PullTaskInsResponse::_class_data_ = { +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData Run::_class_data_ = { ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, - PullTaskInsResponse::MergeImpl + Run::MergeImpl }; -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*PullTaskInsResponse::GetClassData() const { return &_class_data_; } +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*Run::GetClassData() const { return &_class_data_; } -void PullTaskInsResponse::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, +void Run::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from) { - static_cast(to)->MergeFrom( - static_cast(from)); + static_cast(to)->MergeFrom( + static_cast(from)); } -void PullTaskInsResponse::MergeFrom(const PullTaskInsResponse& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.PullTaskInsResponse) +void Run::MergeFrom(const Run& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.Run) GOOGLE_DCHECK_NE(&from, this); ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; (void) cached_has_bits; - task_ins_list_.MergeFrom(from.task_ins_list_); - if (from._internal_has_reconnect()) { - _internal_mutable_reconnect()->::flwr::proto::Reconnect::MergeFrom(from._internal_reconnect()); + if (!from._internal_fab_id().empty()) { + _internal_set_fab_id(from._internal_fab_id()); + } + if (!from._internal_fab_version().empty()) { + _internal_set_fab_version(from._internal_fab_version()); + } + if (from._internal_run_id() != 0) { + _internal_set_run_id(from._internal_run_id()); } _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); } -void PullTaskInsResponse::CopyFrom(const PullTaskInsResponse& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.PullTaskInsResponse) +void Run::CopyFrom(const Run& from) { +// 
@@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.Run) if (&from == this) return; Clear(); MergeFrom(from); } -bool PullTaskInsResponse::IsInitialized() const { +bool Run::IsInitialized() const { return true; } -void PullTaskInsResponse::InternalSwap(PullTaskInsResponse* other) { +void Run::InternalSwap(Run* other) { using std::swap; + auto* lhs_arena = GetArenaForAllocation(); + auto* rhs_arena = other->GetArenaForAllocation(); _internal_metadata_.InternalSwap(&other->_internal_metadata_); - task_ins_list_.InternalSwap(&other->task_ins_list_); - swap(reconnect_, other->reconnect_); -} - -::PROTOBUF_NAMESPACE_ID::Metadata PullTaskInsResponse::GetMetadata() const { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap( + &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + &fab_id_, lhs_arena, + &other->fab_id_, rhs_arena + ); + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap( + &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + &fab_version_, lhs_arena, + &other->fab_version_, rhs_arena + ); + swap(run_id_, other->run_id_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata Run::GetMetadata() const { return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, - file_level_metadata_flwr_2fproto_2ffleet_2eproto[5]); + file_level_metadata_flwr_2fproto_2ffleet_2eproto[11]); } // =================================================================== -class PushTaskResRequest::_Internal { +class GetRunRequest::_Internal { public: }; -void PushTaskResRequest::clear_task_res_list() { - task_res_list_.Clear(); -} -PushTaskResRequest::PushTaskResRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, +GetRunRequest::GetRunRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), - task_res_list_(arena) { + : 
::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { SharedCtor(); if (!is_message_owned) { RegisterArenaDtor(arena); } - // @@protoc_insertion_point(arena_constructor:flwr.proto.PushTaskResRequest) + // @@protoc_insertion_point(arena_constructor:flwr.proto.GetRunRequest) } -PushTaskResRequest::PushTaskResRequest(const PushTaskResRequest& from) - : ::PROTOBUF_NAMESPACE_ID::Message(), - task_res_list_(from.task_res_list_) { +GetRunRequest::GetRunRequest(const GetRunRequest& from) + : ::PROTOBUF_NAMESPACE_ID::Message() { _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - // @@protoc_insertion_point(copy_constructor:flwr.proto.PushTaskResRequest) + run_id_ = from.run_id_; + // @@protoc_insertion_point(copy_constructor:flwr.proto.GetRunRequest) } -void PushTaskResRequest::SharedCtor() { +void GetRunRequest::SharedCtor() { +run_id_ = int64_t{0}; } -PushTaskResRequest::~PushTaskResRequest() { - // @@protoc_insertion_point(destructor:flwr.proto.PushTaskResRequest) +GetRunRequest::~GetRunRequest() { + // @@protoc_insertion_point(destructor:flwr.proto.GetRunRequest) if (GetArenaForAllocation() != nullptr) return; SharedDtor(); _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } -inline void PushTaskResRequest::SharedDtor() { +inline void GetRunRequest::SharedDtor() { GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } -void PushTaskResRequest::ArenaDtor(void* object) { - PushTaskResRequest* _this = reinterpret_cast< PushTaskResRequest* >(object); +void GetRunRequest::ArenaDtor(void* object) { + GetRunRequest* _this = reinterpret_cast< GetRunRequest* >(object); (void)_this; } -void PushTaskResRequest::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +void GetRunRequest::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { } -void PushTaskResRequest::SetCachedSize(int size) const { +void GetRunRequest::SetCachedSize(int size) const { _cached_size_.Set(size); } -void PushTaskResRequest::Clear() 
{ -// @@protoc_insertion_point(message_clear_start:flwr.proto.PushTaskResRequest) +void GetRunRequest::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.GetRunRequest) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; - task_res_list_.Clear(); + run_id_ = int64_t{0}; _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } -const char* PushTaskResRequest::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +const char* GetRunRequest::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure while (!ctx->Done(&ptr)) { ::PROTOBUF_NAMESPACE_ID::uint32 tag; ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); switch (tag >> 3) { - // repeated .flwr.proto.TaskRes task_res_list = 1; + // sint64 run_id = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { - ptr -= 1; - do { - ptr += 1; - ptr = ctx->ParseMessage(_internal_add_task_res_list(), ptr); - CHK_(ptr); - if (!ctx->DataAvailable(ptr)) break; - } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) { + run_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr); + CHK_(ptr); } else goto handle_unusual; continue; @@ -1346,210 +2747,174 @@ const char* PushTaskResRequest::_InternalParse(const char* ptr, ::PROTOBUF_NAMES #undef CHK_ } -::PROTOBUF_NAMESPACE_ID::uint8* PushTaskResRequest::_InternalSerialize( +::PROTOBUF_NAMESPACE_ID::uint8* GetRunRequest::_InternalSerialize( ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { - // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.PushTaskResRequest) + // 
@@protoc_insertion_point(serialize_to_array_start:flwr.proto.GetRunRequest) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; (void) cached_has_bits; - // repeated .flwr.proto.TaskRes task_res_list = 1; - for (unsigned int i = 0, - n = static_cast(this->_internal_task_res_list_size()); i < n; i++) { + // sint64 run_id = 1; + if (this->_internal_run_id() != 0) { target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage(1, this->_internal_task_res_list(i), target, stream); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteSInt64ToArray(1, this->_internal_run_id(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); } - // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.PushTaskResRequest) + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.GetRunRequest) return target; } -size_t PushTaskResRequest::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:flwr.proto.PushTaskResRequest) +size_t GetRunRequest::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.GetRunRequest) size_t total_size = 0; ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; - // repeated .flwr.proto.TaskRes task_res_list = 1; - total_size += 1UL * this->_internal_task_res_list_size(); - for (const auto& msg : this->task_res_list_) { - total_size += - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg); + // sint64 run_id = 1; + if (this->_internal_run_id() != 0) { + total_size += 
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SInt64SizePlusOne(this->_internal_run_id()); } return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); } -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData PushTaskResRequest::_class_data_ = { +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData GetRunRequest::_class_data_ = { ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, - PushTaskResRequest::MergeImpl + GetRunRequest::MergeImpl }; -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*PushTaskResRequest::GetClassData() const { return &_class_data_; } +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetRunRequest::GetClassData() const { return &_class_data_; } -void PushTaskResRequest::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, +void GetRunRequest::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from) { - static_cast(to)->MergeFrom( - static_cast(from)); + static_cast(to)->MergeFrom( + static_cast(from)); } -void PushTaskResRequest::MergeFrom(const PushTaskResRequest& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.PushTaskResRequest) +void GetRunRequest::MergeFrom(const GetRunRequest& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.GetRunRequest) GOOGLE_DCHECK_NE(&from, this); ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; (void) cached_has_bits; - task_res_list_.MergeFrom(from.task_res_list_); + if (from._internal_run_id() != 0) { + _internal_set_run_id(from._internal_run_id()); + } _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); } -void PushTaskResRequest::CopyFrom(const PushTaskResRequest& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.PushTaskResRequest) +void GetRunRequest::CopyFrom(const GetRunRequest& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.GetRunRequest) if (&from == this) return; Clear(); MergeFrom(from); } 
-bool PushTaskResRequest::IsInitialized() const { +bool GetRunRequest::IsInitialized() const { return true; } -void PushTaskResRequest::InternalSwap(PushTaskResRequest* other) { +void GetRunRequest::InternalSwap(GetRunRequest* other) { using std::swap; _internal_metadata_.InternalSwap(&other->_internal_metadata_); - task_res_list_.InternalSwap(&other->task_res_list_); -} - -::PROTOBUF_NAMESPACE_ID::Metadata PushTaskResRequest::GetMetadata() const { - return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( - &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, - file_level_metadata_flwr_2fproto_2ffleet_2eproto[6]); + swap(run_id_, other->run_id_); } -// =================================================================== - -PushTaskResResponse_ResultsEntry_DoNotUse::PushTaskResResponse_ResultsEntry_DoNotUse() {} -PushTaskResResponse_ResultsEntry_DoNotUse::PushTaskResResponse_ResultsEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena) - : SuperType(arena) {} -void PushTaskResResponse_ResultsEntry_DoNotUse::MergeFrom(const PushTaskResResponse_ResultsEntry_DoNotUse& other) { - MergeFromInternal(other); -} -::PROTOBUF_NAMESPACE_ID::Metadata PushTaskResResponse_ResultsEntry_DoNotUse::GetMetadata() const { +::PROTOBUF_NAMESPACE_ID::Metadata GetRunRequest::GetMetadata() const { return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, - file_level_metadata_flwr_2fproto_2ffleet_2eproto[7]); + file_level_metadata_flwr_2fproto_2ffleet_2eproto[12]); } // =================================================================== -class PushTaskResResponse::_Internal { +class GetRunResponse::_Internal { public: - static const ::flwr::proto::Reconnect& reconnect(const PushTaskResResponse* msg); + static const ::flwr::proto::Run& run(const GetRunResponse* msg); }; -const ::flwr::proto::Reconnect& 
-PushTaskResResponse::_Internal::reconnect(const PushTaskResResponse* msg) { - return *msg->reconnect_; +const ::flwr::proto::Run& +GetRunResponse::_Internal::run(const GetRunResponse* msg) { + return *msg->run_; } -PushTaskResResponse::PushTaskResResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, +GetRunResponse::GetRunResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), - results_(arena) { + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { SharedCtor(); if (!is_message_owned) { RegisterArenaDtor(arena); } - // @@protoc_insertion_point(arena_constructor:flwr.proto.PushTaskResResponse) + // @@protoc_insertion_point(arena_constructor:flwr.proto.GetRunResponse) } -PushTaskResResponse::PushTaskResResponse(const PushTaskResResponse& from) +GetRunResponse::GetRunResponse(const GetRunResponse& from) : ::PROTOBUF_NAMESPACE_ID::Message() { _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - results_.MergeFrom(from.results_); - if (from._internal_has_reconnect()) { - reconnect_ = new ::flwr::proto::Reconnect(*from.reconnect_); + if (from._internal_has_run()) { + run_ = new ::flwr::proto::Run(*from.run_); } else { - reconnect_ = nullptr; + run_ = nullptr; } - // @@protoc_insertion_point(copy_constructor:flwr.proto.PushTaskResResponse) + // @@protoc_insertion_point(copy_constructor:flwr.proto.GetRunResponse) } -void PushTaskResResponse::SharedCtor() { -reconnect_ = nullptr; +void GetRunResponse::SharedCtor() { +run_ = nullptr; } -PushTaskResResponse::~PushTaskResResponse() { - // @@protoc_insertion_point(destructor:flwr.proto.PushTaskResResponse) +GetRunResponse::~GetRunResponse() { + // @@protoc_insertion_point(destructor:flwr.proto.GetRunResponse) if (GetArenaForAllocation() != nullptr) return; SharedDtor(); _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } -inline void PushTaskResResponse::SharedDtor() { +inline 
void GetRunResponse::SharedDtor() { GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); - if (this != internal_default_instance()) delete reconnect_; + if (this != internal_default_instance()) delete run_; } -void PushTaskResResponse::ArenaDtor(void* object) { - PushTaskResResponse* _this = reinterpret_cast< PushTaskResResponse* >(object); +void GetRunResponse::ArenaDtor(void* object) { + GetRunResponse* _this = reinterpret_cast< GetRunResponse* >(object); (void)_this; - _this->results_. ~MapField(); } -inline void PushTaskResResponse::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena) { - if (arena != nullptr) { - arena->OwnCustomDestructor(this, &PushTaskResResponse::ArenaDtor); - } +void GetRunResponse::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { } -void PushTaskResResponse::SetCachedSize(int size) const { +void GetRunResponse::SetCachedSize(int size) const { _cached_size_.Set(size); } -void PushTaskResResponse::Clear() { -// @@protoc_insertion_point(message_clear_start:flwr.proto.PushTaskResResponse) +void GetRunResponse::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.GetRunResponse) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; - results_.Clear(); - if (GetArenaForAllocation() == nullptr && reconnect_ != nullptr) { - delete reconnect_; + if (GetArenaForAllocation() == nullptr && run_ != nullptr) { + delete run_; } - reconnect_ = nullptr; + run_ = nullptr; _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } -const char* PushTaskResResponse::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +const char* GetRunResponse::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure while (!ctx->Done(&ptr)) { ::PROTOBUF_NAMESPACE_ID::uint32 tag; ptr = 
::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); switch (tag >> 3) { - // .flwr.proto.Reconnect reconnect = 1; + // .flwr.proto.Run run = 1; case 1: if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { - ptr = ctx->ParseMessage(_internal_mutable_reconnect(), ptr); + ptr = ctx->ParseMessage(_internal_mutable_run(), ptr); CHK_(ptr); } else goto handle_unusual; continue; - // map results = 2; - case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { - ptr -= 1; - do { - ptr += 1; - ptr = ctx->ParseMessage(&results_, ptr); - CHK_(ptr); - if (!ctx->DataAvailable(ptr)) break; - } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr)); - } else - goto handle_unusual; - continue; default: goto handle_unusual; } // switch @@ -1573,145 +2938,92 @@ const char* PushTaskResResponse::_InternalParse(const char* ptr, ::PROTOBUF_NAME #undef CHK_ } -::PROTOBUF_NAMESPACE_ID::uint8* PushTaskResResponse::_InternalSerialize( +::PROTOBUF_NAMESPACE_ID::uint8* GetRunResponse::_InternalSerialize( ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { - // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.PushTaskResResponse) + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.GetRunResponse) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; (void) cached_has_bits; - // .flwr.proto.Reconnect reconnect = 1; - if (this->_internal_has_reconnect()) { + // .flwr.proto.Run run = 1; + if (this->_internal_has_run()) { target = stream->EnsureSpace(target); target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: InternalWriteMessage( - 1, _Internal::reconnect(this), target, stream); - } - - // map results = 2; - if (!this->_internal_results().empty()) { - typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >::const_pointer - ConstPtr; - typedef ConstPtr SortItem; - typedef 
::PROTOBUF_NAMESPACE_ID::internal::CompareByDerefFirst Less; - struct Utf8Check { - static void Check(ConstPtr p) { - (void)p; - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( - p->first.data(), static_cast(p->first.length()), - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, - "flwr.proto.PushTaskResResponse.ResultsEntry.key"); - } - }; - - if (stream->IsSerializationDeterministic() && - this->_internal_results().size() > 1) { - ::std::unique_ptr items( - new SortItem[this->_internal_results().size()]); - typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >::size_type size_type; - size_type n = 0; - for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >::const_iterator - it = this->_internal_results().begin(); - it != this->_internal_results().end(); ++it, ++n) { - items[static_cast(n)] = SortItem(&*it); - } - ::std::sort(&items[0], &items[static_cast(n)], Less()); - for (size_type i = 0; i < n; i++) { - target = PushTaskResResponse_ResultsEntry_DoNotUse::Funcs::InternalSerialize(2, items[static_cast(i)]->first, items[static_cast(i)]->second, target, stream); - Utf8Check::Check(&(*items[static_cast(i)])); - } - } else { - for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >::const_iterator - it = this->_internal_results().begin(); - it != this->_internal_results().end(); ++it) { - target = PushTaskResResponse_ResultsEntry_DoNotUse::Funcs::InternalSerialize(2, it->first, it->second, target, stream); - Utf8Check::Check(&(*it)); - } - } + 1, _Internal::run(this), target, stream); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); } - // 
@@protoc_insertion_point(serialize_to_array_end:flwr.proto.PushTaskResResponse) + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.GetRunResponse) return target; } -size_t PushTaskResResponse::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:flwr.proto.PushTaskResResponse) +size_t GetRunResponse::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.GetRunResponse) size_t total_size = 0; ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; - // map results = 2; - total_size += 1 * - ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_results_size()); - for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >::const_iterator - it = this->_internal_results().begin(); - it != this->_internal_results().end(); ++it) { - total_size += PushTaskResResponse_ResultsEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second); - } - - // .flwr.proto.Reconnect reconnect = 1; - if (this->_internal_has_reconnect()) { + // .flwr.proto.Run run = 1; + if (this->_internal_has_run()) { total_size += 1 + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( - *reconnect_); + *run_); } return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); } -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData PushTaskResResponse::_class_data_ = { +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData GetRunResponse::_class_data_ = { ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, - PushTaskResResponse::MergeImpl + GetRunResponse::MergeImpl }; -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*PushTaskResResponse::GetClassData() const { return &_class_data_; } +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetRunResponse::GetClassData() const { return &_class_data_; } -void PushTaskResResponse::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, +void 
GetRunResponse::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from) { - static_cast(to)->MergeFrom( - static_cast(from)); + static_cast(to)->MergeFrom( + static_cast(from)); } -void PushTaskResResponse::MergeFrom(const PushTaskResResponse& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.PushTaskResResponse) +void GetRunResponse::MergeFrom(const GetRunResponse& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.GetRunResponse) GOOGLE_DCHECK_NE(&from, this); ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; (void) cached_has_bits; - results_.MergeFrom(from.results_); - if (from._internal_has_reconnect()) { - _internal_mutable_reconnect()->::flwr::proto::Reconnect::MergeFrom(from._internal_reconnect()); + if (from._internal_has_run()) { + _internal_mutable_run()->::flwr::proto::Run::MergeFrom(from._internal_run()); } _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); } -void PushTaskResResponse::CopyFrom(const PushTaskResResponse& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.PushTaskResResponse) +void GetRunResponse::CopyFrom(const GetRunResponse& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.GetRunResponse) if (&from == this) return; Clear(); MergeFrom(from); } -bool PushTaskResResponse::IsInitialized() const { +bool GetRunResponse::IsInitialized() const { return true; } -void PushTaskResResponse::InternalSwap(PushTaskResResponse* other) { +void GetRunResponse::InternalSwap(GetRunResponse* other) { using std::swap; _internal_metadata_.InternalSwap(&other->_internal_metadata_); - results_.InternalSwap(&other->results_); - swap(reconnect_, other->reconnect_); + swap(run_, other->run_); } -::PROTOBUF_NAMESPACE_ID::Metadata PushTaskResResponse::GetMetadata() const { +::PROTOBUF_NAMESPACE_ID::Metadata GetRunResponse::GetMetadata() const { return 
::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, - file_level_metadata_flwr_2fproto_2ffleet_2eproto[8]); + file_level_metadata_flwr_2fproto_2ffleet_2eproto[13]); } // =================================================================== @@ -1889,7 +3201,7 @@ void Reconnect::InternalSwap(Reconnect* other) { ::PROTOBUF_NAMESPACE_ID::Metadata Reconnect::GetMetadata() const { return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, - file_level_metadata_flwr_2fproto_2ffleet_2eproto[9]); + file_level_metadata_flwr_2fproto_2ffleet_2eproto[14]); } // @@protoc_insertion_point(namespace_scope) @@ -1908,6 +3220,12 @@ template<> PROTOBUF_NOINLINE ::flwr::proto::DeleteNodeRequest* Arena::CreateMayb template<> PROTOBUF_NOINLINE ::flwr::proto::DeleteNodeResponse* Arena::CreateMaybeMessage< ::flwr::proto::DeleteNodeResponse >(Arena* arena) { return Arena::CreateMessageInternal< ::flwr::proto::DeleteNodeResponse >(arena); } +template<> PROTOBUF_NOINLINE ::flwr::proto::PingRequest* Arena::CreateMaybeMessage< ::flwr::proto::PingRequest >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::PingRequest >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::PingResponse* Arena::CreateMaybeMessage< ::flwr::proto::PingResponse >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::PingResponse >(arena); +} template<> PROTOBUF_NOINLINE ::flwr::proto::PullTaskInsRequest* Arena::CreateMaybeMessage< ::flwr::proto::PullTaskInsRequest >(Arena* arena) { return Arena::CreateMessageInternal< ::flwr::proto::PullTaskInsRequest >(arena); } @@ -1923,6 +3241,15 @@ template<> PROTOBUF_NOINLINE ::flwr::proto::PushTaskResResponse_ResultsEntry_DoN template<> PROTOBUF_NOINLINE ::flwr::proto::PushTaskResResponse* Arena::CreateMaybeMessage< 
::flwr::proto::PushTaskResResponse >(Arena* arena) { return Arena::CreateMessageInternal< ::flwr::proto::PushTaskResResponse >(arena); } +template<> PROTOBUF_NOINLINE ::flwr::proto::Run* Arena::CreateMaybeMessage< ::flwr::proto::Run >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::Run >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::GetRunRequest* Arena::CreateMaybeMessage< ::flwr::proto::GetRunRequest >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::GetRunRequest >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::GetRunResponse* Arena::CreateMaybeMessage< ::flwr::proto::GetRunResponse >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::GetRunResponse >(arena); +} template<> PROTOBUF_NOINLINE ::flwr::proto::Reconnect* Arena::CreateMaybeMessage< ::flwr::proto::Reconnect >(Arena* arena) { return Arena::CreateMessageInternal< ::flwr::proto::Reconnect >(arena); } diff --git a/src/cc/flwr/include/flwr/proto/fleet.pb.h b/src/cc/flwr/include/flwr/proto/fleet.pb.h index 842e800f5b1c..9ad30b5752f5 100644 --- a/src/cc/flwr/include/flwr/proto/fleet.pb.h +++ b/src/cc/flwr/include/flwr/proto/fleet.pb.h @@ -52,7 +52,7 @@ struct TableStruct_flwr_2fproto_2ffleet_2eproto { PROTOBUF_SECTION_VARIABLE(protodesc_cold); static const ::PROTOBUF_NAMESPACE_ID::internal::AuxiliaryParseTableField aux[] PROTOBUF_SECTION_VARIABLE(protodesc_cold); - static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[10] + static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[15] PROTOBUF_SECTION_VARIABLE(protodesc_cold); static const ::PROTOBUF_NAMESPACE_ID::internal::FieldMetadata field_metadata[]; static const ::PROTOBUF_NAMESPACE_ID::internal::SerializationTable serialization_table[]; @@ -73,6 +73,18 @@ extern DeleteNodeRequestDefaultTypeInternal _DeleteNodeRequest_default_instance_ class DeleteNodeResponse; struct DeleteNodeResponseDefaultTypeInternal; extern DeleteNodeResponseDefaultTypeInternal 
_DeleteNodeResponse_default_instance_; +class GetRunRequest; +struct GetRunRequestDefaultTypeInternal; +extern GetRunRequestDefaultTypeInternal _GetRunRequest_default_instance_; +class GetRunResponse; +struct GetRunResponseDefaultTypeInternal; +extern GetRunResponseDefaultTypeInternal _GetRunResponse_default_instance_; +class PingRequest; +struct PingRequestDefaultTypeInternal; +extern PingRequestDefaultTypeInternal _PingRequest_default_instance_; +class PingResponse; +struct PingResponseDefaultTypeInternal; +extern PingResponseDefaultTypeInternal _PingResponse_default_instance_; class PullTaskInsRequest; struct PullTaskInsRequestDefaultTypeInternal; extern PullTaskInsRequestDefaultTypeInternal _PullTaskInsRequest_default_instance_; @@ -91,6 +103,9 @@ extern PushTaskResResponse_ResultsEntry_DoNotUseDefaultTypeInternal _PushTaskRes class Reconnect; struct ReconnectDefaultTypeInternal; extern ReconnectDefaultTypeInternal _Reconnect_default_instance_; +class Run; +struct RunDefaultTypeInternal; +extern RunDefaultTypeInternal _Run_default_instance_; } // namespace proto } // namespace flwr PROTOBUF_NAMESPACE_OPEN @@ -98,12 +113,17 @@ template<> ::flwr::proto::CreateNodeRequest* Arena::CreateMaybeMessage<::flwr::p template<> ::flwr::proto::CreateNodeResponse* Arena::CreateMaybeMessage<::flwr::proto::CreateNodeResponse>(Arena*); template<> ::flwr::proto::DeleteNodeRequest* Arena::CreateMaybeMessage<::flwr::proto::DeleteNodeRequest>(Arena*); template<> ::flwr::proto::DeleteNodeResponse* Arena::CreateMaybeMessage<::flwr::proto::DeleteNodeResponse>(Arena*); +template<> ::flwr::proto::GetRunRequest* Arena::CreateMaybeMessage<::flwr::proto::GetRunRequest>(Arena*); +template<> ::flwr::proto::GetRunResponse* Arena::CreateMaybeMessage<::flwr::proto::GetRunResponse>(Arena*); +template<> ::flwr::proto::PingRequest* Arena::CreateMaybeMessage<::flwr::proto::PingRequest>(Arena*); +template<> ::flwr::proto::PingResponse* Arena::CreateMaybeMessage<::flwr::proto::PingResponse>(Arena*); 
template<> ::flwr::proto::PullTaskInsRequest* Arena::CreateMaybeMessage<::flwr::proto::PullTaskInsRequest>(Arena*); template<> ::flwr::proto::PullTaskInsResponse* Arena::CreateMaybeMessage<::flwr::proto::PullTaskInsResponse>(Arena*); template<> ::flwr::proto::PushTaskResRequest* Arena::CreateMaybeMessage<::flwr::proto::PushTaskResRequest>(Arena*); template<> ::flwr::proto::PushTaskResResponse* Arena::CreateMaybeMessage<::flwr::proto::PushTaskResResponse>(Arena*); template<> ::flwr::proto::PushTaskResResponse_ResultsEntry_DoNotUse* Arena::CreateMaybeMessage<::flwr::proto::PushTaskResResponse_ResultsEntry_DoNotUse>(Arena*); template<> ::flwr::proto::Reconnect* Arena::CreateMaybeMessage<::flwr::proto::Reconnect>(Arena*); +template<> ::flwr::proto::Run* Arena::CreateMaybeMessage<::flwr::proto::Run>(Arena*); PROTOBUF_NAMESPACE_CLOSE namespace flwr { namespace proto { @@ -111,9 +131,10 @@ namespace proto { // =================================================================== class CreateNodeRequest final : - public ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase /* @@protoc_insertion_point(class_definition:flwr.proto.CreateNodeRequest) */ { + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.CreateNodeRequest) */ { public: inline CreateNodeRequest() : CreateNodeRequest(nullptr) {} + ~CreateNodeRequest() override; explicit constexpr CreateNodeRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); CreateNodeRequest(const CreateNodeRequest& from); @@ -185,15 +206,27 @@ class CreateNodeRequest final : CreateNodeRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { return CreateMaybeMessage(arena); } - using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyFrom; - inline void CopyFrom(const CreateNodeRequest& from) { - ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyImpl(this, from); - } - using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeFrom; - void MergeFrom(const 
CreateNodeRequest& from) { - ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeImpl(this, from); - } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const CreateNodeRequest& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const CreateNodeRequest& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(CreateNodeRequest* other); friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "flwr.proto.CreateNodeRequest"; @@ -202,6 +235,8 @@ class CreateNodeRequest final : explicit CreateNodeRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); public: static const ClassData _class_data_; @@ -213,6 +248,18 @@ class CreateNodeRequest final : // accessors ------------------------------------------------------- + enum : int { + kPingIntervalFieldNumber = 1, + }; + // double ping_interval = 1; + void clear_ping_interval(); + double ping_interval() const; + void set_ping_interval(double value); + private: + double _internal_ping_interval() const; + void _internal_set_ping_interval(double value); + public: + // 
@@protoc_insertion_point(class_scope:flwr.proto.CreateNodeRequest) private: class _Internal; @@ -220,6 +267,7 @@ class CreateNodeRequest final : template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; typedef void InternalArenaConstructable_; typedef void DestructorSkippable_; + double ping_interval_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; friend struct ::TableStruct_flwr_2fproto_2ffleet_2eproto; }; @@ -644,24 +692,24 @@ class DeleteNodeResponse final : }; // ------------------------------------------------------------------- -class PullTaskInsRequest final : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.PullTaskInsRequest) */ { +class PingRequest final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.PingRequest) */ { public: - inline PullTaskInsRequest() : PullTaskInsRequest(nullptr) {} - ~PullTaskInsRequest() override; - explicit constexpr PullTaskInsRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + inline PingRequest() : PingRequest(nullptr) {} + ~PingRequest() override; + explicit constexpr PingRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - PullTaskInsRequest(const PullTaskInsRequest& from); - PullTaskInsRequest(PullTaskInsRequest&& from) noexcept - : PullTaskInsRequest() { + PingRequest(const PingRequest& from); + PingRequest(PingRequest&& from) noexcept + : PingRequest() { *this = ::std::move(from); } - inline PullTaskInsRequest& operator=(const PullTaskInsRequest& from) { + inline PingRequest& operator=(const PingRequest& from) { CopyFrom(from); return *this; } - inline PullTaskInsRequest& operator=(PullTaskInsRequest&& from) noexcept { + inline PingRequest& operator=(PingRequest&& from) noexcept { if (this == &from) return *this; if (GetOwningArena() == from.GetOwningArena() #ifdef PROTOBUF_FORCE_COPY_IN_MOVE @@ -684,20 +732,20 @@ class PullTaskInsRequest final : static const 
::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { return default_instance().GetMetadata().reflection; } - static const PullTaskInsRequest& default_instance() { + static const PingRequest& default_instance() { return *internal_default_instance(); } - static inline const PullTaskInsRequest* internal_default_instance() { - return reinterpret_cast( - &_PullTaskInsRequest_default_instance_); + static inline const PingRequest* internal_default_instance() { + return reinterpret_cast( + &_PingRequest_default_instance_); } static constexpr int kIndexInFileMessages = 4; - friend void swap(PullTaskInsRequest& a, PullTaskInsRequest& b) { + friend void swap(PingRequest& a, PingRequest& b) { a.Swap(&b); } - inline void Swap(PullTaskInsRequest* other) { + inline void Swap(PingRequest* other) { if (other == this) return; if (GetOwningArena() == other->GetOwningArena()) { InternalSwap(other); @@ -705,7 +753,7 @@ class PullTaskInsRequest final : ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); } } - void UnsafeArenaSwap(PullTaskInsRequest* other) { + void UnsafeArenaSwap(PingRequest* other) { if (other == this) return; GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); @@ -713,17 +761,17 @@ class PullTaskInsRequest final : // implements Message ---------------------------------------------- - inline PullTaskInsRequest* New() const final { - return new PullTaskInsRequest(); + inline PingRequest* New() const final { + return new PingRequest(); } - PullTaskInsRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); + PingRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); } using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; - void CopyFrom(const PullTaskInsRequest& from); + void CopyFrom(const PingRequest& from); using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - void MergeFrom(const PullTaskInsRequest& from); + void MergeFrom(const PingRequest& 
from); private: static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); public: @@ -740,13 +788,13 @@ class PullTaskInsRequest final : void SharedCtor(); void SharedDtor(); void SetCachedSize(int size) const final; - void InternalSwap(PullTaskInsRequest* other); + void InternalSwap(PingRequest* other); friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "flwr.proto.PullTaskInsRequest"; + return "flwr.proto.PingRequest"; } protected: - explicit PullTaskInsRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + explicit PingRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); private: static void ArenaDtor(void* object); @@ -763,33 +811,9 @@ class PullTaskInsRequest final : // accessors ------------------------------------------------------- enum : int { - kTaskIdsFieldNumber = 2, kNodeFieldNumber = 1, + kPingIntervalFieldNumber = 2, }; - // repeated string task_ids = 2; - int task_ids_size() const; - private: - int _internal_task_ids_size() const; - public: - void clear_task_ids(); - const std::string& task_ids(int index) const; - std::string* mutable_task_ids(int index); - void set_task_ids(int index, const std::string& value); - void set_task_ids(int index, std::string&& value); - void set_task_ids(int index, const char* value); - void set_task_ids(int index, const char* value, size_t size); - std::string* add_task_ids(); - void add_task_ids(const std::string& value); - void add_task_ids(std::string&& value); - void add_task_ids(const char* value); - void add_task_ids(const char* value, size_t size); - const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& task_ids() const; - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* mutable_task_ids(); - private: - const std::string& _internal_task_ids(int index) const; - std::string* _internal_add_task_ids(); - public: - // .flwr.proto.Node node = 1; bool has_node() const; private: @@ 
-808,38 +832,47 @@ class PullTaskInsRequest final : ::flwr::proto::Node* node); ::flwr::proto::Node* unsafe_arena_release_node(); - // @@protoc_insertion_point(class_scope:flwr.proto.PullTaskInsRequest) + // double ping_interval = 2; + void clear_ping_interval(); + double ping_interval() const; + void set_ping_interval(double value); + private: + double _internal_ping_interval() const; + void _internal_set_ping_interval(double value); + public: + + // @@protoc_insertion_point(class_scope:flwr.proto.PingRequest) private: class _Internal; template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; typedef void InternalArenaConstructable_; typedef void DestructorSkippable_; - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField task_ids_; ::flwr::proto::Node* node_; + double ping_interval_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; friend struct ::TableStruct_flwr_2fproto_2ffleet_2eproto; }; // ------------------------------------------------------------------- -class PullTaskInsResponse final : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.PullTaskInsResponse) */ { +class PingResponse final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.PingResponse) */ { public: - inline PullTaskInsResponse() : PullTaskInsResponse(nullptr) {} - ~PullTaskInsResponse() override; - explicit constexpr PullTaskInsResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + inline PingResponse() : PingResponse(nullptr) {} + ~PingResponse() override; + explicit constexpr PingResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - PullTaskInsResponse(const PullTaskInsResponse& from); - PullTaskInsResponse(PullTaskInsResponse&& from) noexcept - : PullTaskInsResponse() { + PingResponse(const PingResponse& from); + PingResponse(PingResponse&& from) noexcept + : PingResponse() { *this = ::std::move(from); } - inline PullTaskInsResponse& 
operator=(const PullTaskInsResponse& from) { + inline PingResponse& operator=(const PingResponse& from) { CopyFrom(from); return *this; } - inline PullTaskInsResponse& operator=(PullTaskInsResponse&& from) noexcept { + inline PingResponse& operator=(PingResponse&& from) noexcept { if (this == &from) return *this; if (GetOwningArena() == from.GetOwningArena() #ifdef PROTOBUF_FORCE_COPY_IN_MOVE @@ -862,20 +895,20 @@ class PullTaskInsResponse final : static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { return default_instance().GetMetadata().reflection; } - static const PullTaskInsResponse& default_instance() { + static const PingResponse& default_instance() { return *internal_default_instance(); } - static inline const PullTaskInsResponse* internal_default_instance() { - return reinterpret_cast( - &_PullTaskInsResponse_default_instance_); + static inline const PingResponse* internal_default_instance() { + return reinterpret_cast( + &_PingResponse_default_instance_); } static constexpr int kIndexInFileMessages = 5; - friend void swap(PullTaskInsResponse& a, PullTaskInsResponse& b) { + friend void swap(PingResponse& a, PingResponse& b) { a.Swap(&b); } - inline void Swap(PullTaskInsResponse* other) { + inline void Swap(PingResponse* other) { if (other == this) return; if (GetOwningArena() == other->GetOwningArena()) { InternalSwap(other); @@ -883,7 +916,7 @@ class PullTaskInsResponse final : ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); } } - void UnsafeArenaSwap(PullTaskInsResponse* other) { + void UnsafeArenaSwap(PingResponse* other) { if (other == this) return; GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); @@ -891,17 +924,17 @@ class PullTaskInsResponse final : // implements Message ---------------------------------------------- - inline PullTaskInsResponse* New() const final { - return new PullTaskInsResponse(); + inline PingResponse* New() const final { + return new PingResponse(); } - 
PullTaskInsResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); + PingResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); } using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; - void CopyFrom(const PullTaskInsResponse& from); + void CopyFrom(const PingResponse& from); using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - void MergeFrom(const PullTaskInsResponse& from); + void MergeFrom(const PingResponse& from); private: static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); public: @@ -918,13 +951,13 @@ class PullTaskInsResponse final : void SharedCtor(); void SharedDtor(); void SetCachedSize(int size) const final; - void InternalSwap(PullTaskInsResponse* other); + void InternalSwap(PingResponse* other); friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "flwr.proto.PullTaskInsResponse"; + return "flwr.proto.PingResponse"; } protected: - explicit PullTaskInsResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + explicit PingResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); private: static void ArenaDtor(void* object); @@ -941,77 +974,48 @@ class PullTaskInsResponse final : // accessors ------------------------------------------------------- enum : int { - kTaskInsListFieldNumber = 2, - kReconnectFieldNumber = 1, + kSuccessFieldNumber = 1, }; - // repeated .flwr.proto.TaskIns task_ins_list = 2; - int task_ins_list_size() const; - private: - int _internal_task_ins_list_size() const; - public: - void clear_task_ins_list(); - ::flwr::proto::TaskIns* mutable_task_ins_list(int index); - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::TaskIns >* - mutable_task_ins_list(); - private: - const ::flwr::proto::TaskIns& _internal_task_ins_list(int index) const; - ::flwr::proto::TaskIns* 
_internal_add_task_ins_list(); - public: - const ::flwr::proto::TaskIns& task_ins_list(int index) const; - ::flwr::proto::TaskIns* add_task_ins_list(); - const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::TaskIns >& - task_ins_list() const; - - // .flwr.proto.Reconnect reconnect = 1; - bool has_reconnect() const; - private: - bool _internal_has_reconnect() const; - public: - void clear_reconnect(); - const ::flwr::proto::Reconnect& reconnect() const; - PROTOBUF_MUST_USE_RESULT ::flwr::proto::Reconnect* release_reconnect(); - ::flwr::proto::Reconnect* mutable_reconnect(); - void set_allocated_reconnect(::flwr::proto::Reconnect* reconnect); + // bool success = 1; + void clear_success(); + bool success() const; + void set_success(bool value); private: - const ::flwr::proto::Reconnect& _internal_reconnect() const; - ::flwr::proto::Reconnect* _internal_mutable_reconnect(); + bool _internal_success() const; + void _internal_set_success(bool value); public: - void unsafe_arena_set_allocated_reconnect( - ::flwr::proto::Reconnect* reconnect); - ::flwr::proto::Reconnect* unsafe_arena_release_reconnect(); - // @@protoc_insertion_point(class_scope:flwr.proto.PullTaskInsResponse) + // @@protoc_insertion_point(class_scope:flwr.proto.PingResponse) private: class _Internal; template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; typedef void InternalArenaConstructable_; typedef void DestructorSkippable_; - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::TaskIns > task_ins_list_; - ::flwr::proto::Reconnect* reconnect_; + bool success_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; friend struct ::TableStruct_flwr_2fproto_2ffleet_2eproto; }; // ------------------------------------------------------------------- -class PushTaskResRequest final : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.PushTaskResRequest) */ { +class PullTaskInsRequest final : + public 
::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.PullTaskInsRequest) */ { public: - inline PushTaskResRequest() : PushTaskResRequest(nullptr) {} - ~PushTaskResRequest() override; - explicit constexpr PushTaskResRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + inline PullTaskInsRequest() : PullTaskInsRequest(nullptr) {} + ~PullTaskInsRequest() override; + explicit constexpr PullTaskInsRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - PushTaskResRequest(const PushTaskResRequest& from); - PushTaskResRequest(PushTaskResRequest&& from) noexcept - : PushTaskResRequest() { + PullTaskInsRequest(const PullTaskInsRequest& from); + PullTaskInsRequest(PullTaskInsRequest&& from) noexcept + : PullTaskInsRequest() { *this = ::std::move(from); } - inline PushTaskResRequest& operator=(const PushTaskResRequest& from) { + inline PullTaskInsRequest& operator=(const PullTaskInsRequest& from) { CopyFrom(from); return *this; } - inline PushTaskResRequest& operator=(PushTaskResRequest&& from) noexcept { + inline PullTaskInsRequest& operator=(PullTaskInsRequest&& from) noexcept { if (this == &from) return *this; if (GetOwningArena() == from.GetOwningArena() #ifdef PROTOBUF_FORCE_COPY_IN_MOVE @@ -1034,20 +1038,20 @@ class PushTaskResRequest final : static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { return default_instance().GetMetadata().reflection; } - static const PushTaskResRequest& default_instance() { + static const PullTaskInsRequest& default_instance() { return *internal_default_instance(); } - static inline const PushTaskResRequest* internal_default_instance() { - return reinterpret_cast( - &_PushTaskResRequest_default_instance_); + static inline const PullTaskInsRequest* internal_default_instance() { + return reinterpret_cast( + &_PullTaskInsRequest_default_instance_); } static constexpr int kIndexInFileMessages = 6; - friend void swap(PushTaskResRequest& a, PushTaskResRequest& b) { + friend 
void swap(PullTaskInsRequest& a, PullTaskInsRequest& b) { a.Swap(&b); } - inline void Swap(PushTaskResRequest* other) { + inline void Swap(PullTaskInsRequest* other) { if (other == this) return; if (GetOwningArena() == other->GetOwningArena()) { InternalSwap(other); @@ -1055,7 +1059,7 @@ class PushTaskResRequest final : ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); } } - void UnsafeArenaSwap(PushTaskResRequest* other) { + void UnsafeArenaSwap(PullTaskInsRequest* other) { if (other == this) return; GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); @@ -1063,17 +1067,17 @@ class PushTaskResRequest final : // implements Message ---------------------------------------------- - inline PushTaskResRequest* New() const final { - return new PushTaskResRequest(); + inline PullTaskInsRequest* New() const final { + return new PullTaskInsRequest(); } - PushTaskResRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); + PullTaskInsRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); } using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; - void CopyFrom(const PushTaskResRequest& from); + void CopyFrom(const PullTaskInsRequest& from); using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - void MergeFrom(const PushTaskResRequest& from); + void MergeFrom(const PullTaskInsRequest& from); private: static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); public: @@ -1090,13 +1094,13 @@ class PushTaskResRequest final : void SharedCtor(); void SharedDtor(); void SetCachedSize(int size) const final; - void InternalSwap(PushTaskResRequest* other); + void InternalSwap(PullTaskInsRequest* other); friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "flwr.proto.PushTaskResRequest"; + return "flwr.proto.PullTaskInsRequest"; } 
protected: - explicit PushTaskResRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + explicit PullTaskInsRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); private: static void ArenaDtor(void* object); @@ -1113,82 +1117,255 @@ class PushTaskResRequest final : // accessors ------------------------------------------------------- enum : int { - kTaskResListFieldNumber = 1, + kTaskIdsFieldNumber = 2, + kNodeFieldNumber = 1, }; - // repeated .flwr.proto.TaskRes task_res_list = 1; - int task_res_list_size() const; + // repeated string task_ids = 2; + int task_ids_size() const; private: - int _internal_task_res_list_size() const; + int _internal_task_ids_size() const; public: - void clear_task_res_list(); - ::flwr::proto::TaskRes* mutable_task_res_list(int index); - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::TaskRes >* - mutable_task_res_list(); + void clear_task_ids(); + const std::string& task_ids(int index) const; + std::string* mutable_task_ids(int index); + void set_task_ids(int index, const std::string& value); + void set_task_ids(int index, std::string&& value); + void set_task_ids(int index, const char* value); + void set_task_ids(int index, const char* value, size_t size); + std::string* add_task_ids(); + void add_task_ids(const std::string& value); + void add_task_ids(std::string&& value); + void add_task_ids(const char* value); + void add_task_ids(const char* value, size_t size); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& task_ids() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* mutable_task_ids(); private: - const ::flwr::proto::TaskRes& _internal_task_res_list(int index) const; - ::flwr::proto::TaskRes* _internal_add_task_res_list(); + const std::string& _internal_task_ids(int index) const; + std::string* _internal_add_task_ids(); public: - const ::flwr::proto::TaskRes& task_res_list(int index) const; - ::flwr::proto::TaskRes* add_task_res_list(); - const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< 
::flwr::proto::TaskRes >& - task_res_list() const; - - // @@protoc_insertion_point(class_scope:flwr.proto.PushTaskResRequest) - private: - class _Internal; - - template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; - typedef void InternalArenaConstructable_; - typedef void DestructorSkippable_; - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::TaskRes > task_res_list_; - mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; - friend struct ::TableStruct_flwr_2fproto_2ffleet_2eproto; -}; -// ------------------------------------------------------------------- -class PushTaskResResponse_ResultsEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntry { -public: - typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntry SuperType; - PushTaskResResponse_ResultsEntry_DoNotUse(); - explicit constexpr PushTaskResResponse_ResultsEntry_DoNotUse( - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - explicit PushTaskResResponse_ResultsEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena); - void MergeFrom(const PushTaskResResponse_ResultsEntry_DoNotUse& other); - static const PushTaskResResponse_ResultsEntry_DoNotUse* internal_default_instance() { return reinterpret_cast(&_PushTaskResResponse_ResultsEntry_DoNotUse_default_instance_); } - static bool ValidateKey(std::string* s) { - return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "flwr.proto.PushTaskResResponse.ResultsEntry.key"); - } - static bool ValidateValue(void*) { return true; } + // .flwr.proto.Node node = 1; + bool has_node() const; + private: + bool _internal_has_node() const; + public: + void clear_node(); + const ::flwr::proto::Node& node() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::Node* release_node(); + ::flwr::proto::Node* mutable_node(); + void set_allocated_node(::flwr::proto::Node* node); + private: + const ::flwr::proto::Node& 
_internal_node() const; + ::flwr::proto::Node* _internal_mutable_node(); + public: + void unsafe_arena_set_allocated_node( + ::flwr::proto::Node* node); + ::flwr::proto::Node* unsafe_arena_release_node(); + + // @@protoc_insertion_point(class_scope:flwr.proto.PullTaskInsRequest) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField task_ids_; + ::flwr::proto::Node* node_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2ffleet_2eproto; +}; +// ------------------------------------------------------------------- + +class PullTaskInsResponse final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.PullTaskInsResponse) */ { + public: + inline PullTaskInsResponse() : PullTaskInsResponse(nullptr) {} + ~PullTaskInsResponse() override; + explicit constexpr PullTaskInsResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + PullTaskInsResponse(const PullTaskInsResponse& from); + PullTaskInsResponse(PullTaskInsResponse&& from) noexcept + : PullTaskInsResponse() { + *this = ::std::move(from); + } + + inline PullTaskInsResponse& operator=(const PullTaskInsResponse& from) { + CopyFrom(from); + return *this; + } + inline PullTaskInsResponse& operator=(PullTaskInsResponse&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + 
} + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const PullTaskInsResponse& default_instance() { + return *internal_default_instance(); + } + static inline const PullTaskInsResponse* internal_default_instance() { + return reinterpret_cast( + &_PullTaskInsResponse_default_instance_); + } + static constexpr int kIndexInFileMessages = + 7; + + friend void swap(PullTaskInsResponse& a, PullTaskInsResponse& b) { + a.Swap(&b); + } + inline void Swap(PullTaskInsResponse* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(PullTaskInsResponse* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline PullTaskInsResponse* New() const final { + return new PullTaskInsResponse(); + } + + PullTaskInsResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const PullTaskInsResponse& from); using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const PullTaskInsResponse& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return 
_cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(PullTaskInsResponse* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.PullTaskInsResponse"; + } + protected: + explicit PullTaskInsResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; -}; + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kTaskInsListFieldNumber = 2, + kReconnectFieldNumber = 1, + }; + // repeated .flwr.proto.TaskIns task_ins_list = 2; + int task_ins_list_size() const; + private: + int _internal_task_ins_list_size() const; + public: + void clear_task_ins_list(); + ::flwr::proto::TaskIns* mutable_task_ins_list(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::TaskIns >* + mutable_task_ins_list(); + private: + const ::flwr::proto::TaskIns& _internal_task_ins_list(int index) const; + ::flwr::proto::TaskIns* _internal_add_task_ins_list(); + public: + const ::flwr::proto::TaskIns& task_ins_list(int index) const; + ::flwr::proto::TaskIns* add_task_ins_list(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::TaskIns >& + task_ins_list() const; + + // .flwr.proto.Reconnect reconnect = 1; + bool has_reconnect() const; + private: + bool _internal_has_reconnect() const; + public: + void clear_reconnect(); + const ::flwr::proto::Reconnect& reconnect() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::Reconnect* 
release_reconnect(); + ::flwr::proto::Reconnect* mutable_reconnect(); + void set_allocated_reconnect(::flwr::proto::Reconnect* reconnect); + private: + const ::flwr::proto::Reconnect& _internal_reconnect() const; + ::flwr::proto::Reconnect* _internal_mutable_reconnect(); + public: + void unsafe_arena_set_allocated_reconnect( + ::flwr::proto::Reconnect* reconnect); + ::flwr::proto::Reconnect* unsafe_arena_release_reconnect(); + + // @@protoc_insertion_point(class_scope:flwr.proto.PullTaskInsResponse) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::TaskIns > task_ins_list_; + ::flwr::proto::Reconnect* reconnect_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2ffleet_2eproto; +}; // ------------------------------------------------------------------- -class PushTaskResResponse final : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.PushTaskResResponse) */ { +class PushTaskResRequest final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.PushTaskResRequest) */ { public: - inline PushTaskResResponse() : PushTaskResResponse(nullptr) {} - ~PushTaskResResponse() override; - explicit constexpr PushTaskResResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + inline PushTaskResRequest() : PushTaskResRequest(nullptr) {} + ~PushTaskResRequest() override; + explicit constexpr PushTaskResRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - PushTaskResResponse(const PushTaskResResponse& from); - PushTaskResResponse(PushTaskResResponse&& from) noexcept - : PushTaskResResponse() { + PushTaskResRequest(const PushTaskResRequest& from); + PushTaskResRequest(PushTaskResRequest&& from) noexcept + : 
PushTaskResRequest() { *this = ::std::move(from); } - inline PushTaskResResponse& operator=(const PushTaskResResponse& from) { + inline PushTaskResRequest& operator=(const PushTaskResRequest& from) { CopyFrom(from); return *this; } - inline PushTaskResResponse& operator=(PushTaskResResponse&& from) noexcept { + inline PushTaskResRequest& operator=(PushTaskResRequest&& from) noexcept { if (this == &from) return *this; if (GetOwningArena() == from.GetOwningArena() #ifdef PROTOBUF_FORCE_COPY_IN_MOVE @@ -1211,20 +1388,20 @@ class PushTaskResResponse final : static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { return default_instance().GetMetadata().reflection; } - static const PushTaskResResponse& default_instance() { + static const PushTaskResRequest& default_instance() { return *internal_default_instance(); } - static inline const PushTaskResResponse* internal_default_instance() { - return reinterpret_cast( - &_PushTaskResResponse_default_instance_); + static inline const PushTaskResRequest* internal_default_instance() { + return reinterpret_cast( + &_PushTaskResRequest_default_instance_); } static constexpr int kIndexInFileMessages = 8; - friend void swap(PushTaskResResponse& a, PushTaskResResponse& b) { + friend void swap(PushTaskResRequest& a, PushTaskResRequest& b) { a.Swap(&b); } - inline void Swap(PushTaskResResponse* other) { + inline void Swap(PushTaskResRequest* other) { if (other == this) return; if (GetOwningArena() == other->GetOwningArena()) { InternalSwap(other); @@ -1232,7 +1409,7 @@ class PushTaskResResponse final : ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); } } - void UnsafeArenaSwap(PushTaskResResponse* other) { + void UnsafeArenaSwap(PushTaskResRequest* other) { if (other == this) return; GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); @@ -1240,17 +1417,688 @@ class PushTaskResResponse final : // implements Message ---------------------------------------------- - inline 
PushTaskResResponse* New() const final { - return new PushTaskResResponse(); + inline PushTaskResRequest* New() const final { + return new PushTaskResRequest(); } - PushTaskResResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); + PushTaskResRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const PushTaskResRequest& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const PushTaskResRequest& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(PushTaskResRequest* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.PushTaskResRequest"; + } + protected: + explicit PushTaskResRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types 
---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kTaskResListFieldNumber = 1, + }; + // repeated .flwr.proto.TaskRes task_res_list = 1; + int task_res_list_size() const; + private: + int _internal_task_res_list_size() const; + public: + void clear_task_res_list(); + ::flwr::proto::TaskRes* mutable_task_res_list(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::TaskRes >* + mutable_task_res_list(); + private: + const ::flwr::proto::TaskRes& _internal_task_res_list(int index) const; + ::flwr::proto::TaskRes* _internal_add_task_res_list(); + public: + const ::flwr::proto::TaskRes& task_res_list(int index) const; + ::flwr::proto::TaskRes* add_task_res_list(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::TaskRes >& + task_res_list() const; + + // @@protoc_insertion_point(class_scope:flwr.proto.PushTaskResRequest) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::TaskRes > task_res_list_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2ffleet_2eproto; +}; +// ------------------------------------------------------------------- + +class PushTaskResResponse_ResultsEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntry { +public: + typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntry SuperType; + PushTaskResResponse_ResultsEntry_DoNotUse(); + explicit constexpr PushTaskResResponse_ResultsEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + explicit PushTaskResResponse_ResultsEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void MergeFrom(const PushTaskResResponse_ResultsEntry_DoNotUse& other); + static const 
PushTaskResResponse_ResultsEntry_DoNotUse* internal_default_instance() { return reinterpret_cast(&_PushTaskResResponse_ResultsEntry_DoNotUse_default_instance_); } + static bool ValidateKey(std::string* s) { + return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "flwr.proto.PushTaskResResponse.ResultsEntry.key"); + } + static bool ValidateValue(void*) { return true; } + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; +}; + +// ------------------------------------------------------------------- + +class PushTaskResResponse final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.PushTaskResResponse) */ { + public: + inline PushTaskResResponse() : PushTaskResResponse(nullptr) {} + ~PushTaskResResponse() override; + explicit constexpr PushTaskResResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + PushTaskResResponse(const PushTaskResResponse& from); + PushTaskResResponse(PushTaskResResponse&& from) noexcept + : PushTaskResResponse() { + *this = ::std::move(from); + } + + inline PushTaskResResponse& operator=(const PushTaskResResponse& from) { + CopyFrom(from); + return *this; + } + inline PushTaskResResponse& operator=(PushTaskResResponse&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + 
return default_instance().GetMetadata().reflection; + } + static const PushTaskResResponse& default_instance() { + return *internal_default_instance(); + } + static inline const PushTaskResResponse* internal_default_instance() { + return reinterpret_cast( + &_PushTaskResResponse_default_instance_); + } + static constexpr int kIndexInFileMessages = + 10; + + friend void swap(PushTaskResResponse& a, PushTaskResResponse& b) { + a.Swap(&b); + } + inline void Swap(PushTaskResResponse* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(PushTaskResResponse* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline PushTaskResResponse* New() const final { + return new PushTaskResResponse(); + } + + PushTaskResResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const PushTaskResResponse& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const PushTaskResResponse& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); 
+ void SetCachedSize(int size) const final; + void InternalSwap(PushTaskResResponse* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.PushTaskResResponse"; + } + protected: + explicit PushTaskResResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + + // accessors ------------------------------------------------------- + + enum : int { + kResultsFieldNumber = 2, + kReconnectFieldNumber = 1, + }; + // map results = 2; + int results_size() const; + private: + int _internal_results_size() const; + public: + void clear_results(); + private: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >& + _internal_results() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >* + _internal_mutable_results(); + public: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >& + results() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >* + mutable_results(); + + // .flwr.proto.Reconnect reconnect = 1; + bool has_reconnect() const; + private: + bool _internal_has_reconnect() const; + public: + void clear_reconnect(); + const ::flwr::proto::Reconnect& reconnect() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::Reconnect* release_reconnect(); + ::flwr::proto::Reconnect* mutable_reconnect(); + void set_allocated_reconnect(::flwr::proto::Reconnect* reconnect); + private: + const ::flwr::proto::Reconnect& _internal_reconnect() const; + 
::flwr::proto::Reconnect* _internal_mutable_reconnect(); + public: + void unsafe_arena_set_allocated_reconnect( + ::flwr::proto::Reconnect* reconnect); + ::flwr::proto::Reconnect* unsafe_arena_release_reconnect(); + + // @@protoc_insertion_point(class_scope:flwr.proto.PushTaskResResponse) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::internal::MapField< + PushTaskResResponse_ResultsEntry_DoNotUse, + std::string, ::PROTOBUF_NAMESPACE_ID::uint32, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_UINT32> results_; + ::flwr::proto::Reconnect* reconnect_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2ffleet_2eproto; +}; +// ------------------------------------------------------------------- + +class Run final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.Run) */ { + public: + inline Run() : Run(nullptr) {} + ~Run() override; + explicit constexpr Run(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + Run(const Run& from); + Run(Run&& from) noexcept + : Run() { + *this = ::std::move(from); + } + + inline Run& operator=(const Run& from) { + CopyFrom(from); + return *this; + } + inline Run& operator=(Run&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return 
default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const Run& default_instance() { + return *internal_default_instance(); + } + static inline const Run* internal_default_instance() { + return reinterpret_cast( + &_Run_default_instance_); + } + static constexpr int kIndexInFileMessages = + 11; + + friend void swap(Run& a, Run& b) { + a.Swap(&b); + } + inline void Swap(Run* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(Run* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline Run* New() const final { + return new Run(); + } + + Run* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const Run& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const Run& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(Run* 
other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.Run"; + } + protected: + explicit Run(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kFabIdFieldNumber = 2, + kFabVersionFieldNumber = 3, + kRunIdFieldNumber = 1, + }; + // string fab_id = 2; + void clear_fab_id(); + const std::string& fab_id() const; + template + void set_fab_id(ArgT0&& arg0, ArgT... args); + std::string* mutable_fab_id(); + PROTOBUF_MUST_USE_RESULT std::string* release_fab_id(); + void set_allocated_fab_id(std::string* fab_id); + private: + const std::string& _internal_fab_id() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_fab_id(const std::string& value); + std::string* _internal_mutable_fab_id(); + public: + + // string fab_version = 3; + void clear_fab_version(); + const std::string& fab_version() const; + template + void set_fab_version(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_fab_version(); + PROTOBUF_MUST_USE_RESULT std::string* release_fab_version(); + void set_allocated_fab_version(std::string* fab_version); + private: + const std::string& _internal_fab_version() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_fab_version(const std::string& value); + std::string* _internal_mutable_fab_version(); + public: + + // sint64 run_id = 1; + void clear_run_id(); + ::PROTOBUF_NAMESPACE_ID::int64 run_id() const; + void set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value); + private: + ::PROTOBUF_NAMESPACE_ID::int64 _internal_run_id() const; + void _internal_set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value); + public: + + // @@protoc_insertion_point(class_scope:flwr.proto.Run) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr fab_id_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr fab_version_; + ::PROTOBUF_NAMESPACE_ID::int64 run_id_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2ffleet_2eproto; +}; +// ------------------------------------------------------------------- + +class GetRunRequest final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.GetRunRequest) */ { + public: + inline GetRunRequest() : GetRunRequest(nullptr) {} + ~GetRunRequest() override; + explicit constexpr GetRunRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + GetRunRequest(const GetRunRequest& from); + GetRunRequest(GetRunRequest&& from) noexcept + : GetRunRequest() { + *this = ::std::move(from); + } + + inline GetRunRequest& operator=(const GetRunRequest& from) { + CopyFrom(from); + return *this; + } + inline GetRunRequest& operator=(GetRunRequest&& from) noexcept { + if (this == &from) return *this; + if 
(GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const GetRunRequest& default_instance() { + return *internal_default_instance(); + } + static inline const GetRunRequest* internal_default_instance() { + return reinterpret_cast( + &_GetRunRequest_default_instance_); + } + static constexpr int kIndexInFileMessages = + 12; + + friend void swap(GetRunRequest& a, GetRunRequest& b) { + a.Swap(&b); + } + inline void Swap(GetRunRequest* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(GetRunRequest* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline GetRunRequest* New() const final { + return new GetRunRequest(); + } + + GetRunRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const GetRunRequest& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const GetRunRequest& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool 
IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(GetRunRequest* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.GetRunRequest"; + } + protected: + explicit GetRunRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kRunIdFieldNumber = 1, + }; + // sint64 run_id = 1; + void clear_run_id(); + ::PROTOBUF_NAMESPACE_ID::int64 run_id() const; + void set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value); + private: + ::PROTOBUF_NAMESPACE_ID::int64 _internal_run_id() const; + void _internal_set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value); + public: + + // @@protoc_insertion_point(class_scope:flwr.proto.GetRunRequest) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::int64 run_id_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize 
_cached_size_; + friend struct ::TableStruct_flwr_2fproto_2ffleet_2eproto; +}; +// ------------------------------------------------------------------- + +class GetRunResponse final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.GetRunResponse) */ { + public: + inline GetRunResponse() : GetRunResponse(nullptr) {} + ~GetRunResponse() override; + explicit constexpr GetRunResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + GetRunResponse(const GetRunResponse& from); + GetRunResponse(GetRunResponse&& from) noexcept + : GetRunResponse() { + *this = ::std::move(from); + } + + inline GetRunResponse& operator=(const GetRunResponse& from) { + CopyFrom(from); + return *this; + } + inline GetRunResponse& operator=(GetRunResponse&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const GetRunResponse& default_instance() { + return *internal_default_instance(); + } + static inline const GetRunResponse* internal_default_instance() { + return reinterpret_cast( + &_GetRunResponse_default_instance_); + } + static constexpr int kIndexInFileMessages = + 13; + + friend void swap(GetRunResponse& a, GetRunResponse& b) { + a.Swap(&b); + } + inline void Swap(GetRunResponse* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + 
::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(GetRunResponse* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline GetRunResponse* New() const final { + return new GetRunResponse(); + } + + GetRunResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); } using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; - void CopyFrom(const PushTaskResResponse& from); + void CopyFrom(const GetRunResponse& from); using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - void MergeFrom(const PushTaskResResponse& from); + void MergeFrom(const GetRunResponse& from); private: static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); public: @@ -1267,13 +2115,13 @@ class PushTaskResResponse final : void SharedCtor(); void SharedDtor(); void SetCachedSize(int size) const final; - void InternalSwap(PushTaskResResponse* other); + void InternalSwap(GetRunResponse* other); friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "flwr.proto.PushTaskResResponse"; + return "flwr.proto.GetRunResponse"; } protected: - explicit PushTaskResResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + explicit GetRunResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); private: static void ArenaDtor(void* object); @@ -1287,61 +2135,37 @@ class PushTaskResResponse final : // nested types ---------------------------------------------------- - // accessors ------------------------------------------------------- enum : int { - kResultsFieldNumber = 2, - kReconnectFieldNumber = 1, + kRunFieldNumber = 1, }; - // map results = 2; - int results_size() const; - private: - int _internal_results_size() const; - public: - void 
clear_results(); - private: - const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >& - _internal_results() const; - ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >* - _internal_mutable_results(); - public: - const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >& - results() const; - ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >* - mutable_results(); - - // .flwr.proto.Reconnect reconnect = 1; - bool has_reconnect() const; + // .flwr.proto.Run run = 1; + bool has_run() const; private: - bool _internal_has_reconnect() const; + bool _internal_has_run() const; public: - void clear_reconnect(); - const ::flwr::proto::Reconnect& reconnect() const; - PROTOBUF_MUST_USE_RESULT ::flwr::proto::Reconnect* release_reconnect(); - ::flwr::proto::Reconnect* mutable_reconnect(); - void set_allocated_reconnect(::flwr::proto::Reconnect* reconnect); + void clear_run(); + const ::flwr::proto::Run& run() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::Run* release_run(); + ::flwr::proto::Run* mutable_run(); + void set_allocated_run(::flwr::proto::Run* run); private: - const ::flwr::proto::Reconnect& _internal_reconnect() const; - ::flwr::proto::Reconnect* _internal_mutable_reconnect(); + const ::flwr::proto::Run& _internal_run() const; + ::flwr::proto::Run* _internal_mutable_run(); public: - void unsafe_arena_set_allocated_reconnect( - ::flwr::proto::Reconnect* reconnect); - ::flwr::proto::Reconnect* unsafe_arena_release_reconnect(); + void unsafe_arena_set_allocated_run( + ::flwr::proto::Run* run); + ::flwr::proto::Run* unsafe_arena_release_run(); - // @@protoc_insertion_point(class_scope:flwr.proto.PushTaskResResponse) + // @@protoc_insertion_point(class_scope:flwr.proto.GetRunResponse) private: class _Internal; template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; typedef void InternalArenaConstructable_; typedef void DestructorSkippable_; - 
::PROTOBUF_NAMESPACE_ID::internal::MapField< - PushTaskResResponse_ResultsEntry_DoNotUse, - std::string, ::PROTOBUF_NAMESPACE_ID::uint32, - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING, - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_UINT32> results_; - ::flwr::proto::Reconnect* reconnect_; + ::flwr::proto::Run* run_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; friend struct ::TableStruct_flwr_2fproto_2ffleet_2eproto; }; @@ -1395,7 +2219,7 @@ class Reconnect final : &_Reconnect_default_instance_); } static constexpr int kIndexInFileMessages = - 9; + 14; friend void swap(Reconnect& a, Reconnect& b) { a.Swap(&b); @@ -1499,6 +2323,26 @@ class Reconnect final : #endif // __GNUC__ // CreateNodeRequest +// double ping_interval = 1; +inline void CreateNodeRequest::clear_ping_interval() { + ping_interval_ = 0; +} +inline double CreateNodeRequest::_internal_ping_interval() const { + return ping_interval_; +} +inline double CreateNodeRequest::ping_interval() const { + // @@protoc_insertion_point(field_get:flwr.proto.CreateNodeRequest.ping_interval) + return _internal_ping_interval(); +} +inline void CreateNodeRequest::_internal_set_ping_interval(double value) { + + ping_interval_ = value; +} +inline void CreateNodeRequest::set_ping_interval(double value) { + _internal_set_ping_interval(value); + // @@protoc_insertion_point(field_set:flwr.proto.CreateNodeRequest.ping_interval) +} + // ------------------------------------------------------------------- // CreateNodeResponse @@ -1685,6 +2529,140 @@ inline void DeleteNodeRequest::set_allocated_node(::flwr::proto::Node* node) { // ------------------------------------------------------------------- +// PingRequest + +// .flwr.proto.Node node = 1; +inline bool PingRequest::_internal_has_node() const { + return this != internal_default_instance() && node_ != nullptr; +} +inline bool PingRequest::has_node() const { + return _internal_has_node(); +} +inline const 
::flwr::proto::Node& PingRequest::_internal_node() const { + const ::flwr::proto::Node* p = node_; + return p != nullptr ? *p : reinterpret_cast( + ::flwr::proto::_Node_default_instance_); +} +inline const ::flwr::proto::Node& PingRequest::node() const { + // @@protoc_insertion_point(field_get:flwr.proto.PingRequest.node) + return _internal_node(); +} +inline void PingRequest::unsafe_arena_set_allocated_node( + ::flwr::proto::Node* node) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(node_); + } + node_ = node; + if (node) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.PingRequest.node) +} +inline ::flwr::proto::Node* PingRequest::release_node() { + + ::flwr::proto::Node* temp = node_; + node_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::flwr::proto::Node* PingRequest::unsafe_arena_release_node() { + // @@protoc_insertion_point(field_release:flwr.proto.PingRequest.node) + + ::flwr::proto::Node* temp = node_; + node_ = nullptr; + return temp; +} +inline ::flwr::proto::Node* PingRequest::_internal_mutable_node() { + + if (node_ == nullptr) { + auto* p = CreateMaybeMessage<::flwr::proto::Node>(GetArenaForAllocation()); + node_ = p; + } + return node_; +} +inline ::flwr::proto::Node* PingRequest::mutable_node() { + ::flwr::proto::Node* _msg = _internal_mutable_node(); + // @@protoc_insertion_point(field_mutable:flwr.proto.PingRequest.node) + return _msg; +} +inline void PingRequest::set_allocated_node(::flwr::proto::Node* node) 
{ + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(node_); + } + if (node) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper< + ::PROTOBUF_NAMESPACE_ID::MessageLite>::GetOwningArena( + reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(node)); + if (message_arena != submessage_arena) { + node = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, node, submessage_arena); + } + + } else { + + } + node_ = node; + // @@protoc_insertion_point(field_set_allocated:flwr.proto.PingRequest.node) +} + +// double ping_interval = 2; +inline void PingRequest::clear_ping_interval() { + ping_interval_ = 0; +} +inline double PingRequest::_internal_ping_interval() const { + return ping_interval_; +} +inline double PingRequest::ping_interval() const { + // @@protoc_insertion_point(field_get:flwr.proto.PingRequest.ping_interval) + return _internal_ping_interval(); +} +inline void PingRequest::_internal_set_ping_interval(double value) { + + ping_interval_ = value; +} +inline void PingRequest::set_ping_interval(double value) { + _internal_set_ping_interval(value); + // @@protoc_insertion_point(field_set:flwr.proto.PingRequest.ping_interval) +} + +// ------------------------------------------------------------------- + +// PingResponse + +// bool success = 1; +inline void PingResponse::clear_success() { + success_ = false; +} +inline bool PingResponse::_internal_success() const { + return success_; +} +inline bool PingResponse::success() const { + // @@protoc_insertion_point(field_get:flwr.proto.PingResponse.success) + return _internal_success(); +} +inline void PingResponse::_internal_set_success(bool value) { + + success_ = value; +} +inline void PingResponse::set_success(bool value) { + _internal_set_success(value); + // 
@@protoc_insertion_point(field_set:flwr.proto.PingResponse.success) +} + +// ------------------------------------------------------------------- + // PullTaskInsRequest // .flwr.proto.Node node = 1; @@ -2147,6 +3125,240 @@ PushTaskResResponse::mutable_results() { // ------------------------------------------------------------------- +// Run + +// sint64 run_id = 1; +inline void Run::clear_run_id() { + run_id_ = int64_t{0}; +} +inline ::PROTOBUF_NAMESPACE_ID::int64 Run::_internal_run_id() const { + return run_id_; +} +inline ::PROTOBUF_NAMESPACE_ID::int64 Run::run_id() const { + // @@protoc_insertion_point(field_get:flwr.proto.Run.run_id) + return _internal_run_id(); +} +inline void Run::_internal_set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value) { + + run_id_ = value; +} +inline void Run::set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value) { + _internal_set_run_id(value); + // @@protoc_insertion_point(field_set:flwr.proto.Run.run_id) +} + +// string fab_id = 2; +inline void Run::clear_fab_id() { + fab_id_.ClearToEmpty(); +} +inline const std::string& Run::fab_id() const { + // @@protoc_insertion_point(field_get:flwr.proto.Run.fab_id) + return _internal_fab_id(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void Run::set_fab_id(ArgT0&& arg0, ArgT... 
args) { + + fab_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:flwr.proto.Run.fab_id) +} +inline std::string* Run::mutable_fab_id() { + std::string* _s = _internal_mutable_fab_id(); + // @@protoc_insertion_point(field_mutable:flwr.proto.Run.fab_id) + return _s; +} +inline const std::string& Run::_internal_fab_id() const { + return fab_id_.Get(); +} +inline void Run::_internal_set_fab_id(const std::string& value) { + + fab_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); +} +inline std::string* Run::_internal_mutable_fab_id() { + + return fab_id_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); +} +inline std::string* Run::release_fab_id() { + // @@protoc_insertion_point(field_release:flwr.proto.Run.fab_id) + return fab_id_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); +} +inline void Run::set_allocated_fab_id(std::string* fab_id) { + if (fab_id != nullptr) { + + } else { + + } + fab_id_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), fab_id, + GetArenaForAllocation()); + // @@protoc_insertion_point(field_set_allocated:flwr.proto.Run.fab_id) +} + +// string fab_version = 3; +inline void Run::clear_fab_version() { + fab_version_.ClearToEmpty(); +} +inline const std::string& Run::fab_version() const { + // @@protoc_insertion_point(field_get:flwr.proto.Run.fab_version) + return _internal_fab_version(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void Run::set_fab_version(ArgT0&& arg0, ArgT... 
args) { + + fab_version_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:flwr.proto.Run.fab_version) +} +inline std::string* Run::mutable_fab_version() { + std::string* _s = _internal_mutable_fab_version(); + // @@protoc_insertion_point(field_mutable:flwr.proto.Run.fab_version) + return _s; +} +inline const std::string& Run::_internal_fab_version() const { + return fab_version_.Get(); +} +inline void Run::_internal_set_fab_version(const std::string& value) { + + fab_version_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); +} +inline std::string* Run::_internal_mutable_fab_version() { + + return fab_version_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); +} +inline std::string* Run::release_fab_version() { + // @@protoc_insertion_point(field_release:flwr.proto.Run.fab_version) + return fab_version_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); +} +inline void Run::set_allocated_fab_version(std::string* fab_version) { + if (fab_version != nullptr) { + + } else { + + } + fab_version_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), fab_version, + GetArenaForAllocation()); + // @@protoc_insertion_point(field_set_allocated:flwr.proto.Run.fab_version) +} + +// ------------------------------------------------------------------- + +// GetRunRequest + +// sint64 run_id = 1; +inline void GetRunRequest::clear_run_id() { + run_id_ = int64_t{0}; +} +inline ::PROTOBUF_NAMESPACE_ID::int64 GetRunRequest::_internal_run_id() const { + return run_id_; +} +inline ::PROTOBUF_NAMESPACE_ID::int64 GetRunRequest::run_id() const { + // @@protoc_insertion_point(field_get:flwr.proto.GetRunRequest.run_id) + return _internal_run_id(); +} +inline void 
GetRunRequest::_internal_set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value) { + + run_id_ = value; +} +inline void GetRunRequest::set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value) { + _internal_set_run_id(value); + // @@protoc_insertion_point(field_set:flwr.proto.GetRunRequest.run_id) +} + +// ------------------------------------------------------------------- + +// GetRunResponse + +// .flwr.proto.Run run = 1; +inline bool GetRunResponse::_internal_has_run() const { + return this != internal_default_instance() && run_ != nullptr; +} +inline bool GetRunResponse::has_run() const { + return _internal_has_run(); +} +inline void GetRunResponse::clear_run() { + if (GetArenaForAllocation() == nullptr && run_ != nullptr) { + delete run_; + } + run_ = nullptr; +} +inline const ::flwr::proto::Run& GetRunResponse::_internal_run() const { + const ::flwr::proto::Run* p = run_; + return p != nullptr ? *p : reinterpret_cast( + ::flwr::proto::_Run_default_instance_); +} +inline const ::flwr::proto::Run& GetRunResponse::run() const { + // @@protoc_insertion_point(field_get:flwr.proto.GetRunResponse.run) + return _internal_run(); +} +inline void GetRunResponse::unsafe_arena_set_allocated_run( + ::flwr::proto::Run* run) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(run_); + } + run_ = run; + if (run) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.GetRunResponse.run) +} +inline ::flwr::proto::Run* GetRunResponse::release_run() { + + ::flwr::proto::Run* temp = run_; + run_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + 
} +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::flwr::proto::Run* GetRunResponse::unsafe_arena_release_run() { + // @@protoc_insertion_point(field_release:flwr.proto.GetRunResponse.run) + + ::flwr::proto::Run* temp = run_; + run_ = nullptr; + return temp; +} +inline ::flwr::proto::Run* GetRunResponse::_internal_mutable_run() { + + if (run_ == nullptr) { + auto* p = CreateMaybeMessage<::flwr::proto::Run>(GetArenaForAllocation()); + run_ = p; + } + return run_; +} +inline ::flwr::proto::Run* GetRunResponse::mutable_run() { + ::flwr::proto::Run* _msg = _internal_mutable_run(); + // @@protoc_insertion_point(field_mutable:flwr.proto.GetRunResponse.run) + return _msg; +} +inline void GetRunResponse::set_allocated_run(::flwr::proto::Run* run) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete run_; + } + if (run) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Run>::GetOwningArena(run); + if (message_arena != submessage_arena) { + run = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, run, submessage_arena); + } + + } else { + + } + run_ = run; + // @@protoc_insertion_point(field_set_allocated:flwr.proto.GetRunResponse.run) +} + +// ------------------------------------------------------------------- + // Reconnect // uint64 reconnect = 1; @@ -2190,6 +3402,16 @@ inline void Reconnect::set_reconnect(::PROTOBUF_NAMESPACE_ID::uint64 value) { // ------------------------------------------------------------------- +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + // 
@@protoc_insertion_point(namespace_scope) diff --git a/src/cc/flwr/include/flwr/proto/recordset.grpc.pb.cc b/src/cc/flwr/include/flwr/proto/recordset.grpc.pb.cc new file mode 100644 index 000000000000..4fb909308dc2 --- /dev/null +++ b/src/cc/flwr/include/flwr/proto/recordset.grpc.pb.cc @@ -0,0 +1,27 @@ +// Generated by the gRPC C++ plugin. +// If you make any local change, they will be lost. +// source: flwr/proto/recordset.proto + +#include "flwr/proto/recordset.pb.h" +#include "flwr/proto/recordset.grpc.pb.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +namespace flwr { +namespace proto { + +} // namespace flwr +} // namespace proto + diff --git a/src/cc/flwr/include/flwr/proto/recordset.grpc.pb.h b/src/cc/flwr/include/flwr/proto/recordset.grpc.pb.h new file mode 100644 index 000000000000..0aeae1ab16a6 --- /dev/null +++ b/src/cc/flwr/include/flwr/proto/recordset.grpc.pb.h @@ -0,0 +1,51 @@ +// Generated by the gRPC C++ plugin. +// If you make any local change, they will be lost. +// source: flwr/proto/recordset.proto +// Original file comments: +// Copyright 2024 Flower Labs GmbH. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// ============================================================================== +// +#ifndef GRPC_flwr_2fproto_2frecordset_2eproto__INCLUDED +#define GRPC_flwr_2fproto_2frecordset_2eproto__INCLUDED + +#include "flwr/proto/recordset.pb.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace flwr { +namespace proto { + +} // namespace proto +} // namespace flwr + + +#endif // GRPC_flwr_2fproto_2frecordset_2eproto__INCLUDED diff --git a/src/cc/flwr/include/flwr/proto/recordset.pb.cc b/src/cc/flwr/include/flwr/proto/recordset.pb.cc new file mode 100644 index 000000000000..a7cf72084d7a --- /dev/null +++ b/src/cc/flwr/include/flwr/proto/recordset.pb.cc @@ -0,0 +1,3907 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: flwr/proto/recordset.proto + +#include "flwr/proto/recordset.pb.h" + +#include + +#include +#include +#include +#include +#include +#include +#include +// @@protoc_insertion_point(includes) +#include + +PROTOBUF_PRAGMA_INIT_SEG +namespace flwr { +namespace proto { +constexpr DoubleList::DoubleList( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : vals_(){} +struct DoubleListDefaultTypeInternal { + constexpr DoubleListDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~DoubleListDefaultTypeInternal() {} + union { + DoubleList _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT DoubleListDefaultTypeInternal _DoubleList_default_instance_; +constexpr Sint64List::Sint64List( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : vals_() + , _vals_cached_byte_size_(0){} +struct Sint64ListDefaultTypeInternal { + constexpr Sint64ListDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~Sint64ListDefaultTypeInternal() {} + union { + Sint64List _instance; + }; +}; 
+PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT Sint64ListDefaultTypeInternal _Sint64List_default_instance_; +constexpr BoolList::BoolList( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : vals_(){} +struct BoolListDefaultTypeInternal { + constexpr BoolListDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~BoolListDefaultTypeInternal() {} + union { + BoolList _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT BoolListDefaultTypeInternal _BoolList_default_instance_; +constexpr StringList::StringList( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : vals_(){} +struct StringListDefaultTypeInternal { + constexpr StringListDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~StringListDefaultTypeInternal() {} + union { + StringList _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT StringListDefaultTypeInternal _StringList_default_instance_; +constexpr BytesList::BytesList( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : vals_(){} +struct BytesListDefaultTypeInternal { + constexpr BytesListDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~BytesListDefaultTypeInternal() {} + union { + BytesList _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT BytesListDefaultTypeInternal _BytesList_default_instance_; +constexpr Array::Array( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : shape_() + , _shape_cached_byte_size_(0) + , dtype_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) + , stype_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) + , data_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string){} +struct ArrayDefaultTypeInternal { + constexpr ArrayDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~ArrayDefaultTypeInternal() {} + 
union { + Array _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT ArrayDefaultTypeInternal _Array_default_instance_; +constexpr MetricsRecordValue::MetricsRecordValue( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : _oneof_case_{}{} +struct MetricsRecordValueDefaultTypeInternal { + constexpr MetricsRecordValueDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~MetricsRecordValueDefaultTypeInternal() {} + union { + MetricsRecordValue _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT MetricsRecordValueDefaultTypeInternal _MetricsRecordValue_default_instance_; +constexpr ConfigsRecordValue::ConfigsRecordValue( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : _oneof_case_{}{} +struct ConfigsRecordValueDefaultTypeInternal { + constexpr ConfigsRecordValueDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~ConfigsRecordValueDefaultTypeInternal() {} + union { + ConfigsRecordValue _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT ConfigsRecordValueDefaultTypeInternal _ConfigsRecordValue_default_instance_; +constexpr ParametersRecord::ParametersRecord( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : data_keys_() + , data_values_(){} +struct ParametersRecordDefaultTypeInternal { + constexpr ParametersRecordDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~ParametersRecordDefaultTypeInternal() {} + union { + ParametersRecord _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT ParametersRecordDefaultTypeInternal _ParametersRecord_default_instance_; +constexpr MetricsRecord_DataEntry_DoNotUse::MetricsRecord_DataEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized){} +struct MetricsRecord_DataEntry_DoNotUseDefaultTypeInternal { + constexpr MetricsRecord_DataEntry_DoNotUseDefaultTypeInternal() + : 
_instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~MetricsRecord_DataEntry_DoNotUseDefaultTypeInternal() {} + union { + MetricsRecord_DataEntry_DoNotUse _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT MetricsRecord_DataEntry_DoNotUseDefaultTypeInternal _MetricsRecord_DataEntry_DoNotUse_default_instance_; +constexpr MetricsRecord::MetricsRecord( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : data_(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}){} +struct MetricsRecordDefaultTypeInternal { + constexpr MetricsRecordDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~MetricsRecordDefaultTypeInternal() {} + union { + MetricsRecord _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT MetricsRecordDefaultTypeInternal _MetricsRecord_default_instance_; +constexpr ConfigsRecord_DataEntry_DoNotUse::ConfigsRecord_DataEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized){} +struct ConfigsRecord_DataEntry_DoNotUseDefaultTypeInternal { + constexpr ConfigsRecord_DataEntry_DoNotUseDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~ConfigsRecord_DataEntry_DoNotUseDefaultTypeInternal() {} + union { + ConfigsRecord_DataEntry_DoNotUse _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT ConfigsRecord_DataEntry_DoNotUseDefaultTypeInternal _ConfigsRecord_DataEntry_DoNotUse_default_instance_; +constexpr ConfigsRecord::ConfigsRecord( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : data_(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}){} +struct ConfigsRecordDefaultTypeInternal { + constexpr ConfigsRecordDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~ConfigsRecordDefaultTypeInternal() {} + union { + ConfigsRecord _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT 
ConfigsRecordDefaultTypeInternal _ConfigsRecord_default_instance_; +constexpr RecordSet_ParametersEntry_DoNotUse::RecordSet_ParametersEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized){} +struct RecordSet_ParametersEntry_DoNotUseDefaultTypeInternal { + constexpr RecordSet_ParametersEntry_DoNotUseDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~RecordSet_ParametersEntry_DoNotUseDefaultTypeInternal() {} + union { + RecordSet_ParametersEntry_DoNotUse _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT RecordSet_ParametersEntry_DoNotUseDefaultTypeInternal _RecordSet_ParametersEntry_DoNotUse_default_instance_; +constexpr RecordSet_MetricsEntry_DoNotUse::RecordSet_MetricsEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized){} +struct RecordSet_MetricsEntry_DoNotUseDefaultTypeInternal { + constexpr RecordSet_MetricsEntry_DoNotUseDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~RecordSet_MetricsEntry_DoNotUseDefaultTypeInternal() {} + union { + RecordSet_MetricsEntry_DoNotUse _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT RecordSet_MetricsEntry_DoNotUseDefaultTypeInternal _RecordSet_MetricsEntry_DoNotUse_default_instance_; +constexpr RecordSet_ConfigsEntry_DoNotUse::RecordSet_ConfigsEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized){} +struct RecordSet_ConfigsEntry_DoNotUseDefaultTypeInternal { + constexpr RecordSet_ConfigsEntry_DoNotUseDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~RecordSet_ConfigsEntry_DoNotUseDefaultTypeInternal() {} + union { + RecordSet_ConfigsEntry_DoNotUse _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT RecordSet_ConfigsEntry_DoNotUseDefaultTypeInternal _RecordSet_ConfigsEntry_DoNotUse_default_instance_; +constexpr RecordSet::RecordSet( + 
::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : parameters_(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) + , metrics_(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) + , configs_(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}){} +struct RecordSetDefaultTypeInternal { + constexpr RecordSetDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~RecordSetDefaultTypeInternal() {} + union { + RecordSet _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT RecordSetDefaultTypeInternal _RecordSet_default_instance_; +} // namespace proto +} // namespace flwr +static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_flwr_2fproto_2frecordset_2eproto[17]; +static constexpr ::PROTOBUF_NAMESPACE_ID::EnumDescriptor const** file_level_enum_descriptors_flwr_2fproto_2frecordset_2eproto = nullptr; +static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** file_level_service_descriptors_flwr_2fproto_2frecordset_2eproto = nullptr; + +const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_flwr_2fproto_2frecordset_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::DoubleList, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::DoubleList, vals_), + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::Sint64List, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::Sint64List, vals_), + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::BoolList, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + 
PROTOBUF_FIELD_OFFSET(::flwr::proto::BoolList, vals_), + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::StringList, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::StringList, vals_), + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::BytesList, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::BytesList, vals_), + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::Array, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::Array, dtype_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::Array, shape_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::Array, stype_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::Array, data_), + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::MetricsRecordValue, _internal_metadata_), + ~0u, // no _extensions_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::MetricsRecordValue, _oneof_case_[0]), + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + PROTOBUF_FIELD_OFFSET(::flwr::proto::MetricsRecordValue, value_), + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::ConfigsRecordValue, _internal_metadata_), + ~0u, // no _extensions_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::ConfigsRecordValue, _oneof_case_[0]), + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + 
::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + PROTOBUF_FIELD_OFFSET(::flwr::proto::ConfigsRecordValue, value_), + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::ParametersRecord, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::ParametersRecord, data_keys_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::ParametersRecord, data_values_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::MetricsRecord_DataEntry_DoNotUse, _has_bits_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::MetricsRecord_DataEntry_DoNotUse, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::MetricsRecord_DataEntry_DoNotUse, key_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::MetricsRecord_DataEntry_DoNotUse, value_), + 0, + 1, + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::MetricsRecord, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::MetricsRecord, data_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::ConfigsRecord_DataEntry_DoNotUse, _has_bits_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::ConfigsRecord_DataEntry_DoNotUse, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + 
~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::ConfigsRecord_DataEntry_DoNotUse, key_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::ConfigsRecord_DataEntry_DoNotUse, value_), + 0, + 1, + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::ConfigsRecord, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::ConfigsRecord, data_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_ParametersEntry_DoNotUse, _has_bits_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_ParametersEntry_DoNotUse, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_ParametersEntry_DoNotUse, key_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_ParametersEntry_DoNotUse, value_), + 0, + 1, + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_MetricsEntry_DoNotUse, _has_bits_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_MetricsEntry_DoNotUse, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_MetricsEntry_DoNotUse, key_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_MetricsEntry_DoNotUse, value_), + 0, + 1, + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_ConfigsEntry_DoNotUse, _has_bits_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_ConfigsEntry_DoNotUse, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_ConfigsEntry_DoNotUse, key_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_ConfigsEntry_DoNotUse, value_), + 0, + 1, + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet, 
_internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet, parameters_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet, metrics_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet, configs_), +}; +static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { + { 0, -1, -1, sizeof(::flwr::proto::DoubleList)}, + { 7, -1, -1, sizeof(::flwr::proto::Sint64List)}, + { 14, -1, -1, sizeof(::flwr::proto::BoolList)}, + { 21, -1, -1, sizeof(::flwr::proto::StringList)}, + { 28, -1, -1, sizeof(::flwr::proto::BytesList)}, + { 35, -1, -1, sizeof(::flwr::proto::Array)}, + { 45, -1, -1, sizeof(::flwr::proto::MetricsRecordValue)}, + { 56, -1, -1, sizeof(::flwr::proto::ConfigsRecordValue)}, + { 73, -1, -1, sizeof(::flwr::proto::ParametersRecord)}, + { 81, 89, -1, sizeof(::flwr::proto::MetricsRecord_DataEntry_DoNotUse)}, + { 91, -1, -1, sizeof(::flwr::proto::MetricsRecord)}, + { 98, 106, -1, sizeof(::flwr::proto::ConfigsRecord_DataEntry_DoNotUse)}, + { 108, -1, -1, sizeof(::flwr::proto::ConfigsRecord)}, + { 115, 123, -1, sizeof(::flwr::proto::RecordSet_ParametersEntry_DoNotUse)}, + { 125, 133, -1, sizeof(::flwr::proto::RecordSet_MetricsEntry_DoNotUse)}, + { 135, 143, -1, sizeof(::flwr::proto::RecordSet_ConfigsEntry_DoNotUse)}, + { 145, -1, -1, sizeof(::flwr::proto::RecordSet)}, +}; + +static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = { + reinterpret_cast(&::flwr::proto::_DoubleList_default_instance_), + reinterpret_cast(&::flwr::proto::_Sint64List_default_instance_), + reinterpret_cast(&::flwr::proto::_BoolList_default_instance_), + reinterpret_cast(&::flwr::proto::_StringList_default_instance_), + reinterpret_cast(&::flwr::proto::_BytesList_default_instance_), + reinterpret_cast(&::flwr::proto::_Array_default_instance_), + 
reinterpret_cast(&::flwr::proto::_MetricsRecordValue_default_instance_), + reinterpret_cast(&::flwr::proto::_ConfigsRecordValue_default_instance_), + reinterpret_cast(&::flwr::proto::_ParametersRecord_default_instance_), + reinterpret_cast(&::flwr::proto::_MetricsRecord_DataEntry_DoNotUse_default_instance_), + reinterpret_cast(&::flwr::proto::_MetricsRecord_default_instance_), + reinterpret_cast(&::flwr::proto::_ConfigsRecord_DataEntry_DoNotUse_default_instance_), + reinterpret_cast(&::flwr::proto::_ConfigsRecord_default_instance_), + reinterpret_cast(&::flwr::proto::_RecordSet_ParametersEntry_DoNotUse_default_instance_), + reinterpret_cast(&::flwr::proto::_RecordSet_MetricsEntry_DoNotUse_default_instance_), + reinterpret_cast(&::flwr::proto::_RecordSet_ConfigsEntry_DoNotUse_default_instance_), + reinterpret_cast(&::flwr::proto::_RecordSet_default_instance_), +}; + +const char descriptor_table_protodef_flwr_2fproto_2frecordset_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = + "\n\032flwr/proto/recordset.proto\022\nflwr.proto" + "\"\032\n\nDoubleList\022\014\n\004vals\030\001 \003(\001\"\032\n\nSint64Li" + "st\022\014\n\004vals\030\001 \003(\022\"\030\n\010BoolList\022\014\n\004vals\030\001 \003" + "(\010\"\032\n\nStringList\022\014\n\004vals\030\001 \003(\t\"\031\n\tBytesL" + "ist\022\014\n\004vals\030\001 \003(\014\"B\n\005Array\022\r\n\005dtype\030\001 \001(" + "\t\022\r\n\005shape\030\002 \003(\005\022\r\n\005stype\030\003 \001(\t\022\014\n\004data\030" + "\004 \001(\014\"\237\001\n\022MetricsRecordValue\022\020\n\006double\030\001" + " \001(\001H\000\022\020\n\006sint64\030\002 \001(\022H\000\022-\n\013double_list\030" + "\025 \001(\0132\026.flwr.proto.DoubleListH\000\022-\n\013sint6" + "4_list\030\026 \001(\0132\026.flwr.proto.Sint64ListH\000B\007" + "\n\005value\"\331\002\n\022ConfigsRecordValue\022\020\n\006double" + "\030\001 \001(\001H\000\022\020\n\006sint64\030\002 \001(\022H\000\022\016\n\004bool\030\003 \001(\010" + 
"H\000\022\020\n\006string\030\004 \001(\tH\000\022\017\n\005bytes\030\005 \001(\014H\000\022-\n" + "\013double_list\030\025 \001(\0132\026.flwr.proto.DoubleLi" + "stH\000\022-\n\013sint64_list\030\026 \001(\0132\026.flwr.proto.S" + "int64ListH\000\022)\n\tbool_list\030\027 \001(\0132\024.flwr.pr" + "oto.BoolListH\000\022-\n\013string_list\030\030 \001(\0132\026.fl" + "wr.proto.StringListH\000\022+\n\nbytes_list\030\031 \001(" + "\0132\025.flwr.proto.BytesListH\000B\007\n\005value\"M\n\020P" + "arametersRecord\022\021\n\tdata_keys\030\001 \003(\t\022&\n\013da" + "ta_values\030\002 \003(\0132\021.flwr.proto.Array\"\217\001\n\rM" + "etricsRecord\0221\n\004data\030\001 \003(\0132#.flwr.proto." + "MetricsRecord.DataEntry\032K\n\tDataEntry\022\013\n\003" + "key\030\001 \001(\t\022-\n\005value\030\002 \001(\0132\036.flwr.proto.Me" + "tricsRecordValue:\0028\001\"\217\001\n\rConfigsRecord\0221" + "\n\004data\030\001 \003(\0132#.flwr.proto.ConfigsRecord." + "DataEntry\032K\n\tDataEntry\022\013\n\003key\030\001 \001(\t\022-\n\005v" + "alue\030\002 \001(\0132\036.flwr.proto.ConfigsRecordVal" + "ue:\0028\001\"\227\003\n\tRecordSet\0229\n\nparameters\030\001 \003(\013" + "2%.flwr.proto.RecordSet.ParametersEntry\022" + "3\n\007metrics\030\002 \003(\0132\".flwr.proto.RecordSet." + "MetricsEntry\0223\n\007configs\030\003 \003(\0132\".flwr.pro" + "to.RecordSet.ConfigsEntry\032O\n\017ParametersE" + "ntry\022\013\n\003key\030\001 \001(\t\022+\n\005value\030\002 \001(\0132\034.flwr." 
+ "proto.ParametersRecord:\0028\001\032I\n\014MetricsEnt" + "ry\022\013\n\003key\030\001 \001(\t\022(\n\005value\030\002 \001(\0132\031.flwr.pr" + "oto.MetricsRecord:\0028\001\032I\n\014ConfigsEntry\022\013\n" + "\003key\030\001 \001(\t\022(\n\005value\030\002 \001(\0132\031.flwr.proto.C" + "onfigsRecord:\0028\001b\006proto3" + ; +static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_flwr_2fproto_2frecordset_2eproto_once; +const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_flwr_2fproto_2frecordset_2eproto = { + false, false, 1544, descriptor_table_protodef_flwr_2fproto_2frecordset_2eproto, "flwr/proto/recordset.proto", + &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, nullptr, 0, 17, + schemas, file_default_instances, TableStruct_flwr_2fproto_2frecordset_2eproto::offsets, + file_level_metadata_flwr_2fproto_2frecordset_2eproto, file_level_enum_descriptors_flwr_2fproto_2frecordset_2eproto, file_level_service_descriptors_flwr_2fproto_2frecordset_2eproto, +}; +PROTOBUF_ATTRIBUTE_WEAK const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable* descriptor_table_flwr_2fproto_2frecordset_2eproto_getter() { + return &descriptor_table_flwr_2fproto_2frecordset_2eproto; +} + +// Force running AddDescriptors() at dynamic initialization time. 
+PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_flwr_2fproto_2frecordset_2eproto(&descriptor_table_flwr_2fproto_2frecordset_2eproto); +namespace flwr { +namespace proto { + +// =================================================================== + +class DoubleList::_Internal { + public: +}; + +DoubleList::DoubleList(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + vals_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.DoubleList) +} +DoubleList::DoubleList(const DoubleList& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + vals_(from.vals_) { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + // @@protoc_insertion_point(copy_constructor:flwr.proto.DoubleList) +} + +void DoubleList::SharedCtor() { +} + +DoubleList::~DoubleList() { + // @@protoc_insertion_point(destructor:flwr.proto.DoubleList) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void DoubleList::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void DoubleList::ArenaDtor(void* object) { + DoubleList* _this = reinterpret_cast< DoubleList* >(object); + (void)_this; +} +void DoubleList::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void DoubleList::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void DoubleList::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.DoubleList) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + vals_.Clear(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* 
DoubleList::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // repeated double vals = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedDoubleParser(_internal_mutable_vals(), ptr, ctx); + CHK_(ptr); + } else if (static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 9) { + _internal_add_vals(::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr)); + ptr += sizeof(double); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* DoubleList::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.DoubleList) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // repeated double vals = 1; + if (this->_internal_vals_size() > 0) { + target = stream->WriteFixedPacked(1, _internal_vals(), target); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), 
target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.DoubleList) + return target; +} + +size_t DoubleList::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.DoubleList) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // repeated double vals = 1; + { + unsigned int count = static_cast(this->_internal_vals_size()); + size_t data_size = 8UL * count; + if (data_size > 0) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size( + static_cast<::PROTOBUF_NAMESPACE_ID::int32>(data_size)); + } + total_size += data_size; + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData DoubleList::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + DoubleList::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*DoubleList::GetClassData() const { return &_class_data_; } + +void DoubleList::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void DoubleList::MergeFrom(const DoubleList& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.DoubleList) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + vals_.MergeFrom(from.vals_); + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void DoubleList::CopyFrom(const DoubleList& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.DoubleList) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool DoubleList::IsInitialized() const { + return true; +} + +void DoubleList::InternalSwap(DoubleList* other) { + using std::swap; + 
_internal_metadata_.InternalSwap(&other->_internal_metadata_); + vals_.InternalSwap(&other->vals_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata DoubleList::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[0]); +} + +// =================================================================== + +class Sint64List::_Internal { + public: +}; + +Sint64List::Sint64List(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + vals_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.Sint64List) +} +Sint64List::Sint64List(const Sint64List& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + vals_(from.vals_) { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + // @@protoc_insertion_point(copy_constructor:flwr.proto.Sint64List) +} + +void Sint64List::SharedCtor() { +} + +Sint64List::~Sint64List() { + // @@protoc_insertion_point(destructor:flwr.proto.Sint64List) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void Sint64List::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void Sint64List::ArenaDtor(void* object) { + Sint64List* _this = reinterpret_cast< Sint64List* >(object); + (void)_this; +} +void Sint64List::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void Sint64List::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void Sint64List::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.Sint64List) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about 
cached_has_bits being unused + (void) cached_has_bits; + + vals_.Clear(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* Sint64List::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // repeated sint64 vals = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedSInt64Parser(_internal_mutable_vals(), ptr, ctx); + CHK_(ptr); + } else if (static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8) { + _internal_add_vals(::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr)); + CHK_(ptr); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* Sint64List::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.Sint64List) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // repeated sint64 vals = 1; + { + int byte_size = _vals_cached_byte_size_.load(std::memory_order_relaxed); + if (byte_size > 0) { + target = stream->WriteSInt64Packed( + 1, _internal_vals(), byte_size, target); + } + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) 
{ + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.Sint64List) + return target; +} + +size_t Sint64List::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.Sint64List) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // repeated sint64 vals = 1; + { + size_t data_size = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + SInt64Size(this->vals_); + if (data_size > 0) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size( + static_cast<::PROTOBUF_NAMESPACE_ID::int32>(data_size)); + } + int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(data_size); + _vals_cached_byte_size_.store(cached_size, + std::memory_order_relaxed); + total_size += data_size; + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData Sint64List::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + Sint64List::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*Sint64List::GetClassData() const { return &_class_data_; } + +void Sint64List::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void Sint64List::MergeFrom(const Sint64List& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.Sint64List) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + vals_.MergeFrom(from.vals_); + 
_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void Sint64List::CopyFrom(const Sint64List& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.Sint64List) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool Sint64List::IsInitialized() const { + return true; +} + +void Sint64List::InternalSwap(Sint64List* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + vals_.InternalSwap(&other->vals_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata Sint64List::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[1]); +} + +// =================================================================== + +class BoolList::_Internal { + public: +}; + +BoolList::BoolList(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + vals_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.BoolList) +} +BoolList::BoolList(const BoolList& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + vals_(from.vals_) { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + // @@protoc_insertion_point(copy_constructor:flwr.proto.BoolList) +} + +void BoolList::SharedCtor() { +} + +BoolList::~BoolList() { + // @@protoc_insertion_point(destructor:flwr.proto.BoolList) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void BoolList::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void BoolList::ArenaDtor(void* object) { + BoolList* _this = 
reinterpret_cast< BoolList* >(object); + (void)_this; +} +void BoolList::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void BoolList::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void BoolList::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.BoolList) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + vals_.Clear(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* BoolList::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // repeated bool vals = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedBoolParser(_internal_mutable_vals(), ptr, ctx); + CHK_(ptr); + } else if (static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8) { + _internal_add_vals(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr)); + CHK_(ptr); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* BoolList::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // 
@@protoc_insertion_point(serialize_to_array_start:flwr.proto.BoolList) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // repeated bool vals = 1; + if (this->_internal_vals_size() > 0) { + target = stream->WriteFixedPacked(1, _internal_vals(), target); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.BoolList) + return target; +} + +size_t BoolList::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.BoolList) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // repeated bool vals = 1; + { + unsigned int count = static_cast<unsigned int>(this->_internal_vals_size()); + size_t data_size = 1UL * count; + if (data_size > 0) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size( + static_cast<::PROTOBUF_NAMESPACE_ID::int32>(data_size)); + } + total_size += data_size; + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData BoolList::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + BoolList::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*BoolList::GetClassData() const { return &_class_data_; } + +void BoolList::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast<BoolList *>(to)->MergeFrom( + static_cast<const BoolList &>(from)); +} + + +void BoolList::MergeFrom(const BoolList& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.BoolList) + 
GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + vals_.MergeFrom(from.vals_); + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void BoolList::CopyFrom(const BoolList& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.BoolList) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool BoolList::IsInitialized() const { + return true; +} + +void BoolList::InternalSwap(BoolList* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + vals_.InternalSwap(&other->vals_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata BoolList::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[2]); +} + +// =================================================================== + +class StringList::_Internal { + public: +}; + +StringList::StringList(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + vals_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.StringList) +} +StringList::StringList(const StringList& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + vals_(from.vals_) { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + // @@protoc_insertion_point(copy_constructor:flwr.proto.StringList) +} + +void StringList::SharedCtor() { +} + +StringList::~StringList() { + // @@protoc_insertion_point(destructor:flwr.proto.StringList) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline 
void StringList::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void StringList::ArenaDtor(void* object) { + StringList* _this = reinterpret_cast< StringList* >(object); + (void)_this; +} +void StringList::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void StringList::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void StringList::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.StringList) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + vals_.Clear(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* StringList::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // repeated string vals = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr -= 1; + do { + ptr += 1; + auto str = _internal_add_vals(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.StringList.vals")); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; 
+#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* StringList::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.StringList) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // repeated string vals = 1; + for (int i = 0, n = this->_internal_vals_size(); i < n; i++) { + const auto& s = this->_internal_vals(i); + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + s.data(), static_cast<int>(s.length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.StringList.vals"); + target = stream->WriteString(1, s, target); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.StringList) + return target; +} + +size_t StringList::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.StringList) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // repeated string vals = 1; + total_size += 1 * + ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(vals_.size()); + for (int i = 0, n = vals_.size(); i < n; i++) { + total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + vals_.Get(i)); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData StringList::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + StringList::MergeImpl +}; +const 
::PROTOBUF_NAMESPACE_ID::Message::ClassData*StringList::GetClassData() const { return &_class_data_; } + +void StringList::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast<StringList *>(to)->MergeFrom( + static_cast<const StringList &>(from)); +} + + +void StringList::MergeFrom(const StringList& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.StringList) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + vals_.MergeFrom(from.vals_); + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void StringList::CopyFrom(const StringList& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.StringList) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool StringList::IsInitialized() const { + return true; +} + +void StringList::InternalSwap(StringList* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + vals_.InternalSwap(&other->vals_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata StringList::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[3]); +} + +// =================================================================== + +class BytesList::_Internal { + public: +}; + +BytesList::BytesList(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + vals_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.BytesList) +} +BytesList::BytesList(const BytesList& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + vals_(from.vals_) { + 
_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + // @@protoc_insertion_point(copy_constructor:flwr.proto.BytesList) +} + +void BytesList::SharedCtor() { +} + +BytesList::~BytesList() { + // @@protoc_insertion_point(destructor:flwr.proto.BytesList) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void BytesList::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void BytesList::ArenaDtor(void* object) { + BytesList* _this = reinterpret_cast< BytesList* >(object); + (void)_this; +} +void BytesList::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void BytesList::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void BytesList::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.BytesList) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + vals_.Clear(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* BytesList::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // repeated bytes vals = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr -= 1; + do { + ptr += 1; + auto str = _internal_add_vals(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if 
((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* BytesList::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.BytesList) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // repeated bytes vals = 1; + for (int i = 0, n = this->_internal_vals_size(); i < n; i++) { + const auto& s = this->_internal_vals(i); + target = stream->WriteBytes(1, s, target); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.BytesList) + return target; +} + +size_t BytesList::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.BytesList) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // repeated bytes vals = 1; + total_size += 1 * + ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(vals_.size()); + for (int i = 0, n = vals_.size(); i < n; i++) { + total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize( + vals_.Get(i)); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData 
BytesList::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + BytesList::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*BytesList::GetClassData() const { return &_class_data_; } + +void BytesList::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast<BytesList *>(to)->MergeFrom( + static_cast<const BytesList &>(from)); +} + + +void BytesList::MergeFrom(const BytesList& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.BytesList) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + vals_.MergeFrom(from.vals_); + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void BytesList::CopyFrom(const BytesList& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.BytesList) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool BytesList::IsInitialized() const { + return true; +} + +void BytesList::InternalSwap(BytesList* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + vals_.InternalSwap(&other->vals_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata BytesList::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[4]); +} + +// =================================================================== + +class Array::_Internal { + public: +}; + +Array::Array(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + shape_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.Array) +} +Array::Array(const Array& from) + : 
::PROTOBUF_NAMESPACE_ID::Message(), + shape_(from.shape_) { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + dtype_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from._internal_dtype().empty()) { + dtype_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_dtype(), + GetArenaForAllocation()); + } + stype_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from._internal_stype().empty()) { + stype_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_stype(), + GetArenaForAllocation()); + } + data_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from._internal_data().empty()) { + data_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_data(), + GetArenaForAllocation()); + } + // @@protoc_insertion_point(copy_constructor:flwr.proto.Array) +} + +void Array::SharedCtor() { +dtype_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +stype_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +data_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} + +Array::~Array() { + // @@protoc_insertion_point(destructor:flwr.proto.Array) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void Array::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); + dtype_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + stype_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + data_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} + +void Array::ArenaDtor(void* object) { + Array* _this = 
reinterpret_cast< Array* >(object); + (void)_this; +} +void Array::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void Array::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void Array::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.Array) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + shape_.Clear(); + dtype_.ClearToEmpty(); + stype_.ClearToEmpty(); + data_.ClearToEmpty(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* Array::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // string dtype = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + auto str = _internal_mutable_dtype(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.Array.dtype")); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // repeated int32 shape = 2; + case 2: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { + ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedInt32Parser(_internal_mutable_shape(), ptr, ctx); + CHK_(ptr); + } else if (static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 16) { + _internal_add_shape(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr)); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // string stype = 3; + case 3: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) { + auto str = _internal_mutable_stype(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, 
ptr, ctx); + CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.Array.stype")); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // bytes data = 4; + case 4: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 34)) { + auto str = _internal_mutable_data(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(ptr); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* Array::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.Array) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // string dtype = 1; + if (!this->_internal_dtype().empty()) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->_internal_dtype().data(), static_cast<int>(this->_internal_dtype().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.Array.dtype"); + target = stream->WriteStringMaybeAliased( + 1, this->_internal_dtype(), target); + } + + // repeated int32 shape = 2; + { + int byte_size = _shape_cached_byte_size_.load(std::memory_order_relaxed); + if (byte_size > 0) { + target = stream->WriteInt32Packed( + 2, _internal_shape(), byte_size, target); + } + } + + // string stype = 3; + if (!this->_internal_stype().empty()) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + 
this->_internal_stype().data(), static_cast<int>(this->_internal_stype().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.Array.stype"); + target = stream->WriteStringMaybeAliased( + 3, this->_internal_stype(), target); + } + + // bytes data = 4; + if (!this->_internal_data().empty()) { + target = stream->WriteBytesMaybeAliased( + 4, this->_internal_data(), target); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.Array) + return target; +} + +size_t Array::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.Array) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // repeated int32 shape = 2; + { + size_t data_size = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + Int32Size(this->shape_); + if (data_size > 0) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size( + static_cast<::PROTOBUF_NAMESPACE_ID::int32>(data_size)); + } + int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(data_size); + _shape_cached_byte_size_.store(cached_size, + std::memory_order_relaxed); + total_size += data_size; + } + + // string dtype = 1; + if (!this->_internal_dtype().empty()) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->_internal_dtype()); + } + + // string stype = 3; + if (!this->_internal_stype().empty()) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->_internal_stype()); + } + + // bytes data 
= 4; + if (!this->_internal_data().empty()) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize( + this->_internal_data()); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData Array::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + Array::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*Array::GetClassData() const { return &_class_data_; } + +void Array::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast<Array *>(to)->MergeFrom( + static_cast<const Array &>(from)); +} + + +void Array::MergeFrom(const Array& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.Array) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + shape_.MergeFrom(from.shape_); + if (!from._internal_dtype().empty()) { + _internal_set_dtype(from._internal_dtype()); + } + if (!from._internal_stype().empty()) { + _internal_set_stype(from._internal_stype()); + } + if (!from._internal_data().empty()) { + _internal_set_data(from._internal_data()); + } + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void Array::CopyFrom(const Array& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.Array) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool Array::IsInitialized() const { + return true; +} + +void Array::InternalSwap(Array* other) { + using std::swap; + auto* lhs_arena = GetArenaForAllocation(); + auto* rhs_arena = other->GetArenaForAllocation(); + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + shape_.InternalSwap(&other->shape_); + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap( + &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + &dtype_, lhs_arena, + &other->dtype_, 
rhs_arena + ); + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap( + &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + &stype_, lhs_arena, + &other->stype_, rhs_arena + ); + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap( + &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + &data_, lhs_arena, + &other->data_, rhs_arena + ); +} + +::PROTOBUF_NAMESPACE_ID::Metadata Array::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[5]); +} + +// =================================================================== + +class MetricsRecordValue::_Internal { + public: + static const ::flwr::proto::DoubleList& double_list(const MetricsRecordValue* msg); + static const ::flwr::proto::Sint64List& sint64_list(const MetricsRecordValue* msg); +}; + +const ::flwr::proto::DoubleList& +MetricsRecordValue::_Internal::double_list(const MetricsRecordValue* msg) { + return *msg->value_.double_list_; +} +const ::flwr::proto::Sint64List& +MetricsRecordValue::_Internal::sint64_list(const MetricsRecordValue* msg) { + return *msg->value_.sint64_list_; +} +void MetricsRecordValue::set_allocated_double_list(::flwr::proto::DoubleList* double_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + clear_value(); + if (double_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::DoubleList>::GetOwningArena(double_list); + if (message_arena != submessage_arena) { + double_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, double_list, submessage_arena); + } + set_has_double_list(); + value_.double_list_ = double_list; + } + // @@protoc_insertion_point(field_set_allocated:flwr.proto.MetricsRecordValue.double_list) 
+} +void MetricsRecordValue::set_allocated_sint64_list(::flwr::proto::Sint64List* sint64_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + clear_value(); + if (sint64_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Sint64List>::GetOwningArena(sint64_list); + if (message_arena != submessage_arena) { + sint64_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, sint64_list, submessage_arena); + } + set_has_sint64_list(); + value_.sint64_list_ = sint64_list; + } + // @@protoc_insertion_point(field_set_allocated:flwr.proto.MetricsRecordValue.sint64_list) +} +MetricsRecordValue::MetricsRecordValue(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.MetricsRecordValue) +} +MetricsRecordValue::MetricsRecordValue(const MetricsRecordValue& from) + : ::PROTOBUF_NAMESPACE_ID::Message() { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + clear_has_value(); + switch (from.value_case()) { + case kDouble: { + _internal_set_double_(from._internal_double_()); + break; + } + case kSint64: { + _internal_set_sint64(from._internal_sint64()); + break; + } + case kDoubleList: { + _internal_mutable_double_list()->::flwr::proto::DoubleList::MergeFrom(from._internal_double_list()); + break; + } + case kSint64List: { + _internal_mutable_sint64_list()->::flwr::proto::Sint64List::MergeFrom(from._internal_sint64_list()); + break; + } + case VALUE_NOT_SET: { + break; + } + } + // @@protoc_insertion_point(copy_constructor:flwr.proto.MetricsRecordValue) +} + +void MetricsRecordValue::SharedCtor() { +clear_has_value(); +} + +MetricsRecordValue::~MetricsRecordValue() { + // 
@@protoc_insertion_point(destructor:flwr.proto.MetricsRecordValue) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void MetricsRecordValue::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); + if (has_value()) { + clear_value(); + } +} + +void MetricsRecordValue::ArenaDtor(void* object) { + MetricsRecordValue* _this = reinterpret_cast< MetricsRecordValue* >(object); + (void)_this; +} +void MetricsRecordValue::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void MetricsRecordValue::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void MetricsRecordValue::clear_value() { +// @@protoc_insertion_point(one_of_clear_start:flwr.proto.MetricsRecordValue) + switch (value_case()) { + case kDouble: { + // No need to clear + break; + } + case kSint64: { + // No need to clear + break; + } + case kDoubleList: { + if (GetArenaForAllocation() == nullptr) { + delete value_.double_list_; + } + break; + } + case kSint64List: { + if (GetArenaForAllocation() == nullptr) { + delete value_.sint64_list_; + } + break; + } + case VALUE_NOT_SET: { + break; + } + } + _oneof_case_[0] = VALUE_NOT_SET; +} + + +void MetricsRecordValue::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.MetricsRecordValue) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + clear_value(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* MetricsRecordValue::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // double double = 1; + case 1: + if 
(PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 9)) { + _internal_set_double_(::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr)); + ptr += sizeof(double); + } else + goto handle_unusual; + continue; + // sint64 sint64 = 2; + case 2: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 16)) { + _internal_set_sint64(::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr)); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // .flwr.proto.DoubleList double_list = 21; + case 21: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 170)) { + ptr = ctx->ParseMessage(_internal_mutable_double_list(), ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // .flwr.proto.Sint64List sint64_list = 22; + case 22: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 178)) { + ptr = ctx->ParseMessage(_internal_mutable_sint64_list(), ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* MetricsRecordValue::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.MetricsRecordValue) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // double double = 1; + if (_internal_has_double_()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteDoubleToArray(1, 
this->_internal_double_(), target); + } + + // sint64 sint64 = 2; + if (_internal_has_sint64()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteSInt64ToArray(2, this->_internal_sint64(), target); + } + + // .flwr.proto.DoubleList double_list = 21; + if (_internal_has_double_list()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage( + 21, _Internal::double_list(this), target, stream); + } + + // .flwr.proto.Sint64List sint64_list = 22; + if (_internal_has_sint64_list()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage( + 22, _Internal::sint64_list(this), target, stream); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.MetricsRecordValue) + return target; +} + +size_t MetricsRecordValue::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.MetricsRecordValue) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + switch (value_case()) { + // double double = 1; + case kDouble: { + total_size += 1 + 8; + break; + } + // sint64 sint64 = 2; + case kSint64: { + total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SInt64SizePlusOne(this->_internal_sint64()); + break; + } + // .flwr.proto.DoubleList double_list = 21; + case kDoubleList: { + total_size += 2 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + 
*value_.double_list_); + break; + } + // .flwr.proto.Sint64List sint64_list = 22; + case kSint64List: { + total_size += 2 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + *value_.sint64_list_); + break; + } + case VALUE_NOT_SET: { + break; + } + } + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData MetricsRecordValue::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + MetricsRecordValue::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*MetricsRecordValue::GetClassData() const { return &_class_data_; } + +void MetricsRecordValue::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void MetricsRecordValue::MergeFrom(const MetricsRecordValue& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.MetricsRecordValue) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + switch (from.value_case()) { + case kDouble: { + _internal_set_double_(from._internal_double_()); + break; + } + case kSint64: { + _internal_set_sint64(from._internal_sint64()); + break; + } + case kDoubleList: { + _internal_mutable_double_list()->::flwr::proto::DoubleList::MergeFrom(from._internal_double_list()); + break; + } + case kSint64List: { + _internal_mutable_sint64_list()->::flwr::proto::Sint64List::MergeFrom(from._internal_sint64_list()); + break; + } + case VALUE_NOT_SET: { + break; + } + } + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void MetricsRecordValue::CopyFrom(const MetricsRecordValue& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.MetricsRecordValue) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool MetricsRecordValue::IsInitialized() const { + 
return true; +} + +void MetricsRecordValue::InternalSwap(MetricsRecordValue* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + swap(value_, other->value_); + swap(_oneof_case_[0], other->_oneof_case_[0]); +} + +::PROTOBUF_NAMESPACE_ID::Metadata MetricsRecordValue::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[6]); +} + +// =================================================================== + +class ConfigsRecordValue::_Internal { + public: + static const ::flwr::proto::DoubleList& double_list(const ConfigsRecordValue* msg); + static const ::flwr::proto::Sint64List& sint64_list(const ConfigsRecordValue* msg); + static const ::flwr::proto::BoolList& bool_list(const ConfigsRecordValue* msg); + static const ::flwr::proto::StringList& string_list(const ConfigsRecordValue* msg); + static const ::flwr::proto::BytesList& bytes_list(const ConfigsRecordValue* msg); +}; + +const ::flwr::proto::DoubleList& +ConfigsRecordValue::_Internal::double_list(const ConfigsRecordValue* msg) { + return *msg->value_.double_list_; +} +const ::flwr::proto::Sint64List& +ConfigsRecordValue::_Internal::sint64_list(const ConfigsRecordValue* msg) { + return *msg->value_.sint64_list_; +} +const ::flwr::proto::BoolList& +ConfigsRecordValue::_Internal::bool_list(const ConfigsRecordValue* msg) { + return *msg->value_.bool_list_; +} +const ::flwr::proto::StringList& +ConfigsRecordValue::_Internal::string_list(const ConfigsRecordValue* msg) { + return *msg->value_.string_list_; +} +const ::flwr::proto::BytesList& +ConfigsRecordValue::_Internal::bytes_list(const ConfigsRecordValue* msg) { + return *msg->value_.bytes_list_; +} +void ConfigsRecordValue::set_allocated_double_list(::flwr::proto::DoubleList* double_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* 
message_arena = GetArenaForAllocation(); + clear_value(); + if (double_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::DoubleList>::GetOwningArena(double_list); + if (message_arena != submessage_arena) { + double_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, double_list, submessage_arena); + } + set_has_double_list(); + value_.double_list_ = double_list; + } + // @@protoc_insertion_point(field_set_allocated:flwr.proto.ConfigsRecordValue.double_list) +} +void ConfigsRecordValue::set_allocated_sint64_list(::flwr::proto::Sint64List* sint64_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + clear_value(); + if (sint64_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Sint64List>::GetOwningArena(sint64_list); + if (message_arena != submessage_arena) { + sint64_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, sint64_list, submessage_arena); + } + set_has_sint64_list(); + value_.sint64_list_ = sint64_list; + } + // @@protoc_insertion_point(field_set_allocated:flwr.proto.ConfigsRecordValue.sint64_list) +} +void ConfigsRecordValue::set_allocated_bool_list(::flwr::proto::BoolList* bool_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + clear_value(); + if (bool_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::BoolList>::GetOwningArena(bool_list); + if (message_arena != submessage_arena) { + bool_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, bool_list, submessage_arena); + } + set_has_bool_list(); + value_.bool_list_ = bool_list; + } + // @@protoc_insertion_point(field_set_allocated:flwr.proto.ConfigsRecordValue.bool_list) +} +void 
ConfigsRecordValue::set_allocated_string_list(::flwr::proto::StringList* string_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + clear_value(); + if (string_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::StringList>::GetOwningArena(string_list); + if (message_arena != submessage_arena) { + string_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, string_list, submessage_arena); + } + set_has_string_list(); + value_.string_list_ = string_list; + } + // @@protoc_insertion_point(field_set_allocated:flwr.proto.ConfigsRecordValue.string_list) +} +void ConfigsRecordValue::set_allocated_bytes_list(::flwr::proto::BytesList* bytes_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + clear_value(); + if (bytes_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::BytesList>::GetOwningArena(bytes_list); + if (message_arena != submessage_arena) { + bytes_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, bytes_list, submessage_arena); + } + set_has_bytes_list(); + value_.bytes_list_ = bytes_list; + } + // @@protoc_insertion_point(field_set_allocated:flwr.proto.ConfigsRecordValue.bytes_list) +} +ConfigsRecordValue::ConfigsRecordValue(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.ConfigsRecordValue) +} +ConfigsRecordValue::ConfigsRecordValue(const ConfigsRecordValue& from) + : ::PROTOBUF_NAMESPACE_ID::Message() { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + clear_has_value(); + switch (from.value_case()) { + case kDouble: { + 
_internal_set_double_(from._internal_double_()); + break; + } + case kSint64: { + _internal_set_sint64(from._internal_sint64()); + break; + } + case kBool: { + _internal_set_bool_(from._internal_bool_()); + break; + } + case kString: { + _internal_set_string(from._internal_string()); + break; + } + case kBytes: { + _internal_set_bytes(from._internal_bytes()); + break; + } + case kDoubleList: { + _internal_mutable_double_list()->::flwr::proto::DoubleList::MergeFrom(from._internal_double_list()); + break; + } + case kSint64List: { + _internal_mutable_sint64_list()->::flwr::proto::Sint64List::MergeFrom(from._internal_sint64_list()); + break; + } + case kBoolList: { + _internal_mutable_bool_list()->::flwr::proto::BoolList::MergeFrom(from._internal_bool_list()); + break; + } + case kStringList: { + _internal_mutable_string_list()->::flwr::proto::StringList::MergeFrom(from._internal_string_list()); + break; + } + case kBytesList: { + _internal_mutable_bytes_list()->::flwr::proto::BytesList::MergeFrom(from._internal_bytes_list()); + break; + } + case VALUE_NOT_SET: { + break; + } + } + // @@protoc_insertion_point(copy_constructor:flwr.proto.ConfigsRecordValue) +} + +void ConfigsRecordValue::SharedCtor() { +clear_has_value(); +} + +ConfigsRecordValue::~ConfigsRecordValue() { + // @@protoc_insertion_point(destructor:flwr.proto.ConfigsRecordValue) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void ConfigsRecordValue::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); + if (has_value()) { + clear_value(); + } +} + +void ConfigsRecordValue::ArenaDtor(void* object) { + ConfigsRecordValue* _this = reinterpret_cast< ConfigsRecordValue* >(object); + (void)_this; +} +void ConfigsRecordValue::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void ConfigsRecordValue::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void 
ConfigsRecordValue::clear_value() { +// @@protoc_insertion_point(one_of_clear_start:flwr.proto.ConfigsRecordValue) + switch (value_case()) { + case kDouble: { + // No need to clear + break; + } + case kSint64: { + // No need to clear + break; + } + case kBool: { + // No need to clear + break; + } + case kString: { + value_.string_.Destroy(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); + break; + } + case kBytes: { + value_.bytes_.Destroy(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); + break; + } + case kDoubleList: { + if (GetArenaForAllocation() == nullptr) { + delete value_.double_list_; + } + break; + } + case kSint64List: { + if (GetArenaForAllocation() == nullptr) { + delete value_.sint64_list_; + } + break; + } + case kBoolList: { + if (GetArenaForAllocation() == nullptr) { + delete value_.bool_list_; + } + break; + } + case kStringList: { + if (GetArenaForAllocation() == nullptr) { + delete value_.string_list_; + } + break; + } + case kBytesList: { + if (GetArenaForAllocation() == nullptr) { + delete value_.bytes_list_; + } + break; + } + case VALUE_NOT_SET: { + break; + } + } + _oneof_case_[0] = VALUE_NOT_SET; +} + + +void ConfigsRecordValue::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.ConfigsRecordValue) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + clear_value(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* ConfigsRecordValue::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // double double = 1; + case 1: + if 
(PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 9)) { + _internal_set_double_(::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr)); + ptr += sizeof(double); + } else + goto handle_unusual; + continue; + // sint64 sint64 = 2; + case 2: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 16)) { + _internal_set_sint64(::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr)); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // bool bool = 3; + case 3: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) { + _internal_set_bool_(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr)); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // string string = 4; + case 4: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 34)) { + auto str = _internal_mutable_string(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.ConfigsRecordValue.string")); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // bytes bytes = 5; + case 5: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 42)) { + auto str = _internal_mutable_bytes(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // .flwr.proto.DoubleList double_list = 21; + case 21: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 170)) { + ptr = ctx->ParseMessage(_internal_mutable_double_list(), ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // .flwr.proto.Sint64List sint64_list = 22; + case 22: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 178)) { + ptr = ctx->ParseMessage(_internal_mutable_sint64_list(), ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // 
.flwr.proto.BoolList bool_list = 23; + case 23: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 186)) { + ptr = ctx->ParseMessage(_internal_mutable_bool_list(), ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // .flwr.proto.StringList string_list = 24; + case 24: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 194)) { + ptr = ctx->ParseMessage(_internal_mutable_string_list(), ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // .flwr.proto.BytesList bytes_list = 25; + case 25: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 202)) { + ptr = ctx->ParseMessage(_internal_mutable_bytes_list(), ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* ConfigsRecordValue::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.ConfigsRecordValue) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // double double = 1; + if (_internal_has_double_()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteDoubleToArray(1, this->_internal_double_(), target); + } + + // sint64 sint64 = 2; + if (_internal_has_sint64()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteSInt64ToArray(2, 
this->_internal_sint64(), target); + } + + // bool bool = 3; + if (_internal_has_bool_()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(3, this->_internal_bool_(), target); + } + + // string string = 4; + if (_internal_has_string()) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->_internal_string().data(), static_cast(this->_internal_string().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.ConfigsRecordValue.string"); + target = stream->WriteStringMaybeAliased( + 4, this->_internal_string(), target); + } + + // bytes bytes = 5; + if (_internal_has_bytes()) { + target = stream->WriteBytesMaybeAliased( + 5, this->_internal_bytes(), target); + } + + // .flwr.proto.DoubleList double_list = 21; + if (_internal_has_double_list()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage( + 21, _Internal::double_list(this), target, stream); + } + + // .flwr.proto.Sint64List sint64_list = 22; + if (_internal_has_sint64_list()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage( + 22, _Internal::sint64_list(this), target, stream); + } + + // .flwr.proto.BoolList bool_list = 23; + if (_internal_has_bool_list()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage( + 23, _Internal::bool_list(this), target, stream); + } + + // .flwr.proto.StringList string_list = 24; + if (_internal_has_string_list()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage( + 24, _Internal::string_list(this), target, stream); + } + + // .flwr.proto.BytesList bytes_list = 25; + if (_internal_has_bytes_list()) { + target = stream->EnsureSpace(target); + target 
= ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage( + 25, _Internal::bytes_list(this), target, stream); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.ConfigsRecordValue) + return target; +} + +size_t ConfigsRecordValue::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.ConfigsRecordValue) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + switch (value_case()) { + // double double = 1; + case kDouble: { + total_size += 1 + 8; + break; + } + // sint64 sint64 = 2; + case kSint64: { + total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SInt64SizePlusOne(this->_internal_sint64()); + break; + } + // bool bool = 3; + case kBool: { + total_size += 1 + 1; + break; + } + // string string = 4; + case kString: { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->_internal_string()); + break; + } + // bytes bytes = 5; + case kBytes: { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize( + this->_internal_bytes()); + break; + } + // .flwr.proto.DoubleList double_list = 21; + case kDoubleList: { + total_size += 2 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + *value_.double_list_); + break; + } + // .flwr.proto.Sint64List sint64_list = 22; + case kSint64List: { + total_size += 2 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + *value_.sint64_list_); + break; + } + // .flwr.proto.BoolList bool_list = 23; + case 
kBoolList: { + total_size += 2 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + *value_.bool_list_); + break; + } + // .flwr.proto.StringList string_list = 24; + case kStringList: { + total_size += 2 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + *value_.string_list_); + break; + } + // .flwr.proto.BytesList bytes_list = 25; + case kBytesList: { + total_size += 2 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + *value_.bytes_list_); + break; + } + case VALUE_NOT_SET: { + break; + } + } + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData ConfigsRecordValue::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + ConfigsRecordValue::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*ConfigsRecordValue::GetClassData() const { return &_class_data_; } + +void ConfigsRecordValue::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void ConfigsRecordValue::MergeFrom(const ConfigsRecordValue& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.ConfigsRecordValue) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + switch (from.value_case()) { + case kDouble: { + _internal_set_double_(from._internal_double_()); + break; + } + case kSint64: { + _internal_set_sint64(from._internal_sint64()); + break; + } + case kBool: { + _internal_set_bool_(from._internal_bool_()); + break; + } + case kString: { + _internal_set_string(from._internal_string()); + break; + } + case kBytes: { + _internal_set_bytes(from._internal_bytes()); + break; + } + case kDoubleList: { + _internal_mutable_double_list()->::flwr::proto::DoubleList::MergeFrom(from._internal_double_list()); + break; + } + case kSint64List: { + 
_internal_mutable_sint64_list()->::flwr::proto::Sint64List::MergeFrom(from._internal_sint64_list()); + break; + } + case kBoolList: { + _internal_mutable_bool_list()->::flwr::proto::BoolList::MergeFrom(from._internal_bool_list()); + break; + } + case kStringList: { + _internal_mutable_string_list()->::flwr::proto::StringList::MergeFrom(from._internal_string_list()); + break; + } + case kBytesList: { + _internal_mutable_bytes_list()->::flwr::proto::BytesList::MergeFrom(from._internal_bytes_list()); + break; + } + case VALUE_NOT_SET: { + break; + } + } + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void ConfigsRecordValue::CopyFrom(const ConfigsRecordValue& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.ConfigsRecordValue) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool ConfigsRecordValue::IsInitialized() const { + return true; +} + +void ConfigsRecordValue::InternalSwap(ConfigsRecordValue* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + swap(value_, other->value_); + swap(_oneof_case_[0], other->_oneof_case_[0]); +} + +::PROTOBUF_NAMESPACE_ID::Metadata ConfigsRecordValue::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[7]); +} + +// =================================================================== + +class ParametersRecord::_Internal { + public: +}; + +ParametersRecord::ParametersRecord(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + data_keys_(arena), + data_values_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // 
@@protoc_insertion_point(arena_constructor:flwr.proto.ParametersRecord) +} +ParametersRecord::ParametersRecord(const ParametersRecord& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + data_keys_(from.data_keys_), + data_values_(from.data_values_) { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + // @@protoc_insertion_point(copy_constructor:flwr.proto.ParametersRecord) +} + +void ParametersRecord::SharedCtor() { +} + +ParametersRecord::~ParametersRecord() { + // @@protoc_insertion_point(destructor:flwr.proto.ParametersRecord) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void ParametersRecord::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void ParametersRecord::ArenaDtor(void* object) { + ParametersRecord* _this = reinterpret_cast< ParametersRecord* >(object); + (void)_this; +} +void ParametersRecord::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void ParametersRecord::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void ParametersRecord::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.ParametersRecord) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + data_keys_.Clear(); + data_values_.Clear(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* ParametersRecord::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // repeated string data_keys = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { 
+ ptr -= 1; + do { + ptr += 1; + auto str = _internal_add_data_keys(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.ParametersRecord.data_keys")); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); + } else + goto handle_unusual; + continue; + // repeated .flwr.proto.Array data_values = 2; + case 2: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { + ptr -= 1; + do { + ptr += 1; + ptr = ctx->ParseMessage(_internal_add_data_values(), ptr); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr)); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* ParametersRecord::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.ParametersRecord) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // repeated string data_keys = 1; + for (int i = 0, n = this->_internal_data_keys_size(); i < n; i++) { + const auto& s = this->_internal_data_keys(i); + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + s.data(), static_cast(s.length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.ParametersRecord.data_keys"); + target = 
stream->WriteString(1, s, target); + } + + // repeated .flwr.proto.Array data_values = 2; + for (unsigned int i = 0, + n = static_cast(this->_internal_data_values_size()); i < n; i++) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage(2, this->_internal_data_values(i), target, stream); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.ParametersRecord) + return target; +} + +size_t ParametersRecord::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.ParametersRecord) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // repeated string data_keys = 1; + total_size += 1 * + ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(data_keys_.size()); + for (int i = 0, n = data_keys_.size(); i < n; i++) { + total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + data_keys_.Get(i)); + } + + // repeated .flwr.proto.Array data_values = 2; + total_size += 1UL * this->_internal_data_values_size(); + for (const auto& msg : this->data_values_) { + total_size += + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData ParametersRecord::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + ParametersRecord::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*ParametersRecord::GetClassData() const { return 
&_class_data_; } + +void ParametersRecord::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void ParametersRecord::MergeFrom(const ParametersRecord& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.ParametersRecord) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + data_keys_.MergeFrom(from.data_keys_); + data_values_.MergeFrom(from.data_values_); + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void ParametersRecord::CopyFrom(const ParametersRecord& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.ParametersRecord) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool ParametersRecord::IsInitialized() const { + return true; +} + +void ParametersRecord::InternalSwap(ParametersRecord* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + data_keys_.InternalSwap(&other->data_keys_); + data_values_.InternalSwap(&other->data_values_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata ParametersRecord::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[8]); +} + +// =================================================================== + +MetricsRecord_DataEntry_DoNotUse::MetricsRecord_DataEntry_DoNotUse() {} +MetricsRecord_DataEntry_DoNotUse::MetricsRecord_DataEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena) + : SuperType(arena) {} +void MetricsRecord_DataEntry_DoNotUse::MergeFrom(const MetricsRecord_DataEntry_DoNotUse& other) { + MergeFromInternal(other); +} +::PROTOBUF_NAMESPACE_ID::Metadata 
MetricsRecord_DataEntry_DoNotUse::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[9]); +} + +// =================================================================== + +class MetricsRecord::_Internal { + public: +}; + +MetricsRecord::MetricsRecord(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + data_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.MetricsRecord) +} +MetricsRecord::MetricsRecord(const MetricsRecord& from) + : ::PROTOBUF_NAMESPACE_ID::Message() { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + data_.MergeFrom(from.data_); + // @@protoc_insertion_point(copy_constructor:flwr.proto.MetricsRecord) +} + +void MetricsRecord::SharedCtor() { +} + +MetricsRecord::~MetricsRecord() { + // @@protoc_insertion_point(destructor:flwr.proto.MetricsRecord) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void MetricsRecord::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void MetricsRecord::ArenaDtor(void* object) { + MetricsRecord* _this = reinterpret_cast< MetricsRecord* >(object); + (void)_this; + _this->data_. 
~MapField(); +} +inline void MetricsRecord::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena) { + if (arena != nullptr) { + arena->OwnCustomDestructor(this, &MetricsRecord::ArenaDtor); + } +} +void MetricsRecord::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void MetricsRecord::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.MetricsRecord) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + data_.Clear(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* MetricsRecord::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // map data = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr -= 1; + do { + ptr += 1; + ptr = ctx->ParseMessage(&data_, ptr); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* MetricsRecord::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // 
@@protoc_insertion_point(serialize_to_array_start:flwr.proto.MetricsRecord) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // map data = 1; + if (!this->_internal_data().empty()) { + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >::const_pointer + ConstPtr; + typedef ConstPtr SortItem; + typedef ::PROTOBUF_NAMESPACE_ID::internal::CompareByDerefFirst Less; + struct Utf8Check { + static void Check(ConstPtr p) { + (void)p; + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + p->first.data(), static_cast(p->first.length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.MetricsRecord.DataEntry.key"); + } + }; + + if (stream->IsSerializationDeterministic() && + this->_internal_data().size() > 1) { + ::std::unique_ptr items( + new SortItem[this->_internal_data().size()]); + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >::size_type size_type; + size_type n = 0; + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >::const_iterator + it = this->_internal_data().begin(); + it != this->_internal_data().end(); ++it, ++n) { + items[static_cast(n)] = SortItem(&*it); + } + ::std::sort(&items[0], &items[static_cast(n)], Less()); + for (size_type i = 0; i < n; i++) { + target = MetricsRecord_DataEntry_DoNotUse::Funcs::InternalSerialize(1, items[static_cast(i)]->first, items[static_cast(i)]->second, target, stream); + Utf8Check::Check(&(*items[static_cast(i)])); + } + } else { + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >::const_iterator + it = this->_internal_data().begin(); + it != this->_internal_data().end(); ++it) { + target = MetricsRecord_DataEntry_DoNotUse::Funcs::InternalSerialize(1, it->first, it->second, target, stream); + Utf8Check::Check(&(*it)); + } + } + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { 
+ target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.MetricsRecord) + return target; +} + +size_t MetricsRecord::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.MetricsRecord) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // map data = 1; + total_size += 1 * + ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_data_size()); + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >::const_iterator + it = this->_internal_data().begin(); + it != this->_internal_data().end(); ++it) { + total_size += MetricsRecord_DataEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData MetricsRecord::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + MetricsRecord::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*MetricsRecord::GetClassData() const { return &_class_data_; } + +void MetricsRecord::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void MetricsRecord::MergeFrom(const MetricsRecord& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.MetricsRecord) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + data_.MergeFrom(from.data_); + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + 
+void MetricsRecord::CopyFrom(const MetricsRecord& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.MetricsRecord) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool MetricsRecord::IsInitialized() const { + return true; +} + +void MetricsRecord::InternalSwap(MetricsRecord* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + data_.InternalSwap(&other->data_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata MetricsRecord::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[10]); +} + +// =================================================================== + +ConfigsRecord_DataEntry_DoNotUse::ConfigsRecord_DataEntry_DoNotUse() {} +ConfigsRecord_DataEntry_DoNotUse::ConfigsRecord_DataEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena) + : SuperType(arena) {} +void ConfigsRecord_DataEntry_DoNotUse::MergeFrom(const ConfigsRecord_DataEntry_DoNotUse& other) { + MergeFromInternal(other); +} +::PROTOBUF_NAMESPACE_ID::Metadata ConfigsRecord_DataEntry_DoNotUse::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[11]); +} + +// =================================================================== + +class ConfigsRecord::_Internal { + public: +}; + +ConfigsRecord::ConfigsRecord(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + data_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.ConfigsRecord) +} 
+ConfigsRecord::ConfigsRecord(const ConfigsRecord& from) + : ::PROTOBUF_NAMESPACE_ID::Message() { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + data_.MergeFrom(from.data_); + // @@protoc_insertion_point(copy_constructor:flwr.proto.ConfigsRecord) +} + +void ConfigsRecord::SharedCtor() { +} + +ConfigsRecord::~ConfigsRecord() { + // @@protoc_insertion_point(destructor:flwr.proto.ConfigsRecord) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void ConfigsRecord::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void ConfigsRecord::ArenaDtor(void* object) { + ConfigsRecord* _this = reinterpret_cast< ConfigsRecord* >(object); + (void)_this; + _this->data_. ~MapField(); +} +inline void ConfigsRecord::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena) { + if (arena != nullptr) { + arena->OwnCustomDestructor(this, &ConfigsRecord::ArenaDtor); + } +} +void ConfigsRecord::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void ConfigsRecord::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.ConfigsRecord) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + data_.Clear(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* ConfigsRecord::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // map data = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr -= 1; + do { + ptr += 1; + ptr = ctx->ParseMessage(&data_, ptr); + 
CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* ConfigsRecord::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.ConfigsRecord) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // map data = 1; + if (!this->_internal_data().empty()) { + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >::const_pointer + ConstPtr; + typedef ConstPtr SortItem; + typedef ::PROTOBUF_NAMESPACE_ID::internal::CompareByDerefFirst Less; + struct Utf8Check { + static void Check(ConstPtr p) { + (void)p; + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + p->first.data(), static_cast(p->first.length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.ConfigsRecord.DataEntry.key"); + } + }; + + if (stream->IsSerializationDeterministic() && + this->_internal_data().size() > 1) { + ::std::unique_ptr items( + new SortItem[this->_internal_data().size()]); + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >::size_type size_type; + size_type n = 0; + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >::const_iterator + it = this->_internal_data().begin(); + it != this->_internal_data().end(); ++it, 
++n) { + items[static_cast(n)] = SortItem(&*it); + } + ::std::sort(&items[0], &items[static_cast(n)], Less()); + for (size_type i = 0; i < n; i++) { + target = ConfigsRecord_DataEntry_DoNotUse::Funcs::InternalSerialize(1, items[static_cast(i)]->first, items[static_cast(i)]->second, target, stream); + Utf8Check::Check(&(*items[static_cast(i)])); + } + } else { + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >::const_iterator + it = this->_internal_data().begin(); + it != this->_internal_data().end(); ++it) { + target = ConfigsRecord_DataEntry_DoNotUse::Funcs::InternalSerialize(1, it->first, it->second, target, stream); + Utf8Check::Check(&(*it)); + } + } + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.ConfigsRecord) + return target; +} + +size_t ConfigsRecord::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.ConfigsRecord) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // map data = 1; + total_size += 1 * + ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_data_size()); + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >::const_iterator + it = this->_internal_data().begin(); + it != this->_internal_data().end(); ++it) { + total_size += ConfigsRecord_DataEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData ConfigsRecord::_class_data_ = { 
+ ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + ConfigsRecord::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*ConfigsRecord::GetClassData() const { return &_class_data_; } + +void ConfigsRecord::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void ConfigsRecord::MergeFrom(const ConfigsRecord& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.ConfigsRecord) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + data_.MergeFrom(from.data_); + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void ConfigsRecord::CopyFrom(const ConfigsRecord& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.ConfigsRecord) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool ConfigsRecord::IsInitialized() const { + return true; +} + +void ConfigsRecord::InternalSwap(ConfigsRecord* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + data_.InternalSwap(&other->data_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata ConfigsRecord::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[12]); +} + +// =================================================================== + +RecordSet_ParametersEntry_DoNotUse::RecordSet_ParametersEntry_DoNotUse() {} +RecordSet_ParametersEntry_DoNotUse::RecordSet_ParametersEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena) + : SuperType(arena) {} +void RecordSet_ParametersEntry_DoNotUse::MergeFrom(const RecordSet_ParametersEntry_DoNotUse& other) { + MergeFromInternal(other); +} 
+::PROTOBUF_NAMESPACE_ID::Metadata RecordSet_ParametersEntry_DoNotUse::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[13]); +} + +// =================================================================== + +RecordSet_MetricsEntry_DoNotUse::RecordSet_MetricsEntry_DoNotUse() {} +RecordSet_MetricsEntry_DoNotUse::RecordSet_MetricsEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena) + : SuperType(arena) {} +void RecordSet_MetricsEntry_DoNotUse::MergeFrom(const RecordSet_MetricsEntry_DoNotUse& other) { + MergeFromInternal(other); +} +::PROTOBUF_NAMESPACE_ID::Metadata RecordSet_MetricsEntry_DoNotUse::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[14]); +} + +// =================================================================== + +RecordSet_ConfigsEntry_DoNotUse::RecordSet_ConfigsEntry_DoNotUse() {} +RecordSet_ConfigsEntry_DoNotUse::RecordSet_ConfigsEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena) + : SuperType(arena) {} +void RecordSet_ConfigsEntry_DoNotUse::MergeFrom(const RecordSet_ConfigsEntry_DoNotUse& other) { + MergeFromInternal(other); +} +::PROTOBUF_NAMESPACE_ID::Metadata RecordSet_ConfigsEntry_DoNotUse::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[15]); +} + +// =================================================================== + +class RecordSet::_Internal { + public: +}; + +RecordSet::RecordSet(::PROTOBUF_NAMESPACE_ID::Arena* arena, 
+ bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + parameters_(arena), + metrics_(arena), + configs_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.RecordSet) +} +RecordSet::RecordSet(const RecordSet& from) + : ::PROTOBUF_NAMESPACE_ID::Message() { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + parameters_.MergeFrom(from.parameters_); + metrics_.MergeFrom(from.metrics_); + configs_.MergeFrom(from.configs_); + // @@protoc_insertion_point(copy_constructor:flwr.proto.RecordSet) +} + +void RecordSet::SharedCtor() { +} + +RecordSet::~RecordSet() { + // @@protoc_insertion_point(destructor:flwr.proto.RecordSet) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void RecordSet::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void RecordSet::ArenaDtor(void* object) { + RecordSet* _this = reinterpret_cast< RecordSet* >(object); + (void)_this; + _this->parameters_. ~MapField(); + _this->metrics_. ~MapField(); + _this->configs_. 
~MapField(); +} +inline void RecordSet::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena) { + if (arena != nullptr) { + arena->OwnCustomDestructor(this, &RecordSet::ArenaDtor); + } +} +void RecordSet::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void RecordSet::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.RecordSet) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + parameters_.Clear(); + metrics_.Clear(); + configs_.Clear(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* RecordSet::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // map parameters = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr -= 1; + do { + ptr += 1; + ptr = ctx->ParseMessage(¶meters_, ptr); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); + } else + goto handle_unusual; + continue; + // map metrics = 2; + case 2: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { + ptr -= 1; + do { + ptr += 1; + ptr = ctx->ParseMessage(&metrics_, ptr); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr)); + } else + goto handle_unusual; + continue; + // map configs = 3; + case 3: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) { + ptr -= 1; + do { + ptr += 1; + ptr = ctx->ParseMessage(&configs_, ptr); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while 
(::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<26>(ptr)); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* RecordSet::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.RecordSet) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // map parameters = 1; + if (!this->_internal_parameters().empty()) { + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >::const_pointer + ConstPtr; + typedef ConstPtr SortItem; + typedef ::PROTOBUF_NAMESPACE_ID::internal::CompareByDerefFirst Less; + struct Utf8Check { + static void Check(ConstPtr p) { + (void)p; + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + p->first.data(), static_cast(p->first.length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.RecordSet.ParametersEntry.key"); + } + }; + + if (stream->IsSerializationDeterministic() && + this->_internal_parameters().size() > 1) { + ::std::unique_ptr items( + new SortItem[this->_internal_parameters().size()]); + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >::size_type size_type; + size_type n = 0; + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >::const_iterator + it = this->_internal_parameters().begin(); + it != this->_internal_parameters().end(); ++it, ++n) { + items[static_cast(n)] = 
SortItem(&*it); + } + ::std::sort(&items[0], &items[static_cast(n)], Less()); + for (size_type i = 0; i < n; i++) { + target = RecordSet_ParametersEntry_DoNotUse::Funcs::InternalSerialize(1, items[static_cast(i)]->first, items[static_cast(i)]->second, target, stream); + Utf8Check::Check(&(*items[static_cast(i)])); + } + } else { + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >::const_iterator + it = this->_internal_parameters().begin(); + it != this->_internal_parameters().end(); ++it) { + target = RecordSet_ParametersEntry_DoNotUse::Funcs::InternalSerialize(1, it->first, it->second, target, stream); + Utf8Check::Check(&(*it)); + } + } + } + + // map metrics = 2; + if (!this->_internal_metrics().empty()) { + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >::const_pointer + ConstPtr; + typedef ConstPtr SortItem; + typedef ::PROTOBUF_NAMESPACE_ID::internal::CompareByDerefFirst Less; + struct Utf8Check { + static void Check(ConstPtr p) { + (void)p; + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + p->first.data(), static_cast(p->first.length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.RecordSet.MetricsEntry.key"); + } + }; + + if (stream->IsSerializationDeterministic() && + this->_internal_metrics().size() > 1) { + ::std::unique_ptr items( + new SortItem[this->_internal_metrics().size()]); + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >::size_type size_type; + size_type n = 0; + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >::const_iterator + it = this->_internal_metrics().begin(); + it != this->_internal_metrics().end(); ++it, ++n) { + items[static_cast(n)] = SortItem(&*it); + } + ::std::sort(&items[0], &items[static_cast(n)], Less()); + for (size_type i = 0; i < n; i++) { + target = RecordSet_MetricsEntry_DoNotUse::Funcs::InternalSerialize(2, items[static_cast(i)]->first, 
items[static_cast(i)]->second, target, stream); + Utf8Check::Check(&(*items[static_cast(i)])); + } + } else { + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >::const_iterator + it = this->_internal_metrics().begin(); + it != this->_internal_metrics().end(); ++it) { + target = RecordSet_MetricsEntry_DoNotUse::Funcs::InternalSerialize(2, it->first, it->second, target, stream); + Utf8Check::Check(&(*it)); + } + } + } + + // map configs = 3; + if (!this->_internal_configs().empty()) { + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecord >::const_pointer + ConstPtr; + typedef ConstPtr SortItem; + typedef ::PROTOBUF_NAMESPACE_ID::internal::CompareByDerefFirst Less; + struct Utf8Check { + static void Check(ConstPtr p) { + (void)p; + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + p->first.data(), static_cast(p->first.length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.RecordSet.ConfigsEntry.key"); + } + }; + + if (stream->IsSerializationDeterministic() && + this->_internal_configs().size() > 1) { + ::std::unique_ptr items( + new SortItem[this->_internal_configs().size()]); + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecord >::size_type size_type; + size_type n = 0; + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecord >::const_iterator + it = this->_internal_configs().begin(); + it != this->_internal_configs().end(); ++it, ++n) { + items[static_cast(n)] = SortItem(&*it); + } + ::std::sort(&items[0], &items[static_cast(n)], Less()); + for (size_type i = 0; i < n; i++) { + target = RecordSet_ConfigsEntry_DoNotUse::Funcs::InternalSerialize(3, items[static_cast(i)]->first, items[static_cast(i)]->second, target, stream); + Utf8Check::Check(&(*items[static_cast(i)])); + } + } else { + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecord >::const_iterator + it = 
this->_internal_configs().begin(); + it != this->_internal_configs().end(); ++it) { + target = RecordSet_ConfigsEntry_DoNotUse::Funcs::InternalSerialize(3, it->first, it->second, target, stream); + Utf8Check::Check(&(*it)); + } + } + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.RecordSet) + return target; +} + +size_t RecordSet::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.RecordSet) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // map parameters = 1; + total_size += 1 * + ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_parameters_size()); + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >::const_iterator + it = this->_internal_parameters().begin(); + it != this->_internal_parameters().end(); ++it) { + total_size += RecordSet_ParametersEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second); + } + + // map metrics = 2; + total_size += 1 * + ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_metrics_size()); + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >::const_iterator + it = this->_internal_metrics().begin(); + it != this->_internal_metrics().end(); ++it) { + total_size += RecordSet_MetricsEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second); + } + + // map configs = 3; + total_size += 1 * + ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_configs_size()); + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, 
::flwr::proto::ConfigsRecord >::const_iterator + it = this->_internal_configs().begin(); + it != this->_internal_configs().end(); ++it) { + total_size += RecordSet_ConfigsEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData RecordSet::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + RecordSet::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*RecordSet::GetClassData() const { return &_class_data_; } + +void RecordSet::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void RecordSet::MergeFrom(const RecordSet& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.RecordSet) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + parameters_.MergeFrom(from.parameters_); + metrics_.MergeFrom(from.metrics_); + configs_.MergeFrom(from.configs_); + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void RecordSet::CopyFrom(const RecordSet& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.RecordSet) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool RecordSet::IsInitialized() const { + return true; +} + +void RecordSet::InternalSwap(RecordSet* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + parameters_.InternalSwap(&other->parameters_); + metrics_.InternalSwap(&other->metrics_); + configs_.InternalSwap(&other->configs_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata RecordSet::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, 
&descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[16]); +} + +// @@protoc_insertion_point(namespace_scope) +} // namespace proto +} // namespace flwr +PROTOBUF_NAMESPACE_OPEN +template<> PROTOBUF_NOINLINE ::flwr::proto::DoubleList* Arena::CreateMaybeMessage< ::flwr::proto::DoubleList >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::DoubleList >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::Sint64List* Arena::CreateMaybeMessage< ::flwr::proto::Sint64List >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::Sint64List >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::BoolList* Arena::CreateMaybeMessage< ::flwr::proto::BoolList >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::BoolList >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::StringList* Arena::CreateMaybeMessage< ::flwr::proto::StringList >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::StringList >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::BytesList* Arena::CreateMaybeMessage< ::flwr::proto::BytesList >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::BytesList >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::Array* Arena::CreateMaybeMessage< ::flwr::proto::Array >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::Array >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::MetricsRecordValue* Arena::CreateMaybeMessage< ::flwr::proto::MetricsRecordValue >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::MetricsRecordValue >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::ConfigsRecordValue* Arena::CreateMaybeMessage< ::flwr::proto::ConfigsRecordValue >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::ConfigsRecordValue >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::ParametersRecord* Arena::CreateMaybeMessage< 
::flwr::proto::ParametersRecord >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::ParametersRecord >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::MetricsRecord_DataEntry_DoNotUse* Arena::CreateMaybeMessage< ::flwr::proto::MetricsRecord_DataEntry_DoNotUse >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::MetricsRecord_DataEntry_DoNotUse >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::MetricsRecord* Arena::CreateMaybeMessage< ::flwr::proto::MetricsRecord >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::MetricsRecord >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::ConfigsRecord_DataEntry_DoNotUse* Arena::CreateMaybeMessage< ::flwr::proto::ConfigsRecord_DataEntry_DoNotUse >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::ConfigsRecord_DataEntry_DoNotUse >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::ConfigsRecord* Arena::CreateMaybeMessage< ::flwr::proto::ConfigsRecord >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::ConfigsRecord >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::RecordSet_ParametersEntry_DoNotUse* Arena::CreateMaybeMessage< ::flwr::proto::RecordSet_ParametersEntry_DoNotUse >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::RecordSet_ParametersEntry_DoNotUse >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::RecordSet_MetricsEntry_DoNotUse* Arena::CreateMaybeMessage< ::flwr::proto::RecordSet_MetricsEntry_DoNotUse >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::RecordSet_MetricsEntry_DoNotUse >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::RecordSet_ConfigsEntry_DoNotUse* Arena::CreateMaybeMessage< ::flwr::proto::RecordSet_ConfigsEntry_DoNotUse >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::RecordSet_ConfigsEntry_DoNotUse >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::RecordSet* 
Arena::CreateMaybeMessage< ::flwr::proto::RecordSet >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::RecordSet >(arena); +} +PROTOBUF_NAMESPACE_CLOSE + +// @@protoc_insertion_point(global_scope) +#include diff --git a/src/cc/flwr/include/flwr/proto/recordset.pb.h b/src/cc/flwr/include/flwr/proto/recordset.pb.h new file mode 100644 index 000000000000..74c336cf61ad --- /dev/null +++ b/src/cc/flwr/include/flwr/proto/recordset.pb.h @@ -0,0 +1,4255 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: flwr/proto/recordset.proto + +#ifndef GOOGLE_PROTOBUF_INCLUDED_flwr_2fproto_2frecordset_2eproto +#define GOOGLE_PROTOBUF_INCLUDED_flwr_2fproto_2frecordset_2eproto + +#include +#include + +#include +#if PROTOBUF_VERSION < 3018000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. +#endif +#if 3018001 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include // IWYU pragma: export +#include +#include +#include +// @@protoc_insertion_point(includes) +#include +#define PROTOBUF_INTERNAL_EXPORT_flwr_2fproto_2frecordset_2eproto +PROTOBUF_NAMESPACE_OPEN +namespace internal { +class AnyMetadata; +} // namespace internal +PROTOBUF_NAMESPACE_CLOSE + +// Internal implementation detail -- do not use these members. 
+struct TableStruct_flwr_2fproto_2frecordset_2eproto { + static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTableField entries[] + PROTOBUF_SECTION_VARIABLE(protodesc_cold); + static const ::PROTOBUF_NAMESPACE_ID::internal::AuxiliaryParseTableField aux[] + PROTOBUF_SECTION_VARIABLE(protodesc_cold); + static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[17] + PROTOBUF_SECTION_VARIABLE(protodesc_cold); + static const ::PROTOBUF_NAMESPACE_ID::internal::FieldMetadata field_metadata[]; + static const ::PROTOBUF_NAMESPACE_ID::internal::SerializationTable serialization_table[]; + static const ::PROTOBUF_NAMESPACE_ID::uint32 offsets[]; +}; +extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_flwr_2fproto_2frecordset_2eproto; +namespace flwr { +namespace proto { +class Array; +struct ArrayDefaultTypeInternal; +extern ArrayDefaultTypeInternal _Array_default_instance_; +class BoolList; +struct BoolListDefaultTypeInternal; +extern BoolListDefaultTypeInternal _BoolList_default_instance_; +class BytesList; +struct BytesListDefaultTypeInternal; +extern BytesListDefaultTypeInternal _BytesList_default_instance_; +class ConfigsRecord; +struct ConfigsRecordDefaultTypeInternal; +extern ConfigsRecordDefaultTypeInternal _ConfigsRecord_default_instance_; +class ConfigsRecordValue; +struct ConfigsRecordValueDefaultTypeInternal; +extern ConfigsRecordValueDefaultTypeInternal _ConfigsRecordValue_default_instance_; +class ConfigsRecord_DataEntry_DoNotUse; +struct ConfigsRecord_DataEntry_DoNotUseDefaultTypeInternal; +extern ConfigsRecord_DataEntry_DoNotUseDefaultTypeInternal _ConfigsRecord_DataEntry_DoNotUse_default_instance_; +class DoubleList; +struct DoubleListDefaultTypeInternal; +extern DoubleListDefaultTypeInternal _DoubleList_default_instance_; +class MetricsRecord; +struct MetricsRecordDefaultTypeInternal; +extern MetricsRecordDefaultTypeInternal _MetricsRecord_default_instance_; +class MetricsRecordValue; +struct 
MetricsRecordValueDefaultTypeInternal; +extern MetricsRecordValueDefaultTypeInternal _MetricsRecordValue_default_instance_; +class MetricsRecord_DataEntry_DoNotUse; +struct MetricsRecord_DataEntry_DoNotUseDefaultTypeInternal; +extern MetricsRecord_DataEntry_DoNotUseDefaultTypeInternal _MetricsRecord_DataEntry_DoNotUse_default_instance_; +class ParametersRecord; +struct ParametersRecordDefaultTypeInternal; +extern ParametersRecordDefaultTypeInternal _ParametersRecord_default_instance_; +class RecordSet; +struct RecordSetDefaultTypeInternal; +extern RecordSetDefaultTypeInternal _RecordSet_default_instance_; +class RecordSet_ConfigsEntry_DoNotUse; +struct RecordSet_ConfigsEntry_DoNotUseDefaultTypeInternal; +extern RecordSet_ConfigsEntry_DoNotUseDefaultTypeInternal _RecordSet_ConfigsEntry_DoNotUse_default_instance_; +class RecordSet_MetricsEntry_DoNotUse; +struct RecordSet_MetricsEntry_DoNotUseDefaultTypeInternal; +extern RecordSet_MetricsEntry_DoNotUseDefaultTypeInternal _RecordSet_MetricsEntry_DoNotUse_default_instance_; +class RecordSet_ParametersEntry_DoNotUse; +struct RecordSet_ParametersEntry_DoNotUseDefaultTypeInternal; +extern RecordSet_ParametersEntry_DoNotUseDefaultTypeInternal _RecordSet_ParametersEntry_DoNotUse_default_instance_; +class Sint64List; +struct Sint64ListDefaultTypeInternal; +extern Sint64ListDefaultTypeInternal _Sint64List_default_instance_; +class StringList; +struct StringListDefaultTypeInternal; +extern StringListDefaultTypeInternal _StringList_default_instance_; +} // namespace proto +} // namespace flwr +PROTOBUF_NAMESPACE_OPEN +template<> ::flwr::proto::Array* Arena::CreateMaybeMessage<::flwr::proto::Array>(Arena*); +template<> ::flwr::proto::BoolList* Arena::CreateMaybeMessage<::flwr::proto::BoolList>(Arena*); +template<> ::flwr::proto::BytesList* Arena::CreateMaybeMessage<::flwr::proto::BytesList>(Arena*); +template<> ::flwr::proto::ConfigsRecord* Arena::CreateMaybeMessage<::flwr::proto::ConfigsRecord>(Arena*); +template<> 
::flwr::proto::ConfigsRecordValue* Arena::CreateMaybeMessage<::flwr::proto::ConfigsRecordValue>(Arena*); +template<> ::flwr::proto::ConfigsRecord_DataEntry_DoNotUse* Arena::CreateMaybeMessage<::flwr::proto::ConfigsRecord_DataEntry_DoNotUse>(Arena*); +template<> ::flwr::proto::DoubleList* Arena::CreateMaybeMessage<::flwr::proto::DoubleList>(Arena*); +template<> ::flwr::proto::MetricsRecord* Arena::CreateMaybeMessage<::flwr::proto::MetricsRecord>(Arena*); +template<> ::flwr::proto::MetricsRecordValue* Arena::CreateMaybeMessage<::flwr::proto::MetricsRecordValue>(Arena*); +template<> ::flwr::proto::MetricsRecord_DataEntry_DoNotUse* Arena::CreateMaybeMessage<::flwr::proto::MetricsRecord_DataEntry_DoNotUse>(Arena*); +template<> ::flwr::proto::ParametersRecord* Arena::CreateMaybeMessage<::flwr::proto::ParametersRecord>(Arena*); +template<> ::flwr::proto::RecordSet* Arena::CreateMaybeMessage<::flwr::proto::RecordSet>(Arena*); +template<> ::flwr::proto::RecordSet_ConfigsEntry_DoNotUse* Arena::CreateMaybeMessage<::flwr::proto::RecordSet_ConfigsEntry_DoNotUse>(Arena*); +template<> ::flwr::proto::RecordSet_MetricsEntry_DoNotUse* Arena::CreateMaybeMessage<::flwr::proto::RecordSet_MetricsEntry_DoNotUse>(Arena*); +template<> ::flwr::proto::RecordSet_ParametersEntry_DoNotUse* Arena::CreateMaybeMessage<::flwr::proto::RecordSet_ParametersEntry_DoNotUse>(Arena*); +template<> ::flwr::proto::Sint64List* Arena::CreateMaybeMessage<::flwr::proto::Sint64List>(Arena*); +template<> ::flwr::proto::StringList* Arena::CreateMaybeMessage<::flwr::proto::StringList>(Arena*); +PROTOBUF_NAMESPACE_CLOSE +namespace flwr { +namespace proto { + +// =================================================================== + +class DoubleList final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.DoubleList) */ { + public: + inline DoubleList() : DoubleList(nullptr) {} + ~DoubleList() override; + explicit constexpr 
DoubleList(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + DoubleList(const DoubleList& from); + DoubleList(DoubleList&& from) noexcept + : DoubleList() { + *this = ::std::move(from); + } + + inline DoubleList& operator=(const DoubleList& from) { + CopyFrom(from); + return *this; + } + inline DoubleList& operator=(DoubleList&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const DoubleList& default_instance() { + return *internal_default_instance(); + } + static inline const DoubleList* internal_default_instance() { + return reinterpret_cast( + &_DoubleList_default_instance_); + } + static constexpr int kIndexInFileMessages = + 0; + + friend void swap(DoubleList& a, DoubleList& b) { + a.Swap(&b); + } + inline void Swap(DoubleList* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(DoubleList* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline DoubleList* New() const final { + return new DoubleList(); + } + + DoubleList* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using 
::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const DoubleList& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const DoubleList& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(DoubleList* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.DoubleList"; + } + protected: + explicit DoubleList(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kValsFieldNumber = 1, + }; + // repeated double vals = 1; + int vals_size() const; + private: + int _internal_vals_size() const; + public: + void clear_vals(); + private: + double _internal_vals(int index) const; + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& + _internal_vals() const; + void 
_internal_add_vals(double value); + ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* + _internal_mutable_vals(); + public: + double vals(int index) const; + void set_vals(int index, double value); + void add_vals(double value); + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& + vals() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* + mutable_vals(); + + // @@protoc_insertion_point(class_scope:flwr.proto.DoubleList) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< double > vals_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// ------------------------------------------------------------------- + +class Sint64List final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.Sint64List) */ { + public: + inline Sint64List() : Sint64List(nullptr) {} + ~Sint64List() override; + explicit constexpr Sint64List(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + Sint64List(const Sint64List& from); + Sint64List(Sint64List&& from) noexcept + : Sint64List() { + *this = ::std::move(from); + } + + inline Sint64List& operator=(const Sint64List& from) { + CopyFrom(from); + return *this; + } + inline Sint64List& operator=(Sint64List&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; 
+ } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const Sint64List& default_instance() { + return *internal_default_instance(); + } + static inline const Sint64List* internal_default_instance() { + return reinterpret_cast( + &_Sint64List_default_instance_); + } + static constexpr int kIndexInFileMessages = + 1; + + friend void swap(Sint64List& a, Sint64List& b) { + a.Swap(&b); + } + inline void Swap(Sint64List* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(Sint64List* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline Sint64List* New() const final { + return new Sint64List(); + } + + Sint64List* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const Sint64List& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const Sint64List& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) 
const final; + void InternalSwap(Sint64List* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.Sint64List"; + } + protected: + explicit Sint64List(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kValsFieldNumber = 1, + }; + // repeated sint64 vals = 1; + int vals_size() const; + private: + int _internal_vals_size() const; + public: + void clear_vals(); + private: + ::PROTOBUF_NAMESPACE_ID::int64 _internal_vals(int index) const; + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >& + _internal_vals() const; + void _internal_add_vals(::PROTOBUF_NAMESPACE_ID::int64 value); + ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >* + _internal_mutable_vals(); + public: + ::PROTOBUF_NAMESPACE_ID::int64 vals(int index) const; + void set_vals(int index, ::PROTOBUF_NAMESPACE_ID::int64 value); + void add_vals(::PROTOBUF_NAMESPACE_ID::int64 value); + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >& + vals() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >* + mutable_vals(); + + // @@protoc_insertion_point(class_scope:flwr.proto.Sint64List) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< 
::PROTOBUF_NAMESPACE_ID::int64 > vals_; + mutable std::atomic _vals_cached_byte_size_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// ------------------------------------------------------------------- + +class BoolList final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.BoolList) */ { + public: + inline BoolList() : BoolList(nullptr) {} + ~BoolList() override; + explicit constexpr BoolList(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + BoolList(const BoolList& from); + BoolList(BoolList&& from) noexcept + : BoolList() { + *this = ::std::move(from); + } + + inline BoolList& operator=(const BoolList& from) { + CopyFrom(from); + return *this; + } + inline BoolList& operator=(BoolList&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const BoolList& default_instance() { + return *internal_default_instance(); + } + static inline const BoolList* internal_default_instance() { + return reinterpret_cast( + &_BoolList_default_instance_); + } + static constexpr int kIndexInFileMessages = + 2; + + friend void swap(BoolList& a, BoolList& b) { + a.Swap(&b); + } + inline void Swap(BoolList* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + 
::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(BoolList* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline BoolList* New() const final { + return new BoolList(); + } + + BoolList* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const BoolList& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const BoolList& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(BoolList* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.BoolList"; + } + protected: + explicit BoolList(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // 
nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kValsFieldNumber = 1, + }; + // repeated bool vals = 1; + int vals_size() const; + private: + int _internal_vals_size() const; + public: + void clear_vals(); + private: + bool _internal_vals(int index) const; + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >& + _internal_vals() const; + void _internal_add_vals(bool value); + ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >* + _internal_mutable_vals(); + public: + bool vals(int index) const; + void set_vals(int index, bool value); + void add_vals(bool value); + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >& + vals() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >* + mutable_vals(); + + // @@protoc_insertion_point(class_scope:flwr.proto.BoolList) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool > vals_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// ------------------------------------------------------------------- + +class StringList final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.StringList) */ { + public: + inline StringList() : StringList(nullptr) {} + ~StringList() override; + explicit constexpr StringList(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + StringList(const StringList& from); + StringList(StringList&& from) noexcept + : StringList() { + *this = ::std::move(from); + } + + inline StringList& operator=(const StringList& from) { + CopyFrom(from); + return *this; + } + inline StringList& operator=(StringList&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == 
from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const StringList& default_instance() { + return *internal_default_instance(); + } + static inline const StringList* internal_default_instance() { + return reinterpret_cast( + &_StringList_default_instance_); + } + static constexpr int kIndexInFileMessages = + 3; + + friend void swap(StringList& a, StringList& b) { + a.Swap(&b); + } + inline void Swap(StringList* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(StringList* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline StringList* New() const final { + return new StringList(); + } + + StringList* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const StringList& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const StringList& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() 
const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(StringList* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.StringList"; + } + protected: + explicit StringList(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kValsFieldNumber = 1, + }; + // repeated string vals = 1; + int vals_size() const; + private: + int _internal_vals_size() const; + public: + void clear_vals(); + const std::string& vals(int index) const; + std::string* mutable_vals(int index); + void set_vals(int index, const std::string& value); + void set_vals(int index, std::string&& value); + void set_vals(int index, const char* value); + void set_vals(int index, const char* value, size_t size); + std::string* add_vals(); + void add_vals(const std::string& value); + void add_vals(std::string&& value); + void add_vals(const char* value); + void add_vals(const char* value, size_t size); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& vals() const; + 
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* mutable_vals(); + private: + const std::string& _internal_vals(int index) const; + std::string* _internal_add_vals(); + public: + + // @@protoc_insertion_point(class_scope:flwr.proto.StringList) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField vals_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// ------------------------------------------------------------------- + +class BytesList final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.BytesList) */ { + public: + inline BytesList() : BytesList(nullptr) {} + ~BytesList() override; + explicit constexpr BytesList(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + BytesList(const BytesList& from); + BytesList(BytesList&& from) noexcept + : BytesList() { + *this = ::std::move(from); + } + + inline BytesList& operator=(const BytesList& from) { + CopyFrom(from); + return *this; + } + inline BytesList& operator=(BytesList&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const BytesList& default_instance() { + return *internal_default_instance(); + } + 
static inline const BytesList* internal_default_instance() { + return reinterpret_cast( + &_BytesList_default_instance_); + } + static constexpr int kIndexInFileMessages = + 4; + + friend void swap(BytesList& a, BytesList& b) { + a.Swap(&b); + } + inline void Swap(BytesList* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(BytesList* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline BytesList* New() const final { + return new BytesList(); + } + + BytesList* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const BytesList& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const BytesList& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(BytesList* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.BytesList"; + } + protected: + 
explicit BytesList(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kValsFieldNumber = 1, + }; + // repeated bytes vals = 1; + int vals_size() const; + private: + int _internal_vals_size() const; + public: + void clear_vals(); + const std::string& vals(int index) const; + std::string* mutable_vals(int index); + void set_vals(int index, const std::string& value); + void set_vals(int index, std::string&& value); + void set_vals(int index, const char* value); + void set_vals(int index, const void* value, size_t size); + std::string* add_vals(); + void add_vals(const std::string& value); + void add_vals(std::string&& value); + void add_vals(const char* value); + void add_vals(const void* value, size_t size); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& vals() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* mutable_vals(); + private: + const std::string& _internal_vals(int index) const; + std::string* _internal_add_vals(); + public: + + // @@protoc_insertion_point(class_scope:flwr.proto.BytesList) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField vals_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// ------------------------------------------------------------------- + +class Array final : + 
public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.Array) */ { + public: + inline Array() : Array(nullptr) {} + ~Array() override; + explicit constexpr Array(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + Array(const Array& from); + Array(Array&& from) noexcept + : Array() { + *this = ::std::move(from); + } + + inline Array& operator=(const Array& from) { + CopyFrom(from); + return *this; + } + inline Array& operator=(Array&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const Array& default_instance() { + return *internal_default_instance(); + } + static inline const Array* internal_default_instance() { + return reinterpret_cast( + &_Array_default_instance_); + } + static constexpr int kIndexInFileMessages = + 5; + + friend void swap(Array& a, Array& b) { + a.Swap(&b); + } + inline void Swap(Array* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(Array* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline Array* New() const final { + return new Array(); + } + + Array* 
New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const Array& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const Array& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(Array* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.Array"; + } + protected: + explicit Array(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kShapeFieldNumber = 2, + kDtypeFieldNumber = 1, + kStypeFieldNumber = 3, + kDataFieldNumber = 4, + }; + // repeated int32 shape = 2; + int shape_size() const; + private: + int _internal_shape_size() const; + public: + void clear_shape(); + 
private: + ::PROTOBUF_NAMESPACE_ID::int32 _internal_shape(int index) const; + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int32 >& + _internal_shape() const; + void _internal_add_shape(::PROTOBUF_NAMESPACE_ID::int32 value); + ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int32 >* + _internal_mutable_shape(); + public: + ::PROTOBUF_NAMESPACE_ID::int32 shape(int index) const; + void set_shape(int index, ::PROTOBUF_NAMESPACE_ID::int32 value); + void add_shape(::PROTOBUF_NAMESPACE_ID::int32 value); + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int32 >& + shape() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int32 >* + mutable_shape(); + + // string dtype = 1; + void clear_dtype(); + const std::string& dtype() const; + template + void set_dtype(ArgT0&& arg0, ArgT... args); + std::string* mutable_dtype(); + PROTOBUF_MUST_USE_RESULT std::string* release_dtype(); + void set_allocated_dtype(std::string* dtype); + private: + const std::string& _internal_dtype() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_dtype(const std::string& value); + std::string* _internal_mutable_dtype(); + public: + + // string stype = 3; + void clear_stype(); + const std::string& stype() const; + template + void set_stype(ArgT0&& arg0, ArgT... args); + std::string* mutable_stype(); + PROTOBUF_MUST_USE_RESULT std::string* release_stype(); + void set_allocated_stype(std::string* stype); + private: + const std::string& _internal_stype() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_stype(const std::string& value); + std::string* _internal_mutable_stype(); + public: + + // bytes data = 4; + void clear_data(); + const std::string& data() const; + template + void set_data(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_data(); + PROTOBUF_MUST_USE_RESULT std::string* release_data(); + void set_allocated_data(std::string* data); + private: + const std::string& _internal_data() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_data(const std::string& value); + std::string* _internal_mutable_data(); + public: + + // @@protoc_insertion_point(class_scope:flwr.proto.Array) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int32 > shape_; + mutable std::atomic _shape_cached_byte_size_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr dtype_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr stype_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr data_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// ------------------------------------------------------------------- + +class MetricsRecordValue final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.MetricsRecordValue) */ { + public: + inline MetricsRecordValue() : MetricsRecordValue(nullptr) {} + ~MetricsRecordValue() override; + explicit constexpr MetricsRecordValue(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + MetricsRecordValue(const MetricsRecordValue& from); + MetricsRecordValue(MetricsRecordValue&& from) noexcept + : MetricsRecordValue() { + *this = ::std::move(from); + } + + inline MetricsRecordValue& operator=(const MetricsRecordValue& from) { + CopyFrom(from); + return *this; + } + inline MetricsRecordValue& operator=(MetricsRecordValue&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // 
!PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const MetricsRecordValue& default_instance() { + return *internal_default_instance(); + } + enum ValueCase { + kDouble = 1, + kSint64 = 2, + kDoubleList = 21, + kSint64List = 22, + VALUE_NOT_SET = 0, + }; + + static inline const MetricsRecordValue* internal_default_instance() { + return reinterpret_cast( + &_MetricsRecordValue_default_instance_); + } + static constexpr int kIndexInFileMessages = + 6; + + friend void swap(MetricsRecordValue& a, MetricsRecordValue& b) { + a.Swap(&b); + } + inline void Swap(MetricsRecordValue* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(MetricsRecordValue* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline MetricsRecordValue* New() const final { + return new MetricsRecordValue(); + } + + MetricsRecordValue* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const MetricsRecordValue& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const MetricsRecordValue& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + 
PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(MetricsRecordValue* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.MetricsRecordValue"; + } + protected: + explicit MetricsRecordValue(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kDoubleFieldNumber = 1, + kSint64FieldNumber = 2, + kDoubleListFieldNumber = 21, + kSint64ListFieldNumber = 22, + }; + // double double = 1; + bool has_double_() const; + private: + bool _internal_has_double_() const; + public: + void clear_double_(); + double double_() const; + void set_double_(double value); + private: + double _internal_double_() const; + void _internal_set_double_(double value); + public: + + // sint64 sint64 = 2; + bool has_sint64() const; + private: + bool _internal_has_sint64() const; + public: + void clear_sint64(); + ::PROTOBUF_NAMESPACE_ID::int64 sint64() const; + void 
set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value); + private: + ::PROTOBUF_NAMESPACE_ID::int64 _internal_sint64() const; + void _internal_set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value); + public: + + // .flwr.proto.DoubleList double_list = 21; + bool has_double_list() const; + private: + bool _internal_has_double_list() const; + public: + void clear_double_list(); + const ::flwr::proto::DoubleList& double_list() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::DoubleList* release_double_list(); + ::flwr::proto::DoubleList* mutable_double_list(); + void set_allocated_double_list(::flwr::proto::DoubleList* double_list); + private: + const ::flwr::proto::DoubleList& _internal_double_list() const; + ::flwr::proto::DoubleList* _internal_mutable_double_list(); + public: + void unsafe_arena_set_allocated_double_list( + ::flwr::proto::DoubleList* double_list); + ::flwr::proto::DoubleList* unsafe_arena_release_double_list(); + + // .flwr.proto.Sint64List sint64_list = 22; + bool has_sint64_list() const; + private: + bool _internal_has_sint64_list() const; + public: + void clear_sint64_list(); + const ::flwr::proto::Sint64List& sint64_list() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::Sint64List* release_sint64_list(); + ::flwr::proto::Sint64List* mutable_sint64_list(); + void set_allocated_sint64_list(::flwr::proto::Sint64List* sint64_list); + private: + const ::flwr::proto::Sint64List& _internal_sint64_list() const; + ::flwr::proto::Sint64List* _internal_mutable_sint64_list(); + public: + void unsafe_arena_set_allocated_sint64_list( + ::flwr::proto::Sint64List* sint64_list); + ::flwr::proto::Sint64List* unsafe_arena_release_sint64_list(); + + void clear_value(); + ValueCase value_case() const; + // @@protoc_insertion_point(class_scope:flwr.proto.MetricsRecordValue) + private: + class _Internal; + void set_has_double_(); + void set_has_sint64(); + void set_has_double_list(); + void set_has_sint64_list(); + + inline bool has_value() const; + inline void 
clear_has_value(); + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + union ValueUnion { + constexpr ValueUnion() : _constinit_{} {} + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_; + double double__; + ::PROTOBUF_NAMESPACE_ID::int64 sint64_; + ::flwr::proto::DoubleList* double_list_; + ::flwr::proto::Sint64List* sint64_list_; + } value_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + ::PROTOBUF_NAMESPACE_ID::uint32 _oneof_case_[1]; + + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// ------------------------------------------------------------------- + +class ConfigsRecordValue final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.ConfigsRecordValue) */ { + public: + inline ConfigsRecordValue() : ConfigsRecordValue(nullptr) {} + ~ConfigsRecordValue() override; + explicit constexpr ConfigsRecordValue(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + ConfigsRecordValue(const ConfigsRecordValue& from); + ConfigsRecordValue(ConfigsRecordValue&& from) noexcept + : ConfigsRecordValue() { + *this = ::std::move(from); + } + + inline ConfigsRecordValue& operator=(const ConfigsRecordValue& from) { + CopyFrom(from); + return *this; + } + inline ConfigsRecordValue& operator=(ConfigsRecordValue&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const 
::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const ConfigsRecordValue& default_instance() { + return *internal_default_instance(); + } + enum ValueCase { + kDouble = 1, + kSint64 = 2, + kBool = 3, + kString = 4, + kBytes = 5, + kDoubleList = 21, + kSint64List = 22, + kBoolList = 23, + kStringList = 24, + kBytesList = 25, + VALUE_NOT_SET = 0, + }; + + static inline const ConfigsRecordValue* internal_default_instance() { + return reinterpret_cast( + &_ConfigsRecordValue_default_instance_); + } + static constexpr int kIndexInFileMessages = + 7; + + friend void swap(ConfigsRecordValue& a, ConfigsRecordValue& b) { + a.Swap(&b); + } + inline void Swap(ConfigsRecordValue* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(ConfigsRecordValue* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline ConfigsRecordValue* New() const final { + return new ConfigsRecordValue(); + } + + ConfigsRecordValue* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const ConfigsRecordValue& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const ConfigsRecordValue& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + 
::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(ConfigsRecordValue* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.ConfigsRecordValue"; + } + protected: + explicit ConfigsRecordValue(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kDoubleFieldNumber = 1, + kSint64FieldNumber = 2, + kBoolFieldNumber = 3, + kStringFieldNumber = 4, + kBytesFieldNumber = 5, + kDoubleListFieldNumber = 21, + kSint64ListFieldNumber = 22, + kBoolListFieldNumber = 23, + kStringListFieldNumber = 24, + kBytesListFieldNumber = 25, + }; + // double double = 1; + bool has_double_() const; + private: + bool _internal_has_double_() const; + public: + void clear_double_(); + double double_() const; + void set_double_(double value); + private: + double _internal_double_() const; + void _internal_set_double_(double value); + public: + + // sint64 sint64 = 2; + bool has_sint64() const; + private: + bool _internal_has_sint64() const; + public: + void clear_sint64(); + ::PROTOBUF_NAMESPACE_ID::int64 sint64() const; + void set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value); + private: + 
::PROTOBUF_NAMESPACE_ID::int64 _internal_sint64() const; + void _internal_set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value); + public: + + // bool bool = 3; + bool has_bool_() const; + private: + bool _internal_has_bool_() const; + public: + void clear_bool_(); + bool bool_() const; + void set_bool_(bool value); + private: + bool _internal_bool_() const; + void _internal_set_bool_(bool value); + public: + + // string string = 4; + bool has_string() const; + private: + bool _internal_has_string() const; + public: + void clear_string(); + const std::string& string() const; + template + void set_string(ArgT0&& arg0, ArgT... args); + std::string* mutable_string(); + PROTOBUF_MUST_USE_RESULT std::string* release_string(); + void set_allocated_string(std::string* string); + private: + const std::string& _internal_string() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_string(const std::string& value); + std::string* _internal_mutable_string(); + public: + + // bytes bytes = 5; + bool has_bytes() const; + private: + bool _internal_has_bytes() const; + public: + void clear_bytes(); + const std::string& bytes() const; + template + void set_bytes(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_bytes(); + PROTOBUF_MUST_USE_RESULT std::string* release_bytes(); + void set_allocated_bytes(std::string* bytes); + private: + const std::string& _internal_bytes() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_bytes(const std::string& value); + std::string* _internal_mutable_bytes(); + public: + + // .flwr.proto.DoubleList double_list = 21; + bool has_double_list() const; + private: + bool _internal_has_double_list() const; + public: + void clear_double_list(); + const ::flwr::proto::DoubleList& double_list() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::DoubleList* release_double_list(); + ::flwr::proto::DoubleList* mutable_double_list(); + void set_allocated_double_list(::flwr::proto::DoubleList* double_list); + private: + const ::flwr::proto::DoubleList& _internal_double_list() const; + ::flwr::proto::DoubleList* _internal_mutable_double_list(); + public: + void unsafe_arena_set_allocated_double_list( + ::flwr::proto::DoubleList* double_list); + ::flwr::proto::DoubleList* unsafe_arena_release_double_list(); + + // .flwr.proto.Sint64List sint64_list = 22; + bool has_sint64_list() const; + private: + bool _internal_has_sint64_list() const; + public: + void clear_sint64_list(); + const ::flwr::proto::Sint64List& sint64_list() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::Sint64List* release_sint64_list(); + ::flwr::proto::Sint64List* mutable_sint64_list(); + void set_allocated_sint64_list(::flwr::proto::Sint64List* sint64_list); + private: + const ::flwr::proto::Sint64List& _internal_sint64_list() const; + ::flwr::proto::Sint64List* _internal_mutable_sint64_list(); + public: + void unsafe_arena_set_allocated_sint64_list( + ::flwr::proto::Sint64List* sint64_list); + ::flwr::proto::Sint64List* unsafe_arena_release_sint64_list(); + + // .flwr.proto.BoolList bool_list = 23; + bool has_bool_list() const; + private: + bool _internal_has_bool_list() const; + public: + void clear_bool_list(); + const ::flwr::proto::BoolList& 
bool_list() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::BoolList* release_bool_list(); + ::flwr::proto::BoolList* mutable_bool_list(); + void set_allocated_bool_list(::flwr::proto::BoolList* bool_list); + private: + const ::flwr::proto::BoolList& _internal_bool_list() const; + ::flwr::proto::BoolList* _internal_mutable_bool_list(); + public: + void unsafe_arena_set_allocated_bool_list( + ::flwr::proto::BoolList* bool_list); + ::flwr::proto::BoolList* unsafe_arena_release_bool_list(); + + // .flwr.proto.StringList string_list = 24; + bool has_string_list() const; + private: + bool _internal_has_string_list() const; + public: + void clear_string_list(); + const ::flwr::proto::StringList& string_list() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::StringList* release_string_list(); + ::flwr::proto::StringList* mutable_string_list(); + void set_allocated_string_list(::flwr::proto::StringList* string_list); + private: + const ::flwr::proto::StringList& _internal_string_list() const; + ::flwr::proto::StringList* _internal_mutable_string_list(); + public: + void unsafe_arena_set_allocated_string_list( + ::flwr::proto::StringList* string_list); + ::flwr::proto::StringList* unsafe_arena_release_string_list(); + + // .flwr.proto.BytesList bytes_list = 25; + bool has_bytes_list() const; + private: + bool _internal_has_bytes_list() const; + public: + void clear_bytes_list(); + const ::flwr::proto::BytesList& bytes_list() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::BytesList* release_bytes_list(); + ::flwr::proto::BytesList* mutable_bytes_list(); + void set_allocated_bytes_list(::flwr::proto::BytesList* bytes_list); + private: + const ::flwr::proto::BytesList& _internal_bytes_list() const; + ::flwr::proto::BytesList* _internal_mutable_bytes_list(); + public: + void unsafe_arena_set_allocated_bytes_list( + ::flwr::proto::BytesList* bytes_list); + ::flwr::proto::BytesList* unsafe_arena_release_bytes_list(); + + void clear_value(); + ValueCase value_case() const; + // 
@@protoc_insertion_point(class_scope:flwr.proto.ConfigsRecordValue) + private: + class _Internal; + void set_has_double_(); + void set_has_sint64(); + void set_has_bool_(); + void set_has_string(); + void set_has_bytes(); + void set_has_double_list(); + void set_has_sint64_list(); + void set_has_bool_list(); + void set_has_string_list(); + void set_has_bytes_list(); + + inline bool has_value() const; + inline void clear_has_value(); + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + union ValueUnion { + constexpr ValueUnion() : _constinit_{} {} + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_; + double double__; + ::PROTOBUF_NAMESPACE_ID::int64 sint64_; + bool bool__; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr string_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr bytes_; + ::flwr::proto::DoubleList* double_list_; + ::flwr::proto::Sint64List* sint64_list_; + ::flwr::proto::BoolList* bool_list_; + ::flwr::proto::StringList* string_list_; + ::flwr::proto::BytesList* bytes_list_; + } value_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + ::PROTOBUF_NAMESPACE_ID::uint32 _oneof_case_[1]; + + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// ------------------------------------------------------------------- + +class ParametersRecord final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.ParametersRecord) */ { + public: + inline ParametersRecord() : ParametersRecord(nullptr) {} + ~ParametersRecord() override; + explicit constexpr ParametersRecord(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + ParametersRecord(const ParametersRecord& from); + ParametersRecord(ParametersRecord&& from) noexcept + : ParametersRecord() { + *this = ::std::move(from); + } + + inline ParametersRecord& operator=(const ParametersRecord& from) { + 
CopyFrom(from); + return *this; + } + inline ParametersRecord& operator=(ParametersRecord&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const ParametersRecord& default_instance() { + return *internal_default_instance(); + } + static inline const ParametersRecord* internal_default_instance() { + return reinterpret_cast( + &_ParametersRecord_default_instance_); + } + static constexpr int kIndexInFileMessages = + 8; + + friend void swap(ParametersRecord& a, ParametersRecord& b) { + a.Swap(&b); + } + inline void Swap(ParametersRecord* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(ParametersRecord* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline ParametersRecord* New() const final { + return new ParametersRecord(); + } + + ParametersRecord* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const ParametersRecord& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const ParametersRecord& from); + 
private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(ParametersRecord* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.ParametersRecord"; + } + protected: + explicit ParametersRecord(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kDataKeysFieldNumber = 1, + kDataValuesFieldNumber = 2, + }; + // repeated string data_keys = 1; + int data_keys_size() const; + private: + int _internal_data_keys_size() const; + public: + void clear_data_keys(); + const std::string& data_keys(int index) const; + std::string* mutable_data_keys(int index); + void set_data_keys(int index, const std::string& value); + void set_data_keys(int index, std::string&& value); + void set_data_keys(int index, const char* value); + void 
set_data_keys(int index, const char* value, size_t size); + std::string* add_data_keys(); + void add_data_keys(const std::string& value); + void add_data_keys(std::string&& value); + void add_data_keys(const char* value); + void add_data_keys(const char* value, size_t size); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& data_keys() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* mutable_data_keys(); + private: + const std::string& _internal_data_keys(int index) const; + std::string* _internal_add_data_keys(); + public: + + // repeated .flwr.proto.Array data_values = 2; + int data_values_size() const; + private: + int _internal_data_values_size() const; + public: + void clear_data_values(); + ::flwr::proto::Array* mutable_data_values(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::Array >* + mutable_data_values(); + private: + const ::flwr::proto::Array& _internal_data_values(int index) const; + ::flwr::proto::Array* _internal_add_data_values(); + public: + const ::flwr::proto::Array& data_values(int index) const; + ::flwr::proto::Array* add_data_values(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::Array >& + data_values() const; + + // @@protoc_insertion_point(class_scope:flwr.proto.ParametersRecord) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField data_keys_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::Array > data_values_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// ------------------------------------------------------------------- + +class MetricsRecord_DataEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntry { +public: + typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntry SuperType; + 
MetricsRecord_DataEntry_DoNotUse(); + explicit constexpr MetricsRecord_DataEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + explicit MetricsRecord_DataEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void MergeFrom(const MetricsRecord_DataEntry_DoNotUse& other); + static const MetricsRecord_DataEntry_DoNotUse* internal_default_instance() { return reinterpret_cast(&_MetricsRecord_DataEntry_DoNotUse_default_instance_); } + static bool ValidateKey(std::string* s) { + return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "flwr.proto.MetricsRecord.DataEntry.key"); + } + static bool ValidateValue(void*) { return true; } + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; +}; + +// ------------------------------------------------------------------- + +class MetricsRecord final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.MetricsRecord) */ { + public: + inline MetricsRecord() : MetricsRecord(nullptr) {} + ~MetricsRecord() override; + explicit constexpr MetricsRecord(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + MetricsRecord(const MetricsRecord& from); + MetricsRecord(MetricsRecord&& from) noexcept + : MetricsRecord() { + *this = ::std::move(from); + } + + inline MetricsRecord& operator=(const MetricsRecord& from) { + CopyFrom(from); + return *this; + } + inline MetricsRecord& operator=(MetricsRecord&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + 
static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const MetricsRecord& default_instance() { + return *internal_default_instance(); + } + static inline const MetricsRecord* internal_default_instance() { + return reinterpret_cast( + &_MetricsRecord_default_instance_); + } + static constexpr int kIndexInFileMessages = + 10; + + friend void swap(MetricsRecord& a, MetricsRecord& b) { + a.Swap(&b); + } + inline void Swap(MetricsRecord* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(MetricsRecord* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline MetricsRecord* New() const final { + return new MetricsRecord(); + } + + MetricsRecord* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const MetricsRecord& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const MetricsRecord& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const 
final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(MetricsRecord* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.MetricsRecord"; + } + protected: + explicit MetricsRecord(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + + // accessors ------------------------------------------------------- + + enum : int { + kDataFieldNumber = 1, + }; + // map data = 1; + int data_size() const; + private: + int _internal_data_size() const; + public: + void clear_data(); + private: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >& + _internal_data() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >* + _internal_mutable_data(); + public: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >& + data() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >* + mutable_data(); + + // @@protoc_insertion_point(class_scope:flwr.proto.MetricsRecord) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::internal::MapField< + MetricsRecord_DataEntry_DoNotUse, + std::string, ::flwr::proto::MetricsRecordValue, + 
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_MESSAGE> data_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// ------------------------------------------------------------------- + +class ConfigsRecord_DataEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntry { +public: + typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntry SuperType; + ConfigsRecord_DataEntry_DoNotUse(); + explicit constexpr ConfigsRecord_DataEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + explicit ConfigsRecord_DataEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void MergeFrom(const ConfigsRecord_DataEntry_DoNotUse& other); + static const ConfigsRecord_DataEntry_DoNotUse* internal_default_instance() { return reinterpret_cast(&_ConfigsRecord_DataEntry_DoNotUse_default_instance_); } + static bool ValidateKey(std::string* s) { + return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "flwr.proto.ConfigsRecord.DataEntry.key"); + } + static bool ValidateValue(void*) { return true; } + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; +}; + +// ------------------------------------------------------------------- + +class ConfigsRecord final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.ConfigsRecord) */ { + public: + inline ConfigsRecord() : ConfigsRecord(nullptr) {} + ~ConfigsRecord() override; + explicit constexpr ConfigsRecord(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + ConfigsRecord(const ConfigsRecord& from); + ConfigsRecord(ConfigsRecord&& from) noexcept + : ConfigsRecord() { + *this = ::std::move(from); + } + + inline ConfigsRecord& 
operator=(const ConfigsRecord& from) { + CopyFrom(from); + return *this; + } + inline ConfigsRecord& operator=(ConfigsRecord&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const ConfigsRecord& default_instance() { + return *internal_default_instance(); + } + static inline const ConfigsRecord* internal_default_instance() { + return reinterpret_cast( + &_ConfigsRecord_default_instance_); + } + static constexpr int kIndexInFileMessages = + 12; + + friend void swap(ConfigsRecord& a, ConfigsRecord& b) { + a.Swap(&b); + } + inline void Swap(ConfigsRecord* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(ConfigsRecord* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline ConfigsRecord* New() const final { + return new ConfigsRecord(); + } + + ConfigsRecord* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const ConfigsRecord& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const ConfigsRecord& from); + 
private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(ConfigsRecord* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.ConfigsRecord"; + } + protected: + explicit ConfigsRecord(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + + // accessors ------------------------------------------------------- + + enum : int { + kDataFieldNumber = 1, + }; + // map data = 1; + int data_size() const; + private: + int _internal_data_size() const; + public: + void clear_data(); + private: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >& + _internal_data() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >* + _internal_mutable_data(); + public: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >& + data() 
const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >* + mutable_data(); + + // @@protoc_insertion_point(class_scope:flwr.proto.ConfigsRecord) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::internal::MapField< + ConfigsRecord_DataEntry_DoNotUse, + std::string, ::flwr::proto::ConfigsRecordValue, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_MESSAGE> data_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// ------------------------------------------------------------------- + +class RecordSet_ParametersEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntry { +public: + typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntry SuperType; + RecordSet_ParametersEntry_DoNotUse(); + explicit constexpr RecordSet_ParametersEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + explicit RecordSet_ParametersEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void MergeFrom(const RecordSet_ParametersEntry_DoNotUse& other); + static const RecordSet_ParametersEntry_DoNotUse* internal_default_instance() { return reinterpret_cast(&_RecordSet_ParametersEntry_DoNotUse_default_instance_); } + static bool ValidateKey(std::string* s) { + return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "flwr.proto.RecordSet.ParametersEntry.key"); + } + static bool ValidateValue(void*) { return true; } + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; +}; + +// 
------------------------------------------------------------------- + +class RecordSet_MetricsEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntry { +public: + typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntry SuperType; + RecordSet_MetricsEntry_DoNotUse(); + explicit constexpr RecordSet_MetricsEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + explicit RecordSet_MetricsEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void MergeFrom(const RecordSet_MetricsEntry_DoNotUse& other); + static const RecordSet_MetricsEntry_DoNotUse* internal_default_instance() { return reinterpret_cast(&_RecordSet_MetricsEntry_DoNotUse_default_instance_); } + static bool ValidateKey(std::string* s) { + return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "flwr.proto.RecordSet.MetricsEntry.key"); + } + static bool ValidateValue(void*) { return true; } + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; +}; + +// ------------------------------------------------------------------- + +class RecordSet_ConfigsEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntry { +public: + typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntry SuperType; + RecordSet_ConfigsEntry_DoNotUse(); + explicit constexpr RecordSet_ConfigsEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + explicit RecordSet_ConfigsEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void MergeFrom(const RecordSet_ConfigsEntry_DoNotUse& other); + static const RecordSet_ConfigsEntry_DoNotUse* internal_default_instance() { return reinterpret_cast(&_RecordSet_ConfigsEntry_DoNotUse_default_instance_); } + static bool ValidateKey(std::string* s) { + return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast(s->size()), 
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "flwr.proto.RecordSet.ConfigsEntry.key"); + } + static bool ValidateValue(void*) { return true; } + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; +}; + +// ------------------------------------------------------------------- + +class RecordSet final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.RecordSet) */ { + public: + inline RecordSet() : RecordSet(nullptr) {} + ~RecordSet() override; + explicit constexpr RecordSet(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + RecordSet(const RecordSet& from); + RecordSet(RecordSet&& from) noexcept + : RecordSet() { + *this = ::std::move(from); + } + + inline RecordSet& operator=(const RecordSet& from) { + CopyFrom(from); + return *this; + } + inline RecordSet& operator=(RecordSet&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const RecordSet& default_instance() { + return *internal_default_instance(); + } + static inline const RecordSet* internal_default_instance() { + return reinterpret_cast( + &_RecordSet_default_instance_); + } + static constexpr int kIndexInFileMessages = + 16; + + friend void swap(RecordSet& a, RecordSet& b) { + a.Swap(&b); + } + inline void Swap(RecordSet* other) { + if (other == this) return; + if 
(GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(RecordSet* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline RecordSet* New() const final { + return new RecordSet(); + } + + RecordSet* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const RecordSet& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const RecordSet& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(RecordSet* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.RecordSet"; + } + protected: + explicit RecordSet(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const 
::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + + // accessors ------------------------------------------------------- + + enum : int { + kParametersFieldNumber = 1, + kMetricsFieldNumber = 2, + kConfigsFieldNumber = 3, + }; + // map parameters = 1; + int parameters_size() const; + private: + int _internal_parameters_size() const; + public: + void clear_parameters(); + private: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >& + _internal_parameters() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >* + _internal_mutable_parameters(); + public: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >& + parameters() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >* + mutable_parameters(); + + // map metrics = 2; + int metrics_size() const; + private: + int _internal_metrics_size() const; + public: + void clear_metrics(); + private: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >& + _internal_metrics() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >* + _internal_mutable_metrics(); + public: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >& + metrics() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >* + mutable_metrics(); + + // map configs = 3; + int configs_size() const; + private: + int _internal_configs_size() const; + public: + void clear_configs(); + private: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecord >& + _internal_configs() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecord >* + _internal_mutable_configs(); + public: + const ::PROTOBUF_NAMESPACE_ID::Map< 
std::string, ::flwr::proto::ConfigsRecord >& + configs() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecord >* + mutable_configs(); + + // @@protoc_insertion_point(class_scope:flwr.proto.RecordSet) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::internal::MapField< + RecordSet_ParametersEntry_DoNotUse, + std::string, ::flwr::proto::ParametersRecord, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_MESSAGE> parameters_; + ::PROTOBUF_NAMESPACE_ID::internal::MapField< + RecordSet_MetricsEntry_DoNotUse, + std::string, ::flwr::proto::MetricsRecord, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_MESSAGE> metrics_; + ::PROTOBUF_NAMESPACE_ID::internal::MapField< + RecordSet_ConfigsEntry_DoNotUse, + std::string, ::flwr::proto::ConfigsRecord, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_MESSAGE> configs_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// =================================================================== + + +// =================================================================== + +#ifdef __GNUC__ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif // __GNUC__ +// DoubleList + +// repeated double vals = 1; +inline int DoubleList::_internal_vals_size() const { + return vals_.size(); +} +inline int DoubleList::vals_size() const { + return _internal_vals_size(); +} +inline void DoubleList::clear_vals() { + vals_.Clear(); +} +inline double DoubleList::_internal_vals(int index) const { + return vals_.Get(index); +} 
+inline double DoubleList::vals(int index) const { + // @@protoc_insertion_point(field_get:flwr.proto.DoubleList.vals) + return _internal_vals(index); +} +inline void DoubleList::set_vals(int index, double value) { + vals_.Set(index, value); + // @@protoc_insertion_point(field_set:flwr.proto.DoubleList.vals) +} +inline void DoubleList::_internal_add_vals(double value) { + vals_.Add(value); +} +inline void DoubleList::add_vals(double value) { + _internal_add_vals(value); + // @@protoc_insertion_point(field_add:flwr.proto.DoubleList.vals) +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& +DoubleList::_internal_vals() const { + return vals_; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& +DoubleList::vals() const { + // @@protoc_insertion_point(field_list:flwr.proto.DoubleList.vals) + return _internal_vals(); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* +DoubleList::_internal_mutable_vals() { + return &vals_; +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* +DoubleList::mutable_vals() { + // @@protoc_insertion_point(field_mutable_list:flwr.proto.DoubleList.vals) + return _internal_mutable_vals(); +} + +// ------------------------------------------------------------------- + +// Sint64List + +// repeated sint64 vals = 1; +inline int Sint64List::_internal_vals_size() const { + return vals_.size(); +} +inline int Sint64List::vals_size() const { + return _internal_vals_size(); +} +inline void Sint64List::clear_vals() { + vals_.Clear(); +} +inline ::PROTOBUF_NAMESPACE_ID::int64 Sint64List::_internal_vals(int index) const { + return vals_.Get(index); +} +inline ::PROTOBUF_NAMESPACE_ID::int64 Sint64List::vals(int index) const { + // @@protoc_insertion_point(field_get:flwr.proto.Sint64List.vals) + return _internal_vals(index); +} +inline void Sint64List::set_vals(int index, ::PROTOBUF_NAMESPACE_ID::int64 value) { + vals_.Set(index, value); + // @@protoc_insertion_point(field_set:flwr.proto.Sint64List.vals) +} 
+inline void Sint64List::_internal_add_vals(::PROTOBUF_NAMESPACE_ID::int64 value) { + vals_.Add(value); +} +inline void Sint64List::add_vals(::PROTOBUF_NAMESPACE_ID::int64 value) { + _internal_add_vals(value); + // @@protoc_insertion_point(field_add:flwr.proto.Sint64List.vals) +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >& +Sint64List::_internal_vals() const { + return vals_; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >& +Sint64List::vals() const { + // @@protoc_insertion_point(field_list:flwr.proto.Sint64List.vals) + return _internal_vals(); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >* +Sint64List::_internal_mutable_vals() { + return &vals_; +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >* +Sint64List::mutable_vals() { + // @@protoc_insertion_point(field_mutable_list:flwr.proto.Sint64List.vals) + return _internal_mutable_vals(); +} + +// ------------------------------------------------------------------- + +// BoolList + +// repeated bool vals = 1; +inline int BoolList::_internal_vals_size() const { + return vals_.size(); +} +inline int BoolList::vals_size() const { + return _internal_vals_size(); +} +inline void BoolList::clear_vals() { + vals_.Clear(); +} +inline bool BoolList::_internal_vals(int index) const { + return vals_.Get(index); +} +inline bool BoolList::vals(int index) const { + // @@protoc_insertion_point(field_get:flwr.proto.BoolList.vals) + return _internal_vals(index); +} +inline void BoolList::set_vals(int index, bool value) { + vals_.Set(index, value); + // @@protoc_insertion_point(field_set:flwr.proto.BoolList.vals) +} +inline void BoolList::_internal_add_vals(bool value) { + vals_.Add(value); +} +inline void BoolList::add_vals(bool value) { + _internal_add_vals(value); + // @@protoc_insertion_point(field_add:flwr.proto.BoolList.vals) +} +inline const 
::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >& +BoolList::_internal_vals() const { + return vals_; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >& +BoolList::vals() const { + // @@protoc_insertion_point(field_list:flwr.proto.BoolList.vals) + return _internal_vals(); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >* +BoolList::_internal_mutable_vals() { + return &vals_; +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >* +BoolList::mutable_vals() { + // @@protoc_insertion_point(field_mutable_list:flwr.proto.BoolList.vals) + return _internal_mutable_vals(); +} + +// ------------------------------------------------------------------- + +// StringList + +// repeated string vals = 1; +inline int StringList::_internal_vals_size() const { + return vals_.size(); +} +inline int StringList::vals_size() const { + return _internal_vals_size(); +} +inline void StringList::clear_vals() { + vals_.Clear(); +} +inline std::string* StringList::add_vals() { + std::string* _s = _internal_add_vals(); + // @@protoc_insertion_point(field_add_mutable:flwr.proto.StringList.vals) + return _s; +} +inline const std::string& StringList::_internal_vals(int index) const { + return vals_.Get(index); +} +inline const std::string& StringList::vals(int index) const { + // @@protoc_insertion_point(field_get:flwr.proto.StringList.vals) + return _internal_vals(index); +} +inline std::string* StringList::mutable_vals(int index) { + // @@protoc_insertion_point(field_mutable:flwr.proto.StringList.vals) + return vals_.Mutable(index); +} +inline void StringList::set_vals(int index, const std::string& value) { + vals_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set:flwr.proto.StringList.vals) +} +inline void StringList::set_vals(int index, std::string&& value) { + vals_.Mutable(index)->assign(std::move(value)); + // @@protoc_insertion_point(field_set:flwr.proto.StringList.vals) +} +inline void StringList::set_vals(int index, const char* value) { + 
GOOGLE_DCHECK(value != nullptr); + vals_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set_char:flwr.proto.StringList.vals) +} +inline void StringList::set_vals(int index, const char* value, size_t size) { + vals_.Mutable(index)->assign( + reinterpret_cast(value), size); + // @@protoc_insertion_point(field_set_pointer:flwr.proto.StringList.vals) +} +inline std::string* StringList::_internal_add_vals() { + return vals_.Add(); +} +inline void StringList::add_vals(const std::string& value) { + vals_.Add()->assign(value); + // @@protoc_insertion_point(field_add:flwr.proto.StringList.vals) +} +inline void StringList::add_vals(std::string&& value) { + vals_.Add(std::move(value)); + // @@protoc_insertion_point(field_add:flwr.proto.StringList.vals) +} +inline void StringList::add_vals(const char* value) { + GOOGLE_DCHECK(value != nullptr); + vals_.Add()->assign(value); + // @@protoc_insertion_point(field_add_char:flwr.proto.StringList.vals) +} +inline void StringList::add_vals(const char* value, size_t size) { + vals_.Add()->assign(reinterpret_cast(value), size); + // @@protoc_insertion_point(field_add_pointer:flwr.proto.StringList.vals) +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& +StringList::vals() const { + // @@protoc_insertion_point(field_list:flwr.proto.StringList.vals) + return vals_; +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* +StringList::mutable_vals() { + // @@protoc_insertion_point(field_mutable_list:flwr.proto.StringList.vals) + return &vals_; +} + +// ------------------------------------------------------------------- + +// BytesList + +// repeated bytes vals = 1; +inline int BytesList::_internal_vals_size() const { + return vals_.size(); +} +inline int BytesList::vals_size() const { + return _internal_vals_size(); +} +inline void BytesList::clear_vals() { + vals_.Clear(); +} +inline std::string* BytesList::add_vals() { + std::string* _s = _internal_add_vals(); + // 
@@protoc_insertion_point(field_add_mutable:flwr.proto.BytesList.vals) + return _s; +} +inline const std::string& BytesList::_internal_vals(int index) const { + return vals_.Get(index); +} +inline const std::string& BytesList::vals(int index) const { + // @@protoc_insertion_point(field_get:flwr.proto.BytesList.vals) + return _internal_vals(index); +} +inline std::string* BytesList::mutable_vals(int index) { + // @@protoc_insertion_point(field_mutable:flwr.proto.BytesList.vals) + return vals_.Mutable(index); +} +inline void BytesList::set_vals(int index, const std::string& value) { + vals_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set:flwr.proto.BytesList.vals) +} +inline void BytesList::set_vals(int index, std::string&& value) { + vals_.Mutable(index)->assign(std::move(value)); + // @@protoc_insertion_point(field_set:flwr.proto.BytesList.vals) +} +inline void BytesList::set_vals(int index, const char* value) { + GOOGLE_DCHECK(value != nullptr); + vals_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set_char:flwr.proto.BytesList.vals) +} +inline void BytesList::set_vals(int index, const void* value, size_t size) { + vals_.Mutable(index)->assign( + reinterpret_cast(value), size); + // @@protoc_insertion_point(field_set_pointer:flwr.proto.BytesList.vals) +} +inline std::string* BytesList::_internal_add_vals() { + return vals_.Add(); +} +inline void BytesList::add_vals(const std::string& value) { + vals_.Add()->assign(value); + // @@protoc_insertion_point(field_add:flwr.proto.BytesList.vals) +} +inline void BytesList::add_vals(std::string&& value) { + vals_.Add(std::move(value)); + // @@protoc_insertion_point(field_add:flwr.proto.BytesList.vals) +} +inline void BytesList::add_vals(const char* value) { + GOOGLE_DCHECK(value != nullptr); + vals_.Add()->assign(value); + // @@protoc_insertion_point(field_add_char:flwr.proto.BytesList.vals) +} +inline void BytesList::add_vals(const void* value, size_t size) { + 
vals_.Add()->assign(reinterpret_cast(value), size); + // @@protoc_insertion_point(field_add_pointer:flwr.proto.BytesList.vals) +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& +BytesList::vals() const { + // @@protoc_insertion_point(field_list:flwr.proto.BytesList.vals) + return vals_; +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* +BytesList::mutable_vals() { + // @@protoc_insertion_point(field_mutable_list:flwr.proto.BytesList.vals) + return &vals_; +} + +// ------------------------------------------------------------------- + +// Array + +// string dtype = 1; +inline void Array::clear_dtype() { + dtype_.ClearToEmpty(); +} +inline const std::string& Array::dtype() const { + // @@protoc_insertion_point(field_get:flwr.proto.Array.dtype) + return _internal_dtype(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void Array::set_dtype(ArgT0&& arg0, ArgT... args) { + + dtype_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:flwr.proto.Array.dtype) +} +inline std::string* Array::mutable_dtype() { + std::string* _s = _internal_mutable_dtype(); + // @@protoc_insertion_point(field_mutable:flwr.proto.Array.dtype) + return _s; +} +inline const std::string& Array::_internal_dtype() const { + return dtype_.Get(); +} +inline void Array::_internal_set_dtype(const std::string& value) { + + dtype_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); +} +inline std::string* Array::_internal_mutable_dtype() { + + return dtype_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); +} +inline std::string* Array::release_dtype() { + // @@protoc_insertion_point(field_release:flwr.proto.Array.dtype) + return dtype_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); +} +inline void 
Array::set_allocated_dtype(std::string* dtype) { + if (dtype != nullptr) { + + } else { + + } + dtype_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), dtype, + GetArenaForAllocation()); + // @@protoc_insertion_point(field_set_allocated:flwr.proto.Array.dtype) +} + +// repeated int32 shape = 2; +inline int Array::_internal_shape_size() const { + return shape_.size(); +} +inline int Array::shape_size() const { + return _internal_shape_size(); +} +inline void Array::clear_shape() { + shape_.Clear(); +} +inline ::PROTOBUF_NAMESPACE_ID::int32 Array::_internal_shape(int index) const { + return shape_.Get(index); +} +inline ::PROTOBUF_NAMESPACE_ID::int32 Array::shape(int index) const { + // @@protoc_insertion_point(field_get:flwr.proto.Array.shape) + return _internal_shape(index); +} +inline void Array::set_shape(int index, ::PROTOBUF_NAMESPACE_ID::int32 value) { + shape_.Set(index, value); + // @@protoc_insertion_point(field_set:flwr.proto.Array.shape) +} +inline void Array::_internal_add_shape(::PROTOBUF_NAMESPACE_ID::int32 value) { + shape_.Add(value); +} +inline void Array::add_shape(::PROTOBUF_NAMESPACE_ID::int32 value) { + _internal_add_shape(value); + // @@protoc_insertion_point(field_add:flwr.proto.Array.shape) +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int32 >& +Array::_internal_shape() const { + return shape_; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int32 >& +Array::shape() const { + // @@protoc_insertion_point(field_list:flwr.proto.Array.shape) + return _internal_shape(); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int32 >* +Array::_internal_mutable_shape() { + return &shape_; +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int32 >* +Array::mutable_shape() { + // @@protoc_insertion_point(field_mutable_list:flwr.proto.Array.shape) + return _internal_mutable_shape(); +} + +// string stype = 3; 
+inline void Array::clear_stype() { + stype_.ClearToEmpty(); +} +inline const std::string& Array::stype() const { + // @@protoc_insertion_point(field_get:flwr.proto.Array.stype) + return _internal_stype(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void Array::set_stype(ArgT0&& arg0, ArgT... args) { + + stype_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:flwr.proto.Array.stype) +} +inline std::string* Array::mutable_stype() { + std::string* _s = _internal_mutable_stype(); + // @@protoc_insertion_point(field_mutable:flwr.proto.Array.stype) + return _s; +} +inline const std::string& Array::_internal_stype() const { + return stype_.Get(); +} +inline void Array::_internal_set_stype(const std::string& value) { + + stype_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); +} +inline std::string* Array::_internal_mutable_stype() { + + return stype_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); +} +inline std::string* Array::release_stype() { + // @@protoc_insertion_point(field_release:flwr.proto.Array.stype) + return stype_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); +} +inline void Array::set_allocated_stype(std::string* stype) { + if (stype != nullptr) { + + } else { + + } + stype_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), stype, + GetArenaForAllocation()); + // @@protoc_insertion_point(field_set_allocated:flwr.proto.Array.stype) +} + +// bytes data = 4; +inline void Array::clear_data() { + data_.ClearToEmpty(); +} +inline const std::string& Array::data() const { + // @@protoc_insertion_point(field_get:flwr.proto.Array.data) + return _internal_data(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void Array::set_data(ArgT0&& arg0, ArgT... 
args) { + + data_.SetBytes(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:flwr.proto.Array.data) +} +inline std::string* Array::mutable_data() { + std::string* _s = _internal_mutable_data(); + // @@protoc_insertion_point(field_mutable:flwr.proto.Array.data) + return _s; +} +inline const std::string& Array::_internal_data() const { + return data_.Get(); +} +inline void Array::_internal_set_data(const std::string& value) { + + data_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); +} +inline std::string* Array::_internal_mutable_data() { + + return data_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); +} +inline std::string* Array::release_data() { + // @@protoc_insertion_point(field_release:flwr.proto.Array.data) + return data_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); +} +inline void Array::set_allocated_data(std::string* data) { + if (data != nullptr) { + + } else { + + } + data_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), data, + GetArenaForAllocation()); + // @@protoc_insertion_point(field_set_allocated:flwr.proto.Array.data) +} + +// ------------------------------------------------------------------- + +// MetricsRecordValue + +// double double = 1; +inline bool MetricsRecordValue::_internal_has_double_() const { + return value_case() == kDouble; +} +inline bool MetricsRecordValue::has_double_() const { + return _internal_has_double_(); +} +inline void MetricsRecordValue::set_has_double_() { + _oneof_case_[0] = kDouble; +} +inline void MetricsRecordValue::clear_double_() { + if (_internal_has_double_()) { + value_.double__ = 0; + clear_has_value(); + } +} +inline double MetricsRecordValue::_internal_double_() const { + if (_internal_has_double_()) { 
+ return value_.double__; + } + return 0; +} +inline void MetricsRecordValue::_internal_set_double_(double value) { + if (!_internal_has_double_()) { + clear_value(); + set_has_double_(); + } + value_.double__ = value; +} +inline double MetricsRecordValue::double_() const { + // @@protoc_insertion_point(field_get:flwr.proto.MetricsRecordValue.double) + return _internal_double_(); +} +inline void MetricsRecordValue::set_double_(double value) { + _internal_set_double_(value); + // @@protoc_insertion_point(field_set:flwr.proto.MetricsRecordValue.double) +} + +// sint64 sint64 = 2; +inline bool MetricsRecordValue::_internal_has_sint64() const { + return value_case() == kSint64; +} +inline bool MetricsRecordValue::has_sint64() const { + return _internal_has_sint64(); +} +inline void MetricsRecordValue::set_has_sint64() { + _oneof_case_[0] = kSint64; +} +inline void MetricsRecordValue::clear_sint64() { + if (_internal_has_sint64()) { + value_.sint64_ = int64_t{0}; + clear_has_value(); + } +} +inline ::PROTOBUF_NAMESPACE_ID::int64 MetricsRecordValue::_internal_sint64() const { + if (_internal_has_sint64()) { + return value_.sint64_; + } + return int64_t{0}; +} +inline void MetricsRecordValue::_internal_set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value) { + if (!_internal_has_sint64()) { + clear_value(); + set_has_sint64(); + } + value_.sint64_ = value; +} +inline ::PROTOBUF_NAMESPACE_ID::int64 MetricsRecordValue::sint64() const { + // @@protoc_insertion_point(field_get:flwr.proto.MetricsRecordValue.sint64) + return _internal_sint64(); +} +inline void MetricsRecordValue::set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value) { + _internal_set_sint64(value); + // @@protoc_insertion_point(field_set:flwr.proto.MetricsRecordValue.sint64) +} + +// .flwr.proto.DoubleList double_list = 21; +inline bool MetricsRecordValue::_internal_has_double_list() const { + return value_case() == kDoubleList; +} +inline bool MetricsRecordValue::has_double_list() const { + return 
_internal_has_double_list(); +} +inline void MetricsRecordValue::set_has_double_list() { + _oneof_case_[0] = kDoubleList; +} +inline void MetricsRecordValue::clear_double_list() { + if (_internal_has_double_list()) { + if (GetArenaForAllocation() == nullptr) { + delete value_.double_list_; + } + clear_has_value(); + } +} +inline ::flwr::proto::DoubleList* MetricsRecordValue::release_double_list() { + // @@protoc_insertion_point(field_release:flwr.proto.MetricsRecordValue.double_list) + if (_internal_has_double_list()) { + clear_has_value(); + ::flwr::proto::DoubleList* temp = value_.double_list_; + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } + value_.double_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline const ::flwr::proto::DoubleList& MetricsRecordValue::_internal_double_list() const { + return _internal_has_double_list() + ? *value_.double_list_ + : reinterpret_cast< ::flwr::proto::DoubleList&>(::flwr::proto::_DoubleList_default_instance_); +} +inline const ::flwr::proto::DoubleList& MetricsRecordValue::double_list() const { + // @@protoc_insertion_point(field_get:flwr.proto.MetricsRecordValue.double_list) + return _internal_double_list(); +} +inline ::flwr::proto::DoubleList* MetricsRecordValue::unsafe_arena_release_double_list() { + // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.MetricsRecordValue.double_list) + if (_internal_has_double_list()) { + clear_has_value(); + ::flwr::proto::DoubleList* temp = value_.double_list_; + value_.double_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline void MetricsRecordValue::unsafe_arena_set_allocated_double_list(::flwr::proto::DoubleList* double_list) { + clear_value(); + if (double_list) { + set_has_double_list(); + value_.double_list_ = double_list; + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.MetricsRecordValue.double_list) +} +inline 
::flwr::proto::DoubleList* MetricsRecordValue::_internal_mutable_double_list() { + if (!_internal_has_double_list()) { + clear_value(); + set_has_double_list(); + value_.double_list_ = CreateMaybeMessage< ::flwr::proto::DoubleList >(GetArenaForAllocation()); + } + return value_.double_list_; +} +inline ::flwr::proto::DoubleList* MetricsRecordValue::mutable_double_list() { + ::flwr::proto::DoubleList* _msg = _internal_mutable_double_list(); + // @@protoc_insertion_point(field_mutable:flwr.proto.MetricsRecordValue.double_list) + return _msg; +} + +// .flwr.proto.Sint64List sint64_list = 22; +inline bool MetricsRecordValue::_internal_has_sint64_list() const { + return value_case() == kSint64List; +} +inline bool MetricsRecordValue::has_sint64_list() const { + return _internal_has_sint64_list(); +} +inline void MetricsRecordValue::set_has_sint64_list() { + _oneof_case_[0] = kSint64List; +} +inline void MetricsRecordValue::clear_sint64_list() { + if (_internal_has_sint64_list()) { + if (GetArenaForAllocation() == nullptr) { + delete value_.sint64_list_; + } + clear_has_value(); + } +} +inline ::flwr::proto::Sint64List* MetricsRecordValue::release_sint64_list() { + // @@protoc_insertion_point(field_release:flwr.proto.MetricsRecordValue.sint64_list) + if (_internal_has_sint64_list()) { + clear_has_value(); + ::flwr::proto::Sint64List* temp = value_.sint64_list_; + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } + value_.sint64_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline const ::flwr::proto::Sint64List& MetricsRecordValue::_internal_sint64_list() const { + return _internal_has_sint64_list() + ? 
*value_.sint64_list_ + : reinterpret_cast< ::flwr::proto::Sint64List&>(::flwr::proto::_Sint64List_default_instance_); +} +inline const ::flwr::proto::Sint64List& MetricsRecordValue::sint64_list() const { + // @@protoc_insertion_point(field_get:flwr.proto.MetricsRecordValue.sint64_list) + return _internal_sint64_list(); +} +inline ::flwr::proto::Sint64List* MetricsRecordValue::unsafe_arena_release_sint64_list() { + // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.MetricsRecordValue.sint64_list) + if (_internal_has_sint64_list()) { + clear_has_value(); + ::flwr::proto::Sint64List* temp = value_.sint64_list_; + value_.sint64_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline void MetricsRecordValue::unsafe_arena_set_allocated_sint64_list(::flwr::proto::Sint64List* sint64_list) { + clear_value(); + if (sint64_list) { + set_has_sint64_list(); + value_.sint64_list_ = sint64_list; + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.MetricsRecordValue.sint64_list) +} +inline ::flwr::proto::Sint64List* MetricsRecordValue::_internal_mutable_sint64_list() { + if (!_internal_has_sint64_list()) { + clear_value(); + set_has_sint64_list(); + value_.sint64_list_ = CreateMaybeMessage< ::flwr::proto::Sint64List >(GetArenaForAllocation()); + } + return value_.sint64_list_; +} +inline ::flwr::proto::Sint64List* MetricsRecordValue::mutable_sint64_list() { + ::flwr::proto::Sint64List* _msg = _internal_mutable_sint64_list(); + // @@protoc_insertion_point(field_mutable:flwr.proto.MetricsRecordValue.sint64_list) + return _msg; +} + +inline bool MetricsRecordValue::has_value() const { + return value_case() != VALUE_NOT_SET; +} +inline void MetricsRecordValue::clear_has_value() { + _oneof_case_[0] = VALUE_NOT_SET; +} +inline MetricsRecordValue::ValueCase MetricsRecordValue::value_case() const { + return MetricsRecordValue::ValueCase(_oneof_case_[0]); +} +// ------------------------------------------------------------------- 
+ +// ConfigsRecordValue + +// double double = 1; +inline bool ConfigsRecordValue::_internal_has_double_() const { + return value_case() == kDouble; +} +inline bool ConfigsRecordValue::has_double_() const { + return _internal_has_double_(); +} +inline void ConfigsRecordValue::set_has_double_() { + _oneof_case_[0] = kDouble; +} +inline void ConfigsRecordValue::clear_double_() { + if (_internal_has_double_()) { + value_.double__ = 0; + clear_has_value(); + } +} +inline double ConfigsRecordValue::_internal_double_() const { + if (_internal_has_double_()) { + return value_.double__; + } + return 0; +} +inline void ConfigsRecordValue::_internal_set_double_(double value) { + if (!_internal_has_double_()) { + clear_value(); + set_has_double_(); + } + value_.double__ = value; +} +inline double ConfigsRecordValue::double_() const { + // @@protoc_insertion_point(field_get:flwr.proto.ConfigsRecordValue.double) + return _internal_double_(); +} +inline void ConfigsRecordValue::set_double_(double value) { + _internal_set_double_(value); + // @@protoc_insertion_point(field_set:flwr.proto.ConfigsRecordValue.double) +} + +// sint64 sint64 = 2; +inline bool ConfigsRecordValue::_internal_has_sint64() const { + return value_case() == kSint64; +} +inline bool ConfigsRecordValue::has_sint64() const { + return _internal_has_sint64(); +} +inline void ConfigsRecordValue::set_has_sint64() { + _oneof_case_[0] = kSint64; +} +inline void ConfigsRecordValue::clear_sint64() { + if (_internal_has_sint64()) { + value_.sint64_ = int64_t{0}; + clear_has_value(); + } +} +inline ::PROTOBUF_NAMESPACE_ID::int64 ConfigsRecordValue::_internal_sint64() const { + if (_internal_has_sint64()) { + return value_.sint64_; + } + return int64_t{0}; +} +inline void ConfigsRecordValue::_internal_set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value) { + if (!_internal_has_sint64()) { + clear_value(); + set_has_sint64(); + } + value_.sint64_ = value; +} +inline ::PROTOBUF_NAMESPACE_ID::int64 ConfigsRecordValue::sint64() 
const { + // @@protoc_insertion_point(field_get:flwr.proto.ConfigsRecordValue.sint64) + return _internal_sint64(); +} +inline void ConfigsRecordValue::set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value) { + _internal_set_sint64(value); + // @@protoc_insertion_point(field_set:flwr.proto.ConfigsRecordValue.sint64) +} + +// bool bool = 3; +inline bool ConfigsRecordValue::_internal_has_bool_() const { + return value_case() == kBool; +} +inline bool ConfigsRecordValue::has_bool_() const { + return _internal_has_bool_(); +} +inline void ConfigsRecordValue::set_has_bool_() { + _oneof_case_[0] = kBool; +} +inline void ConfigsRecordValue::clear_bool_() { + if (_internal_has_bool_()) { + value_.bool__ = false; + clear_has_value(); + } +} +inline bool ConfigsRecordValue::_internal_bool_() const { + if (_internal_has_bool_()) { + return value_.bool__; + } + return false; +} +inline void ConfigsRecordValue::_internal_set_bool_(bool value) { + if (!_internal_has_bool_()) { + clear_value(); + set_has_bool_(); + } + value_.bool__ = value; +} +inline bool ConfigsRecordValue::bool_() const { + // @@protoc_insertion_point(field_get:flwr.proto.ConfigsRecordValue.bool) + return _internal_bool_(); +} +inline void ConfigsRecordValue::set_bool_(bool value) { + _internal_set_bool_(value); + // @@protoc_insertion_point(field_set:flwr.proto.ConfigsRecordValue.bool) +} + +// string string = 4; +inline bool ConfigsRecordValue::_internal_has_string() const { + return value_case() == kString; +} +inline bool ConfigsRecordValue::has_string() const { + return _internal_has_string(); +} +inline void ConfigsRecordValue::set_has_string() { + _oneof_case_[0] = kString; +} +inline void ConfigsRecordValue::clear_string() { + if (_internal_has_string()) { + value_.string_.Destroy(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); + clear_has_value(); + } +} +inline const std::string& ConfigsRecordValue::string() const { + // 
@@protoc_insertion_point(field_get:flwr.proto.ConfigsRecordValue.string) + return _internal_string(); +} +template +inline void ConfigsRecordValue::set_string(ArgT0&& arg0, ArgT... args) { + if (!_internal_has_string()) { + clear_value(); + set_has_string(); + value_.string_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + } + value_.string_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:flwr.proto.ConfigsRecordValue.string) +} +inline std::string* ConfigsRecordValue::mutable_string() { + std::string* _s = _internal_mutable_string(); + // @@protoc_insertion_point(field_mutable:flwr.proto.ConfigsRecordValue.string) + return _s; +} +inline const std::string& ConfigsRecordValue::_internal_string() const { + if (_internal_has_string()) { + return value_.string_.Get(); + } + return ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(); +} +inline void ConfigsRecordValue::_internal_set_string(const std::string& value) { + if (!_internal_has_string()) { + clear_value(); + set_has_string(); + value_.string_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + } + value_.string_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); +} +inline std::string* ConfigsRecordValue::_internal_mutable_string() { + if (!_internal_has_string()) { + clear_value(); + set_has_string(); + value_.string_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + } + return value_.string_.Mutable( + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); +} +inline std::string* ConfigsRecordValue::release_string() { + // @@protoc_insertion_point(field_release:flwr.proto.ConfigsRecordValue.string) + if (_internal_has_string()) { + clear_has_value(); + return 
value_.string_.ReleaseNonDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); + } else { + return nullptr; + } +} +inline void ConfigsRecordValue::set_allocated_string(std::string* string) { + if (has_value()) { + clear_value(); + } + if (string != nullptr) { + set_has_string(); + value_.string_.UnsafeSetDefault(string); + ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaForAllocation(); + if (arena != nullptr) { + arena->Own(string); + } + } + // @@protoc_insertion_point(field_set_allocated:flwr.proto.ConfigsRecordValue.string) +} + +// bytes bytes = 5; +inline bool ConfigsRecordValue::_internal_has_bytes() const { + return value_case() == kBytes; +} +inline bool ConfigsRecordValue::has_bytes() const { + return _internal_has_bytes(); +} +inline void ConfigsRecordValue::set_has_bytes() { + _oneof_case_[0] = kBytes; +} +inline void ConfigsRecordValue::clear_bytes() { + if (_internal_has_bytes()) { + value_.bytes_.Destroy(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); + clear_has_value(); + } +} +inline const std::string& ConfigsRecordValue::bytes() const { + // @@protoc_insertion_point(field_get:flwr.proto.ConfigsRecordValue.bytes) + return _internal_bytes(); +} +template +inline void ConfigsRecordValue::set_bytes(ArgT0&& arg0, ArgT... 
args) { + if (!_internal_has_bytes()) { + clear_value(); + set_has_bytes(); + value_.bytes_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + } + value_.bytes_.SetBytes(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:flwr.proto.ConfigsRecordValue.bytes) +} +inline std::string* ConfigsRecordValue::mutable_bytes() { + std::string* _s = _internal_mutable_bytes(); + // @@protoc_insertion_point(field_mutable:flwr.proto.ConfigsRecordValue.bytes) + return _s; +} +inline const std::string& ConfigsRecordValue::_internal_bytes() const { + if (_internal_has_bytes()) { + return value_.bytes_.Get(); + } + return ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(); +} +inline void ConfigsRecordValue::_internal_set_bytes(const std::string& value) { + if (!_internal_has_bytes()) { + clear_value(); + set_has_bytes(); + value_.bytes_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + } + value_.bytes_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); +} +inline std::string* ConfigsRecordValue::_internal_mutable_bytes() { + if (!_internal_has_bytes()) { + clear_value(); + set_has_bytes(); + value_.bytes_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + } + return value_.bytes_.Mutable( + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); +} +inline std::string* ConfigsRecordValue::release_bytes() { + // @@protoc_insertion_point(field_release:flwr.proto.ConfigsRecordValue.bytes) + if (_internal_has_bytes()) { + clear_has_value(); + return value_.bytes_.ReleaseNonDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); + } else { + return nullptr; + } +} +inline void 
ConfigsRecordValue::set_allocated_bytes(std::string* bytes) { + if (has_value()) { + clear_value(); + } + if (bytes != nullptr) { + set_has_bytes(); + value_.bytes_.UnsafeSetDefault(bytes); + ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaForAllocation(); + if (arena != nullptr) { + arena->Own(bytes); + } + } + // @@protoc_insertion_point(field_set_allocated:flwr.proto.ConfigsRecordValue.bytes) +} + +// .flwr.proto.DoubleList double_list = 21; +inline bool ConfigsRecordValue::_internal_has_double_list() const { + return value_case() == kDoubleList; +} +inline bool ConfigsRecordValue::has_double_list() const { + return _internal_has_double_list(); +} +inline void ConfigsRecordValue::set_has_double_list() { + _oneof_case_[0] = kDoubleList; +} +inline void ConfigsRecordValue::clear_double_list() { + if (_internal_has_double_list()) { + if (GetArenaForAllocation() == nullptr) { + delete value_.double_list_; + } + clear_has_value(); + } +} +inline ::flwr::proto::DoubleList* ConfigsRecordValue::release_double_list() { + // @@protoc_insertion_point(field_release:flwr.proto.ConfigsRecordValue.double_list) + if (_internal_has_double_list()) { + clear_has_value(); + ::flwr::proto::DoubleList* temp = value_.double_list_; + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } + value_.double_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline const ::flwr::proto::DoubleList& ConfigsRecordValue::_internal_double_list() const { + return _internal_has_double_list() + ? 
*value_.double_list_ + : reinterpret_cast< ::flwr::proto::DoubleList&>(::flwr::proto::_DoubleList_default_instance_); +} +inline const ::flwr::proto::DoubleList& ConfigsRecordValue::double_list() const { + // @@protoc_insertion_point(field_get:flwr.proto.ConfigsRecordValue.double_list) + return _internal_double_list(); +} +inline ::flwr::proto::DoubleList* ConfigsRecordValue::unsafe_arena_release_double_list() { + // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.ConfigsRecordValue.double_list) + if (_internal_has_double_list()) { + clear_has_value(); + ::flwr::proto::DoubleList* temp = value_.double_list_; + value_.double_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline void ConfigsRecordValue::unsafe_arena_set_allocated_double_list(::flwr::proto::DoubleList* double_list) { + clear_value(); + if (double_list) { + set_has_double_list(); + value_.double_list_ = double_list; + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.ConfigsRecordValue.double_list) +} +inline ::flwr::proto::DoubleList* ConfigsRecordValue::_internal_mutable_double_list() { + if (!_internal_has_double_list()) { + clear_value(); + set_has_double_list(); + value_.double_list_ = CreateMaybeMessage< ::flwr::proto::DoubleList >(GetArenaForAllocation()); + } + return value_.double_list_; +} +inline ::flwr::proto::DoubleList* ConfigsRecordValue::mutable_double_list() { + ::flwr::proto::DoubleList* _msg = _internal_mutable_double_list(); + // @@protoc_insertion_point(field_mutable:flwr.proto.ConfigsRecordValue.double_list) + return _msg; +} + +// .flwr.proto.Sint64List sint64_list = 22; +inline bool ConfigsRecordValue::_internal_has_sint64_list() const { + return value_case() == kSint64List; +} +inline bool ConfigsRecordValue::has_sint64_list() const { + return _internal_has_sint64_list(); +} +inline void ConfigsRecordValue::set_has_sint64_list() { + _oneof_case_[0] = kSint64List; +} +inline void 
ConfigsRecordValue::clear_sint64_list() { + if (_internal_has_sint64_list()) { + if (GetArenaForAllocation() == nullptr) { + delete value_.sint64_list_; + } + clear_has_value(); + } +} +inline ::flwr::proto::Sint64List* ConfigsRecordValue::release_sint64_list() { + // @@protoc_insertion_point(field_release:flwr.proto.ConfigsRecordValue.sint64_list) + if (_internal_has_sint64_list()) { + clear_has_value(); + ::flwr::proto::Sint64List* temp = value_.sint64_list_; + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } + value_.sint64_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline const ::flwr::proto::Sint64List& ConfigsRecordValue::_internal_sint64_list() const { + return _internal_has_sint64_list() + ? *value_.sint64_list_ + : reinterpret_cast< ::flwr::proto::Sint64List&>(::flwr::proto::_Sint64List_default_instance_); +} +inline const ::flwr::proto::Sint64List& ConfigsRecordValue::sint64_list() const { + // @@protoc_insertion_point(field_get:flwr.proto.ConfigsRecordValue.sint64_list) + return _internal_sint64_list(); +} +inline ::flwr::proto::Sint64List* ConfigsRecordValue::unsafe_arena_release_sint64_list() { + // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.ConfigsRecordValue.sint64_list) + if (_internal_has_sint64_list()) { + clear_has_value(); + ::flwr::proto::Sint64List* temp = value_.sint64_list_; + value_.sint64_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline void ConfigsRecordValue::unsafe_arena_set_allocated_sint64_list(::flwr::proto::Sint64List* sint64_list) { + clear_value(); + if (sint64_list) { + set_has_sint64_list(); + value_.sint64_list_ = sint64_list; + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.ConfigsRecordValue.sint64_list) +} +inline ::flwr::proto::Sint64List* ConfigsRecordValue::_internal_mutable_sint64_list() { + if (!_internal_has_sint64_list()) { + clear_value(); + 
set_has_sint64_list(); + value_.sint64_list_ = CreateMaybeMessage< ::flwr::proto::Sint64List >(GetArenaForAllocation()); + } + return value_.sint64_list_; +} +inline ::flwr::proto::Sint64List* ConfigsRecordValue::mutable_sint64_list() { + ::flwr::proto::Sint64List* _msg = _internal_mutable_sint64_list(); + // @@protoc_insertion_point(field_mutable:flwr.proto.ConfigsRecordValue.sint64_list) + return _msg; +} + +// .flwr.proto.BoolList bool_list = 23; +inline bool ConfigsRecordValue::_internal_has_bool_list() const { + return value_case() == kBoolList; +} +inline bool ConfigsRecordValue::has_bool_list() const { + return _internal_has_bool_list(); +} +inline void ConfigsRecordValue::set_has_bool_list() { + _oneof_case_[0] = kBoolList; +} +inline void ConfigsRecordValue::clear_bool_list() { + if (_internal_has_bool_list()) { + if (GetArenaForAllocation() == nullptr) { + delete value_.bool_list_; + } + clear_has_value(); + } +} +inline ::flwr::proto::BoolList* ConfigsRecordValue::release_bool_list() { + // @@protoc_insertion_point(field_release:flwr.proto.ConfigsRecordValue.bool_list) + if (_internal_has_bool_list()) { + clear_has_value(); + ::flwr::proto::BoolList* temp = value_.bool_list_; + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } + value_.bool_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline const ::flwr::proto::BoolList& ConfigsRecordValue::_internal_bool_list() const { + return _internal_has_bool_list() + ? 
*value_.bool_list_ + : reinterpret_cast< ::flwr::proto::BoolList&>(::flwr::proto::_BoolList_default_instance_); +} +inline const ::flwr::proto::BoolList& ConfigsRecordValue::bool_list() const { + // @@protoc_insertion_point(field_get:flwr.proto.ConfigsRecordValue.bool_list) + return _internal_bool_list(); +} +inline ::flwr::proto::BoolList* ConfigsRecordValue::unsafe_arena_release_bool_list() { + // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.ConfigsRecordValue.bool_list) + if (_internal_has_bool_list()) { + clear_has_value(); + ::flwr::proto::BoolList* temp = value_.bool_list_; + value_.bool_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline void ConfigsRecordValue::unsafe_arena_set_allocated_bool_list(::flwr::proto::BoolList* bool_list) { + clear_value(); + if (bool_list) { + set_has_bool_list(); + value_.bool_list_ = bool_list; + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.ConfigsRecordValue.bool_list) +} +inline ::flwr::proto::BoolList* ConfigsRecordValue::_internal_mutable_bool_list() { + if (!_internal_has_bool_list()) { + clear_value(); + set_has_bool_list(); + value_.bool_list_ = CreateMaybeMessage< ::flwr::proto::BoolList >(GetArenaForAllocation()); + } + return value_.bool_list_; +} +inline ::flwr::proto::BoolList* ConfigsRecordValue::mutable_bool_list() { + ::flwr::proto::BoolList* _msg = _internal_mutable_bool_list(); + // @@protoc_insertion_point(field_mutable:flwr.proto.ConfigsRecordValue.bool_list) + return _msg; +} + +// .flwr.proto.StringList string_list = 24; +inline bool ConfigsRecordValue::_internal_has_string_list() const { + return value_case() == kStringList; +} +inline bool ConfigsRecordValue::has_string_list() const { + return _internal_has_string_list(); +} +inline void ConfigsRecordValue::set_has_string_list() { + _oneof_case_[0] = kStringList; +} +inline void ConfigsRecordValue::clear_string_list() { + if (_internal_has_string_list()) { + if 
(GetArenaForAllocation() == nullptr) { + delete value_.string_list_; + } + clear_has_value(); + } +} +inline ::flwr::proto::StringList* ConfigsRecordValue::release_string_list() { + // @@protoc_insertion_point(field_release:flwr.proto.ConfigsRecordValue.string_list) + if (_internal_has_string_list()) { + clear_has_value(); + ::flwr::proto::StringList* temp = value_.string_list_; + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } + value_.string_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline const ::flwr::proto::StringList& ConfigsRecordValue::_internal_string_list() const { + return _internal_has_string_list() + ? *value_.string_list_ + : reinterpret_cast< ::flwr::proto::StringList&>(::flwr::proto::_StringList_default_instance_); +} +inline const ::flwr::proto::StringList& ConfigsRecordValue::string_list() const { + // @@protoc_insertion_point(field_get:flwr.proto.ConfigsRecordValue.string_list) + return _internal_string_list(); +} +inline ::flwr::proto::StringList* ConfigsRecordValue::unsafe_arena_release_string_list() { + // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.ConfigsRecordValue.string_list) + if (_internal_has_string_list()) { + clear_has_value(); + ::flwr::proto::StringList* temp = value_.string_list_; + value_.string_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline void ConfigsRecordValue::unsafe_arena_set_allocated_string_list(::flwr::proto::StringList* string_list) { + clear_value(); + if (string_list) { + set_has_string_list(); + value_.string_list_ = string_list; + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.ConfigsRecordValue.string_list) +} +inline ::flwr::proto::StringList* ConfigsRecordValue::_internal_mutable_string_list() { + if (!_internal_has_string_list()) { + clear_value(); + set_has_string_list(); + value_.string_list_ = CreateMaybeMessage< ::flwr::proto::StringList 
>(GetArenaForAllocation()); + } + return value_.string_list_; +} +inline ::flwr::proto::StringList* ConfigsRecordValue::mutable_string_list() { + ::flwr::proto::StringList* _msg = _internal_mutable_string_list(); + // @@protoc_insertion_point(field_mutable:flwr.proto.ConfigsRecordValue.string_list) + return _msg; +} + +// .flwr.proto.BytesList bytes_list = 25; +inline bool ConfigsRecordValue::_internal_has_bytes_list() const { + return value_case() == kBytesList; +} +inline bool ConfigsRecordValue::has_bytes_list() const { + return _internal_has_bytes_list(); +} +inline void ConfigsRecordValue::set_has_bytes_list() { + _oneof_case_[0] = kBytesList; +} +inline void ConfigsRecordValue::clear_bytes_list() { + if (_internal_has_bytes_list()) { + if (GetArenaForAllocation() == nullptr) { + delete value_.bytes_list_; + } + clear_has_value(); + } +} +inline ::flwr::proto::BytesList* ConfigsRecordValue::release_bytes_list() { + // @@protoc_insertion_point(field_release:flwr.proto.ConfigsRecordValue.bytes_list) + if (_internal_has_bytes_list()) { + clear_has_value(); + ::flwr::proto::BytesList* temp = value_.bytes_list_; + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } + value_.bytes_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline const ::flwr::proto::BytesList& ConfigsRecordValue::_internal_bytes_list() const { + return _internal_has_bytes_list() + ? 
*value_.bytes_list_ + : reinterpret_cast< ::flwr::proto::BytesList&>(::flwr::proto::_BytesList_default_instance_); +} +inline const ::flwr::proto::BytesList& ConfigsRecordValue::bytes_list() const { + // @@protoc_insertion_point(field_get:flwr.proto.ConfigsRecordValue.bytes_list) + return _internal_bytes_list(); +} +inline ::flwr::proto::BytesList* ConfigsRecordValue::unsafe_arena_release_bytes_list() { + // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.ConfigsRecordValue.bytes_list) + if (_internal_has_bytes_list()) { + clear_has_value(); + ::flwr::proto::BytesList* temp = value_.bytes_list_; + value_.bytes_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline void ConfigsRecordValue::unsafe_arena_set_allocated_bytes_list(::flwr::proto::BytesList* bytes_list) { + clear_value(); + if (bytes_list) { + set_has_bytes_list(); + value_.bytes_list_ = bytes_list; + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.ConfigsRecordValue.bytes_list) +} +inline ::flwr::proto::BytesList* ConfigsRecordValue::_internal_mutable_bytes_list() { + if (!_internal_has_bytes_list()) { + clear_value(); + set_has_bytes_list(); + value_.bytes_list_ = CreateMaybeMessage< ::flwr::proto::BytesList >(GetArenaForAllocation()); + } + return value_.bytes_list_; +} +inline ::flwr::proto::BytesList* ConfigsRecordValue::mutable_bytes_list() { + ::flwr::proto::BytesList* _msg = _internal_mutable_bytes_list(); + // @@protoc_insertion_point(field_mutable:flwr.proto.ConfigsRecordValue.bytes_list) + return _msg; +} + +inline bool ConfigsRecordValue::has_value() const { + return value_case() != VALUE_NOT_SET; +} +inline void ConfigsRecordValue::clear_has_value() { + _oneof_case_[0] = VALUE_NOT_SET; +} +inline ConfigsRecordValue::ValueCase ConfigsRecordValue::value_case() const { + return ConfigsRecordValue::ValueCase(_oneof_case_[0]); +} +// ------------------------------------------------------------------- + +// ParametersRecord + +// 
repeated string data_keys = 1; +inline int ParametersRecord::_internal_data_keys_size() const { + return data_keys_.size(); +} +inline int ParametersRecord::data_keys_size() const { + return _internal_data_keys_size(); +} +inline void ParametersRecord::clear_data_keys() { + data_keys_.Clear(); +} +inline std::string* ParametersRecord::add_data_keys() { + std::string* _s = _internal_add_data_keys(); + // @@protoc_insertion_point(field_add_mutable:flwr.proto.ParametersRecord.data_keys) + return _s; +} +inline const std::string& ParametersRecord::_internal_data_keys(int index) const { + return data_keys_.Get(index); +} +inline const std::string& ParametersRecord::data_keys(int index) const { + // @@protoc_insertion_point(field_get:flwr.proto.ParametersRecord.data_keys) + return _internal_data_keys(index); +} +inline std::string* ParametersRecord::mutable_data_keys(int index) { + // @@protoc_insertion_point(field_mutable:flwr.proto.ParametersRecord.data_keys) + return data_keys_.Mutable(index); +} +inline void ParametersRecord::set_data_keys(int index, const std::string& value) { + data_keys_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set:flwr.proto.ParametersRecord.data_keys) +} +inline void ParametersRecord::set_data_keys(int index, std::string&& value) { + data_keys_.Mutable(index)->assign(std::move(value)); + // @@protoc_insertion_point(field_set:flwr.proto.ParametersRecord.data_keys) +} +inline void ParametersRecord::set_data_keys(int index, const char* value) { + GOOGLE_DCHECK(value != nullptr); + data_keys_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set_char:flwr.proto.ParametersRecord.data_keys) +} +inline void ParametersRecord::set_data_keys(int index, const char* value, size_t size) { + data_keys_.Mutable(index)->assign( + reinterpret_cast(value), size); + // @@protoc_insertion_point(field_set_pointer:flwr.proto.ParametersRecord.data_keys) +} +inline std::string* ParametersRecord::_internal_add_data_keys() { + 
return data_keys_.Add(); +} +inline void ParametersRecord::add_data_keys(const std::string& value) { + data_keys_.Add()->assign(value); + // @@protoc_insertion_point(field_add:flwr.proto.ParametersRecord.data_keys) +} +inline void ParametersRecord::add_data_keys(std::string&& value) { + data_keys_.Add(std::move(value)); + // @@protoc_insertion_point(field_add:flwr.proto.ParametersRecord.data_keys) +} +inline void ParametersRecord::add_data_keys(const char* value) { + GOOGLE_DCHECK(value != nullptr); + data_keys_.Add()->assign(value); + // @@protoc_insertion_point(field_add_char:flwr.proto.ParametersRecord.data_keys) +} +inline void ParametersRecord::add_data_keys(const char* value, size_t size) { + data_keys_.Add()->assign(reinterpret_cast(value), size); + // @@protoc_insertion_point(field_add_pointer:flwr.proto.ParametersRecord.data_keys) +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& +ParametersRecord::data_keys() const { + // @@protoc_insertion_point(field_list:flwr.proto.ParametersRecord.data_keys) + return data_keys_; +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* +ParametersRecord::mutable_data_keys() { + // @@protoc_insertion_point(field_mutable_list:flwr.proto.ParametersRecord.data_keys) + return &data_keys_; +} + +// repeated .flwr.proto.Array data_values = 2; +inline int ParametersRecord::_internal_data_values_size() const { + return data_values_.size(); +} +inline int ParametersRecord::data_values_size() const { + return _internal_data_values_size(); +} +inline void ParametersRecord::clear_data_values() { + data_values_.Clear(); +} +inline ::flwr::proto::Array* ParametersRecord::mutable_data_values(int index) { + // @@protoc_insertion_point(field_mutable:flwr.proto.ParametersRecord.data_values) + return data_values_.Mutable(index); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::Array >* +ParametersRecord::mutable_data_values() { + // 
@@protoc_insertion_point(field_mutable_list:flwr.proto.ParametersRecord.data_values) + return &data_values_; +} +inline const ::flwr::proto::Array& ParametersRecord::_internal_data_values(int index) const { + return data_values_.Get(index); +} +inline const ::flwr::proto::Array& ParametersRecord::data_values(int index) const { + // @@protoc_insertion_point(field_get:flwr.proto.ParametersRecord.data_values) + return _internal_data_values(index); +} +inline ::flwr::proto::Array* ParametersRecord::_internal_add_data_values() { + return data_values_.Add(); +} +inline ::flwr::proto::Array* ParametersRecord::add_data_values() { + ::flwr::proto::Array* _add = _internal_add_data_values(); + // @@protoc_insertion_point(field_add:flwr.proto.ParametersRecord.data_values) + return _add; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::Array >& +ParametersRecord::data_values() const { + // @@protoc_insertion_point(field_list:flwr.proto.ParametersRecord.data_values) + return data_values_; +} + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// MetricsRecord + +// map data = 1; +inline int MetricsRecord::_internal_data_size() const { + return data_.size(); +} +inline int MetricsRecord::data_size() const { + return _internal_data_size(); +} +inline void MetricsRecord::clear_data() { + data_.Clear(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >& +MetricsRecord::_internal_data() const { + return data_.GetMap(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >& +MetricsRecord::data() const { + // @@protoc_insertion_point(field_map:flwr.proto.MetricsRecord.data) + return _internal_data(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >* +MetricsRecord::_internal_mutable_data() { + return data_.MutableMap(); +} +inline 
::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >* +MetricsRecord::mutable_data() { + // @@protoc_insertion_point(field_mutable_map:flwr.proto.MetricsRecord.data) + return _internal_mutable_data(); +} + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ConfigsRecord + +// map data = 1; +inline int ConfigsRecord::_internal_data_size() const { + return data_.size(); +} +inline int ConfigsRecord::data_size() const { + return _internal_data_size(); +} +inline void ConfigsRecord::clear_data() { + data_.Clear(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >& +ConfigsRecord::_internal_data() const { + return data_.GetMap(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >& +ConfigsRecord::data() const { + // @@protoc_insertion_point(field_map:flwr.proto.ConfigsRecord.data) + return _internal_data(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >* +ConfigsRecord::_internal_mutable_data() { + return data_.MutableMap(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >* +ConfigsRecord::mutable_data() { + // @@protoc_insertion_point(field_mutable_map:flwr.proto.ConfigsRecord.data) + return _internal_mutable_data(); +} + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// RecordSet + +// map parameters = 1; +inline int RecordSet::_internal_parameters_size() const { + return parameters_.size(); +} +inline int RecordSet::parameters_size() const { + return _internal_parameters_size(); +} +inline void RecordSet::clear_parameters() { 
+ parameters_.Clear(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >& +RecordSet::_internal_parameters() const { + return parameters_.GetMap(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >& +RecordSet::parameters() const { + // @@protoc_insertion_point(field_map:flwr.proto.RecordSet.parameters) + return _internal_parameters(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >* +RecordSet::_internal_mutable_parameters() { + return parameters_.MutableMap(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >* +RecordSet::mutable_parameters() { + // @@protoc_insertion_point(field_mutable_map:flwr.proto.RecordSet.parameters) + return _internal_mutable_parameters(); +} + +// map metrics = 2; +inline int RecordSet::_internal_metrics_size() const { + return metrics_.size(); +} +inline int RecordSet::metrics_size() const { + return _internal_metrics_size(); +} +inline void RecordSet::clear_metrics() { + metrics_.Clear(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >& +RecordSet::_internal_metrics() const { + return metrics_.GetMap(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >& +RecordSet::metrics() const { + // @@protoc_insertion_point(field_map:flwr.proto.RecordSet.metrics) + return _internal_metrics(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >* +RecordSet::_internal_mutable_metrics() { + return metrics_.MutableMap(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >* +RecordSet::mutable_metrics() { + // @@protoc_insertion_point(field_mutable_map:flwr.proto.RecordSet.metrics) + return _internal_mutable_metrics(); +} + +// map configs = 3; +inline int RecordSet::_internal_configs_size() const { + return configs_.size(); +} +inline int 
RecordSet::configs_size() const { + return _internal_configs_size(); +} +inline void RecordSet::clear_configs() { + configs_.Clear(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecord >& +RecordSet::_internal_configs() const { + return configs_.GetMap(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecord >& +RecordSet::configs() const { + // @@protoc_insertion_point(field_map:flwr.proto.RecordSet.configs) + return _internal_configs(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecord >* +RecordSet::_internal_mutable_configs() { + return configs_.MutableMap(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecord >* +RecordSet::mutable_configs() { + // @@protoc_insertion_point(field_mutable_map:flwr.proto.RecordSet.configs) + return _internal_mutable_configs(); +} + +#ifdef __GNUC__ + #pragma GCC diagnostic pop +#endif // __GNUC__ +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// 
------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + + +// @@protoc_insertion_point(namespace_scope) + +} // namespace proto +} // namespace flwr + +// @@protoc_insertion_point(global_scope) + +#include +#endif // GOOGLE_PROTOBUF_INCLUDED_GOOGLE_PROTOBUF_INCLUDED_flwr_2fproto_2frecordset_2eproto diff --git a/src/cc/flwr/include/flwr/proto/task.pb.cc b/src/cc/flwr/include/flwr/proto/task.pb.cc index 14f1259e5ba7..04fa3e8e2625 100644 --- a/src/cc/flwr/include/flwr/proto/task.pb.cc +++ b/src/cc/flwr/include/flwr/proto/task.pb.cc @@ -21,14 +21,15 @@ namespace proto { constexpr Task::Task( ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) : ancestry_() - , created_at_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) , delivered_at_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) - , ttl_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) + , task_type_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) , producer_(nullptr) , consumer_(nullptr) - , sa_(nullptr) - , legacy_server_message_(nullptr) - , legacy_client_message_(nullptr){} + , recordset_(nullptr) + , error_(nullptr) + , created_at_(0) + , pushed_at_(0) + , ttl_(0){} struct TaskDefaultTypeInternal { constexpr TaskDefaultTypeInternal() : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} @@ -43,7 +44,7 @@ constexpr TaskIns::TaskIns( : task_id_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) , group_id_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) , task_(nullptr) - , workload_id_(int64_t{0}){} + , run_id_(int64_t{0}){} struct TaskInsDefaultTypeInternal { constexpr TaskInsDefaultTypeInternal() : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} @@ -58,7 +59,7 @@ constexpr TaskRes::TaskRes( : 
task_id_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) , group_id_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) , task_(nullptr) - , workload_id_(int64_t{0}){} + , run_id_(int64_t{0}){} struct TaskResDefaultTypeInternal { constexpr TaskResDefaultTypeInternal() : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} @@ -68,105 +69,9 @@ struct TaskResDefaultTypeInternal { }; }; PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT TaskResDefaultTypeInternal _TaskRes_default_instance_; -constexpr Value_DoubleList::Value_DoubleList( - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) - : vals_(){} -struct Value_DoubleListDefaultTypeInternal { - constexpr Value_DoubleListDefaultTypeInternal() - : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} - ~Value_DoubleListDefaultTypeInternal() {} - union { - Value_DoubleList _instance; - }; -}; -PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT Value_DoubleListDefaultTypeInternal _Value_DoubleList_default_instance_; -constexpr Value_Sint64List::Value_Sint64List( - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) - : vals_() - , _vals_cached_byte_size_(0){} -struct Value_Sint64ListDefaultTypeInternal { - constexpr Value_Sint64ListDefaultTypeInternal() - : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} - ~Value_Sint64ListDefaultTypeInternal() {} - union { - Value_Sint64List _instance; - }; -}; -PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT Value_Sint64ListDefaultTypeInternal _Value_Sint64List_default_instance_; -constexpr Value_BoolList::Value_BoolList( - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) - : vals_(){} -struct Value_BoolListDefaultTypeInternal { - constexpr Value_BoolListDefaultTypeInternal() - : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} - ~Value_BoolListDefaultTypeInternal() {} - union { - Value_BoolList _instance; - }; -}; -PROTOBUF_ATTRIBUTE_NO_DESTROY 
PROTOBUF_CONSTINIT Value_BoolListDefaultTypeInternal _Value_BoolList_default_instance_; -constexpr Value_StringList::Value_StringList( - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) - : vals_(){} -struct Value_StringListDefaultTypeInternal { - constexpr Value_StringListDefaultTypeInternal() - : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} - ~Value_StringListDefaultTypeInternal() {} - union { - Value_StringList _instance; - }; -}; -PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT Value_StringListDefaultTypeInternal _Value_StringList_default_instance_; -constexpr Value_BytesList::Value_BytesList( - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) - : vals_(){} -struct Value_BytesListDefaultTypeInternal { - constexpr Value_BytesListDefaultTypeInternal() - : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} - ~Value_BytesListDefaultTypeInternal() {} - union { - Value_BytesList _instance; - }; -}; -PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT Value_BytesListDefaultTypeInternal _Value_BytesList_default_instance_; -constexpr Value::Value( - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) - : _oneof_case_{}{} -struct ValueDefaultTypeInternal { - constexpr ValueDefaultTypeInternal() - : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} - ~ValueDefaultTypeInternal() {} - union { - Value _instance; - }; -}; -PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT ValueDefaultTypeInternal _Value_default_instance_; -constexpr SecureAggregation_NamedValuesEntry_DoNotUse::SecureAggregation_NamedValuesEntry_DoNotUse( - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized){} -struct SecureAggregation_NamedValuesEntry_DoNotUseDefaultTypeInternal { - constexpr SecureAggregation_NamedValuesEntry_DoNotUseDefaultTypeInternal() - : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} - ~SecureAggregation_NamedValuesEntry_DoNotUseDefaultTypeInternal() {} - union { - 
SecureAggregation_NamedValuesEntry_DoNotUse _instance; - }; -}; -PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT SecureAggregation_NamedValuesEntry_DoNotUseDefaultTypeInternal _SecureAggregation_NamedValuesEntry_DoNotUse_default_instance_; -constexpr SecureAggregation::SecureAggregation( - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) - : named_values_(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}){} -struct SecureAggregationDefaultTypeInternal { - constexpr SecureAggregationDefaultTypeInternal() - : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} - ~SecureAggregationDefaultTypeInternal() {} - union { - SecureAggregation _instance; - }; -}; -PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT SecureAggregationDefaultTypeInternal _SecureAggregation_default_instance_; } // namespace proto } // namespace flwr -static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_flwr_2fproto_2ftask_2eproto[11]; +static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_flwr_2fproto_2ftask_2eproto[3]; static constexpr ::PROTOBUF_NAMESPACE_ID::EnumDescriptor const** file_level_enum_descriptors_flwr_2fproto_2ftask_2eproto = nullptr; static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** file_level_service_descriptors_flwr_2fproto_2ftask_2eproto = nullptr; @@ -181,11 +86,12 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_flwr_2fproto_2ftask_2eproto::o PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, consumer_), PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, created_at_), PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, delivered_at_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, pushed_at_), PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, ttl_), PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, ancestry_), - PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, sa_), - PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, legacy_server_message_), - PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, legacy_client_message_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, 
task_type_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, recordset_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, error_), ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskIns, _internal_metadata_), ~0u, // no _extensions_ @@ -194,7 +100,7 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_flwr_2fproto_2ftask_2eproto::o ~0u, // no _inlined_string_donated_ PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskIns, task_id_), PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskIns, group_id_), - PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskIns, workload_id_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskIns, run_id_), PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskIns, task_), ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskRes, _internal_metadata_), @@ -204,148 +110,49 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_flwr_2fproto_2ftask_2eproto::o ~0u, // no _inlined_string_donated_ PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskRes, task_id_), PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskRes, group_id_), - PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskRes, workload_id_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskRes, run_id_), PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskRes, task_), - ~0u, // no _has_bits_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value_DoubleList, _internal_metadata_), - ~0u, // no _extensions_ - ~0u, // no _oneof_case_ - ~0u, // no _weak_field_map_ - ~0u, // no _inlined_string_donated_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value_DoubleList, vals_), - ~0u, // no _has_bits_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value_Sint64List, _internal_metadata_), - ~0u, // no _extensions_ - ~0u, // no _oneof_case_ - ~0u, // no _weak_field_map_ - ~0u, // no _inlined_string_donated_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value_Sint64List, vals_), - ~0u, // no _has_bits_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value_BoolList, _internal_metadata_), - ~0u, // no _extensions_ - ~0u, // no _oneof_case_ - ~0u, // no _weak_field_map_ - ~0u, // no _inlined_string_donated_ - 
PROTOBUF_FIELD_OFFSET(::flwr::proto::Value_BoolList, vals_), - ~0u, // no _has_bits_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value_StringList, _internal_metadata_), - ~0u, // no _extensions_ - ~0u, // no _oneof_case_ - ~0u, // no _weak_field_map_ - ~0u, // no _inlined_string_donated_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value_StringList, vals_), - ~0u, // no _has_bits_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value_BytesList, _internal_metadata_), - ~0u, // no _extensions_ - ~0u, // no _oneof_case_ - ~0u, // no _weak_field_map_ - ~0u, // no _inlined_string_donated_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value_BytesList, vals_), - ~0u, // no _has_bits_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value, _internal_metadata_), - ~0u, // no _extensions_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value, _oneof_case_[0]), - ~0u, // no _weak_field_map_ - ~0u, // no _inlined_string_donated_ - ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, - ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, - ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, - ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, - ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, - ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, - ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, - ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, - ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, - ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value, value_), - PROTOBUF_FIELD_OFFSET(::flwr::proto::SecureAggregation_NamedValuesEntry_DoNotUse, _has_bits_), - PROTOBUF_FIELD_OFFSET(::flwr::proto::SecureAggregation_NamedValuesEntry_DoNotUse, _internal_metadata_), - ~0u, // no _extensions_ - ~0u, // no _oneof_case_ - ~0u, // no _weak_field_map_ - ~0u, // no _inlined_string_donated_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::SecureAggregation_NamedValuesEntry_DoNotUse, key_), - 
PROTOBUF_FIELD_OFFSET(::flwr::proto::SecureAggregation_NamedValuesEntry_DoNotUse, value_), - 0, - 1, - ~0u, // no _has_bits_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::SecureAggregation, _internal_metadata_), - ~0u, // no _extensions_ - ~0u, // no _oneof_case_ - ~0u, // no _weak_field_map_ - ~0u, // no _inlined_string_donated_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::SecureAggregation, named_values_), }; static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { { 0, -1, -1, sizeof(::flwr::proto::Task)}, - { 15, -1, -1, sizeof(::flwr::proto::TaskIns)}, - { 25, -1, -1, sizeof(::flwr::proto::TaskRes)}, - { 35, -1, -1, sizeof(::flwr::proto::Value_DoubleList)}, - { 42, -1, -1, sizeof(::flwr::proto::Value_Sint64List)}, - { 49, -1, -1, sizeof(::flwr::proto::Value_BoolList)}, - { 56, -1, -1, sizeof(::flwr::proto::Value_StringList)}, - { 63, -1, -1, sizeof(::flwr::proto::Value_BytesList)}, - { 70, -1, -1, sizeof(::flwr::proto::Value)}, - { 87, 95, -1, sizeof(::flwr::proto::SecureAggregation_NamedValuesEntry_DoNotUse)}, - { 97, -1, -1, sizeof(::flwr::proto::SecureAggregation)}, + { 16, -1, -1, sizeof(::flwr::proto::TaskIns)}, + { 26, -1, -1, sizeof(::flwr::proto::TaskRes)}, }; static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = { reinterpret_cast(&::flwr::proto::_Task_default_instance_), reinterpret_cast(&::flwr::proto::_TaskIns_default_instance_), reinterpret_cast(&::flwr::proto::_TaskRes_default_instance_), - reinterpret_cast(&::flwr::proto::_Value_DoubleList_default_instance_), - reinterpret_cast(&::flwr::proto::_Value_Sint64List_default_instance_), - reinterpret_cast(&::flwr::proto::_Value_BoolList_default_instance_), - reinterpret_cast(&::flwr::proto::_Value_StringList_default_instance_), - reinterpret_cast(&::flwr::proto::_Value_BytesList_default_instance_), - reinterpret_cast(&::flwr::proto::_Value_default_instance_), - 
reinterpret_cast(&::flwr::proto::_SecureAggregation_NamedValuesEntry_DoNotUse_default_instance_), - reinterpret_cast(&::flwr::proto::_SecureAggregation_default_instance_), }; const char descriptor_table_protodef_flwr_2fproto_2ftask_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = "\n\025flwr/proto/task.proto\022\nflwr.proto\032\025flw" - "r/proto/node.proto\032\032flwr/proto/transport" - ".proto\"\276\002\n\004Task\022\"\n\010producer\030\001 \001(\0132\020.flwr" - ".proto.Node\022\"\n\010consumer\030\002 \001(\0132\020.flwr.pro" - "to.Node\022\022\n\ncreated_at\030\003 \001(\t\022\024\n\014delivered" - "_at\030\004 \001(\t\022\013\n\003ttl\030\005 \001(\t\022\020\n\010ancestry\030\006 \003(\t" - "\022)\n\002sa\030\007 \001(\0132\035.flwr.proto.SecureAggregat" - "ion\022<\n\025legacy_server_message\030e \001(\0132\031.flw" - "r.proto.ServerMessageB\002\030\001\022<\n\025legacy_clie" - "nt_message\030f \001(\0132\031.flwr.proto.ClientMess" - "ageB\002\030\001\"a\n\007TaskIns\022\017\n\007task_id\030\001 \001(\t\022\020\n\010g" - "roup_id\030\002 \001(\t\022\023\n\013workload_id\030\003 \001(\022\022\036\n\004ta" - "sk\030\004 \001(\0132\020.flwr.proto.Task\"a\n\007TaskRes\022\017\n" - "\007task_id\030\001 \001(\t\022\020\n\010group_id\030\002 \001(\t\022\023\n\013work" - "load_id\030\003 \001(\022\022\036\n\004task\030\004 \001(\0132\020.flwr.proto" - ".Task\"\363\003\n\005Value\022\020\n\006double\030\001 \001(\001H\000\022\020\n\006sin" - "t64\030\002 \001(\022H\000\022\016\n\004bool\030\003 \001(\010H\000\022\020\n\006string\030\004 " - "\001(\tH\000\022\017\n\005bytes\030\005 \001(\014H\000\0223\n\013double_list\030\025 " - "\001(\0132\034.flwr.proto.Value.DoubleListH\000\0223\n\013s" - "int64_list\030\026 \001(\0132\034.flwr.proto.Value.Sint" - "64ListH\000\022/\n\tbool_list\030\027 \001(\0132\032.flwr.proto" - ".Value.BoolListH\000\0223\n\013string_list\030\030 \001(\0132\034" - ".flwr.proto.Value.StringListH\000\0221\n\nbytes_" - "list\030\031 
\001(\0132\033.flwr.proto.Value.BytesListH" - "\000\032\032\n\nDoubleList\022\014\n\004vals\030\001 \003(\001\032\032\n\nSint64L" - "ist\022\014\n\004vals\030\001 \003(\022\032\030\n\010BoolList\022\014\n\004vals\030\001 " - "\003(\010\032\032\n\nStringList\022\014\n\004vals\030\001 \003(\t\032\031\n\tBytes" - "List\022\014\n\004vals\030\001 \003(\014B\007\n\005value\"\240\001\n\021SecureAg" - "gregation\022D\n\014named_values\030\001 \003(\0132..flwr.p" - "roto.SecureAggregation.NamedValuesEntry\032" - "E\n\020NamedValuesEntry\022\013\n\003key\030\001 \001(\t\022 \n\005valu" - "e\030\002 \001(\0132\021.flwr.proto.Value:\0028\001b\006proto3" + "r/proto/node.proto\032\032flwr/proto/recordset" + ".proto\032\032flwr/proto/transport.proto\032\026flwr" + "/proto/error.proto\"\211\002\n\004Task\022\"\n\010producer\030" + "\001 \001(\0132\020.flwr.proto.Node\022\"\n\010consumer\030\002 \001(" + "\0132\020.flwr.proto.Node\022\022\n\ncreated_at\030\003 \001(\001\022" + "\024\n\014delivered_at\030\004 \001(\t\022\021\n\tpushed_at\030\005 \001(\001" + "\022\013\n\003ttl\030\006 \001(\001\022\020\n\010ancestry\030\007 \003(\t\022\021\n\ttask_" + "type\030\010 \001(\t\022(\n\trecordset\030\t \001(\0132\025.flwr.pro" + "to.RecordSet\022 \n\005error\030\n \001(\0132\021.flwr.proto" + ".Error\"\\\n\007TaskIns\022\017\n\007task_id\030\001 \001(\t\022\020\n\010gr" + "oup_id\030\002 \001(\t\022\016\n\006run_id\030\003 \001(\022\022\036\n\004task\030\004 \001" + "(\0132\020.flwr.proto.Task\"\\\n\007TaskRes\022\017\n\007task_" + "id\030\001 \001(\t\022\020\n\010group_id\030\002 \001(\t\022\016\n\006run_id\030\003 \001" + "(\022\022\036\n\004task\030\004 \001(\0132\020.flwr.proto.Taskb\006prot" + "o3" ; -static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_flwr_2fproto_2ftask_2eproto_deps[2] = { +static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_flwr_2fproto_2ftask_2eproto_deps[4] = { + 
&::descriptor_table_flwr_2fproto_2ferror_2eproto, &::descriptor_table_flwr_2fproto_2fnode_2eproto, + &::descriptor_table_flwr_2fproto_2frecordset_2eproto, &::descriptor_table_flwr_2fproto_2ftransport_2eproto, }; static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_flwr_2fproto_2ftask_2eproto_once; const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_flwr_2fproto_2ftask_2eproto = { - false, false, 1278, descriptor_table_protodef_flwr_2fproto_2ftask_2eproto, "flwr/proto/task.proto", - &descriptor_table_flwr_2fproto_2ftask_2eproto_once, descriptor_table_flwr_2fproto_2ftask_2eproto_deps, 2, 11, + false, false, 602, descriptor_table_protodef_flwr_2fproto_2ftask_2eproto, "flwr/proto/task.proto", + &descriptor_table_flwr_2fproto_2ftask_2eproto_once, descriptor_table_flwr_2fproto_2ftask_2eproto_deps, 4, 3, schemas, file_default_instances, TableStruct_flwr_2fproto_2ftask_2eproto::offsets, file_level_metadata_flwr_2fproto_2ftask_2eproto, file_level_enum_descriptors_flwr_2fproto_2ftask_2eproto, file_level_service_descriptors_flwr_2fproto_2ftask_2eproto, }; @@ -364,9 +171,8 @@ class Task::_Internal { public: static const ::flwr::proto::Node& producer(const Task* msg); static const ::flwr::proto::Node& consumer(const Task* msg); - static const ::flwr::proto::SecureAggregation& sa(const Task* msg); - static const ::flwr::proto::ServerMessage& legacy_server_message(const Task* msg); - static const ::flwr::proto::ClientMessage& legacy_client_message(const Task* msg); + static const ::flwr::proto::RecordSet& recordset(const Task* msg); + static const ::flwr::proto::Error& error(const Task* msg); }; const ::flwr::proto::Node& @@ -377,17 +183,13 @@ const ::flwr::proto::Node& Task::_Internal::consumer(const Task* msg) { return *msg->consumer_; } -const ::flwr::proto::SecureAggregation& -Task::_Internal::sa(const Task* msg) { - return *msg->sa_; -} -const ::flwr::proto::ServerMessage& -Task::_Internal::legacy_server_message(const Task* msg) { - 
return *msg->legacy_server_message_; +const ::flwr::proto::RecordSet& +Task::_Internal::recordset(const Task* msg) { + return *msg->recordset_; } -const ::flwr::proto::ClientMessage& -Task::_Internal::legacy_client_message(const Task* msg) { - return *msg->legacy_client_message_; +const ::flwr::proto::Error& +Task::_Internal::error(const Task* msg) { + return *msg->error_; } void Task::clear_producer() { if (GetArenaForAllocation() == nullptr && producer_ != nullptr) { @@ -401,17 +203,17 @@ void Task::clear_consumer() { } consumer_ = nullptr; } -void Task::clear_legacy_server_message() { - if (GetArenaForAllocation() == nullptr && legacy_server_message_ != nullptr) { - delete legacy_server_message_; +void Task::clear_recordset() { + if (GetArenaForAllocation() == nullptr && recordset_ != nullptr) { + delete recordset_; } - legacy_server_message_ = nullptr; + recordset_ = nullptr; } -void Task::clear_legacy_client_message() { - if (GetArenaForAllocation() == nullptr && legacy_client_message_ != nullptr) { - delete legacy_client_message_; +void Task::clear_error() { + if (GetArenaForAllocation() == nullptr && error_ != nullptr) { + delete error_; } - legacy_client_message_ = nullptr; + error_ = nullptr; } Task::Task(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) @@ -427,19 +229,14 @@ Task::Task(const Task& from) : ::PROTOBUF_NAMESPACE_ID::Message(), ancestry_(from.ancestry_) { _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - created_at_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); - if (!from._internal_created_at().empty()) { - created_at_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_created_at(), - GetArenaForAllocation()); - } delivered_at_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); if (!from._internal_delivered_at().empty()) { 
delivered_at_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_delivered_at(), GetArenaForAllocation()); } - ttl_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); - if (!from._internal_ttl().empty()) { - ttl_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_ttl(), + task_type_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from._internal_task_type().empty()) { + task_type_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_task_type(), GetArenaForAllocation()); } if (from._internal_has_producer()) { @@ -452,32 +249,29 @@ Task::Task(const Task& from) } else { consumer_ = nullptr; } - if (from._internal_has_sa()) { - sa_ = new ::flwr::proto::SecureAggregation(*from.sa_); + if (from._internal_has_recordset()) { + recordset_ = new ::flwr::proto::RecordSet(*from.recordset_); } else { - sa_ = nullptr; + recordset_ = nullptr; } - if (from._internal_has_legacy_server_message()) { - legacy_server_message_ = new ::flwr::proto::ServerMessage(*from.legacy_server_message_); + if (from._internal_has_error()) { + error_ = new ::flwr::proto::Error(*from.error_); } else { - legacy_server_message_ = nullptr; - } - if (from._internal_has_legacy_client_message()) { - legacy_client_message_ = new ::flwr::proto::ClientMessage(*from.legacy_client_message_); - } else { - legacy_client_message_ = nullptr; + error_ = nullptr; } + ::memcpy(&created_at_, &from.created_at_, + static_cast(reinterpret_cast(&ttl_) - + reinterpret_cast(&created_at_)) + sizeof(ttl_)); // @@protoc_insertion_point(copy_constructor:flwr.proto.Task) } void Task::SharedCtor() { -created_at_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); delivered_at_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); 
-ttl_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +task_type_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); ::memset(reinterpret_cast(this) + static_cast( reinterpret_cast(&producer_) - reinterpret_cast(this)), - 0, static_cast(reinterpret_cast(&legacy_client_message_) - - reinterpret_cast(&producer_)) + sizeof(legacy_client_message_)); + 0, static_cast(reinterpret_cast(&ttl_) - + reinterpret_cast(&producer_)) + sizeof(ttl_)); } Task::~Task() { @@ -489,14 +283,12 @@ Task::~Task() { inline void Task::SharedDtor() { GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); - created_at_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); delivered_at_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); - ttl_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + task_type_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); if (this != internal_default_instance()) delete producer_; if (this != internal_default_instance()) delete consumer_; - if (this != internal_default_instance()) delete sa_; - if (this != internal_default_instance()) delete legacy_server_message_; - if (this != internal_default_instance()) delete legacy_client_message_; + if (this != internal_default_instance()) delete recordset_; + if (this != internal_default_instance()) delete error_; } void Task::ArenaDtor(void* object) { @@ -516,9 +308,8 @@ void Task::Clear() { (void) cached_has_bits; ancestry_.Clear(); - created_at_.ClearToEmpty(); delivered_at_.ClearToEmpty(); - ttl_.ClearToEmpty(); + task_type_.ClearToEmpty(); if (GetArenaForAllocation() == nullptr && producer_ != nullptr) { delete producer_; } @@ -527,18 +318,17 @@ void Task::Clear() { delete consumer_; } consumer_ = nullptr; - if (GetArenaForAllocation() == nullptr && sa_ != nullptr) { - delete sa_; + if (GetArenaForAllocation() == nullptr && 
recordset_ != nullptr) { + delete recordset_; } - sa_ = nullptr; - if (GetArenaForAllocation() == nullptr && legacy_server_message_ != nullptr) { - delete legacy_server_message_; + recordset_ = nullptr; + if (GetArenaForAllocation() == nullptr && error_ != nullptr) { + delete error_; } - legacy_server_message_ = nullptr; - if (GetArenaForAllocation() == nullptr && legacy_client_message_ != nullptr) { - delete legacy_client_message_; - } - legacy_client_message_ = nullptr; + error_ = nullptr; + ::memset(&created_at_, 0, static_cast( + reinterpret_cast(&ttl_) - + reinterpret_cast(&created_at_)) + sizeof(ttl_)); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -564,13 +354,11 @@ const char* Task::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::inter } else goto handle_unusual; continue; - // string created_at = 3; + // double created_at = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) { - auto str = _internal_mutable_created_at(); - ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); - CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.Task.created_at")); - CHK_(ptr); + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 25)) { + created_at_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr); + ptr += sizeof(double); } else goto handle_unusual; continue; @@ -584,19 +372,25 @@ const char* Task::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::inter } else goto handle_unusual; continue; - // string ttl = 5; + // double pushed_at = 5; case 5: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 42)) { - auto str = _internal_mutable_ttl(); - ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); - CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.Task.ttl")); - CHK_(ptr); + if 
(PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 41)) { + pushed_at_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr); + ptr += sizeof(double); } else goto handle_unusual; continue; - // repeated string ancestry = 6; + // double ttl = 6; case 6: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 50)) { + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 49)) { + ttl_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr); + ptr += sizeof(double); + } else + goto handle_unusual; + continue; + // repeated string ancestry = 7; + case 7: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 58)) { ptr -= 1; do { ptr += 1; @@ -605,30 +399,32 @@ const char* Task::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::inter CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.Task.ancestry")); CHK_(ptr); if (!ctx->DataAvailable(ptr)) break; - } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<50>(ptr)); + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<58>(ptr)); } else goto handle_unusual; continue; - // .flwr.proto.SecureAggregation sa = 7; - case 7: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 58)) { - ptr = ctx->ParseMessage(_internal_mutable_sa(), ptr); + // string task_type = 8; + case 8: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 66)) { + auto str = _internal_mutable_task_type(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.Task.task_type")); CHK_(ptr); } else goto handle_unusual; continue; - // .flwr.proto.ServerMessage legacy_server_message = 101 [deprecated = true]; - case 101: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 42)) { - ptr = ctx->ParseMessage(_internal_mutable_legacy_server_message(), ptr); + // 
.flwr.proto.RecordSet recordset = 9; + case 9: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 74)) { + ptr = ctx->ParseMessage(_internal_mutable_recordset(), ptr); CHK_(ptr); } else goto handle_unusual; continue; - // .flwr.proto.ClientMessage legacy_client_message = 102 [deprecated = true]; - case 102: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 50)) { - ptr = ctx->ParseMessage(_internal_mutable_legacy_client_message(), ptr); + // .flwr.proto.Error error = 10; + case 10: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 82)) { + ptr = ctx->ParseMessage(_internal_mutable_error(), ptr); CHK_(ptr); } else goto handle_unusual; @@ -678,14 +474,10 @@ ::PROTOBUF_NAMESPACE_ID::uint8* Task::_InternalSerialize( 2, _Internal::consumer(this), target, stream); } - // string created_at = 3; - if (!this->_internal_created_at().empty()) { - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( - this->_internal_created_at().data(), static_cast(this->_internal_created_at().length()), - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, - "flwr.proto.Task.created_at"); - target = stream->WriteStringMaybeAliased( - 3, this->_internal_created_at(), target); + // double created_at = 3; + if (!(this->_internal_created_at() <= 0 && this->_internal_created_at() >= 0)) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteDoubleToArray(3, this->_internal_created_at(), target); } // string delivered_at = 4; @@ -698,48 +490,52 @@ ::PROTOBUF_NAMESPACE_ID::uint8* Task::_InternalSerialize( 4, this->_internal_delivered_at(), target); } - // string ttl = 5; - if (!this->_internal_ttl().empty()) { - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( - this->_internal_ttl().data(), static_cast(this->_internal_ttl().length()), - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, - 
"flwr.proto.Task.ttl"); - target = stream->WriteStringMaybeAliased( - 5, this->_internal_ttl(), target); + // double pushed_at = 5; + if (!(this->_internal_pushed_at() <= 0 && this->_internal_pushed_at() >= 0)) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteDoubleToArray(5, this->_internal_pushed_at(), target); + } + + // double ttl = 6; + if (!(this->_internal_ttl() <= 0 && this->_internal_ttl() >= 0)) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteDoubleToArray(6, this->_internal_ttl(), target); } - // repeated string ancestry = 6; + // repeated string ancestry = 7; for (int i = 0, n = this->_internal_ancestry_size(); i < n; i++) { const auto& s = this->_internal_ancestry(i); ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( s.data(), static_cast(s.length()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, "flwr.proto.Task.ancestry"); - target = stream->WriteString(6, s, target); + target = stream->WriteString(7, s, target); } - // .flwr.proto.SecureAggregation sa = 7; - if (this->_internal_has_sa()) { - target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage( - 7, _Internal::sa(this), target, stream); + // string task_type = 8; + if (!this->_internal_task_type().empty()) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->_internal_task_type().data(), static_cast(this->_internal_task_type().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.Task.task_type"); + target = stream->WriteStringMaybeAliased( + 8, this->_internal_task_type(), target); } - // .flwr.proto.ServerMessage legacy_server_message = 101 [deprecated = true]; - if (this->_internal_has_legacy_server_message()) { + // .flwr.proto.RecordSet recordset = 9; + if (this->_internal_has_recordset()) { target = 
stream->EnsureSpace(target); target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: InternalWriteMessage( - 101, _Internal::legacy_server_message(this), target, stream); + 9, _Internal::recordset(this), target, stream); } - // .flwr.proto.ClientMessage legacy_client_message = 102 [deprecated = true]; - if (this->_internal_has_legacy_client_message()) { + // .flwr.proto.Error error = 10; + if (this->_internal_has_error()) { target = stream->EnsureSpace(target); target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: InternalWriteMessage( - 102, _Internal::legacy_client_message(this), target, stream); + 10, _Internal::error(this), target, stream); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { @@ -758,7 +554,7 @@ size_t Task::ByteSizeLong() const { // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; - // repeated string ancestry = 6; + // repeated string ancestry = 7; total_size += 1 * ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(ancestry_.size()); for (int i = 0, n = ancestry_.size(); i < n; i++) { @@ -766,13 +562,6 @@ size_t Task::ByteSizeLong() const { ancestry_.Get(i)); } - // string created_at = 3; - if (!this->_internal_created_at().empty()) { - total_size += 1 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( - this->_internal_created_at()); - } - // string delivered_at = 4; if (!this->_internal_delivered_at().empty()) { total_size += 1 + @@ -780,11 +569,11 @@ size_t Task::ByteSizeLong() const { this->_internal_delivered_at()); } - // string ttl = 5; - if (!this->_internal_ttl().empty()) { + // string task_type = 8; + if (!this->_internal_task_type().empty()) { total_size += 1 + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( - this->_internal_ttl()); + this->_internal_task_type()); } // .flwr.proto.Node producer = 1; @@ -801,25 +590,33 @@ size_t Task::ByteSizeLong() const { *consumer_); } - // .flwr.proto.SecureAggregation sa = 7; - if 
(this->_internal_has_sa()) { + // .flwr.proto.RecordSet recordset = 9; + if (this->_internal_has_recordset()) { total_size += 1 + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( - *sa_); + *recordset_); } - // .flwr.proto.ServerMessage legacy_server_message = 101 [deprecated = true]; - if (this->_internal_has_legacy_server_message()) { - total_size += 2 + + // .flwr.proto.Error error = 10; + if (this->_internal_has_error()) { + total_size += 1 + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( - *legacy_server_message_); + *error_); } - // .flwr.proto.ClientMessage legacy_client_message = 102 [deprecated = true]; - if (this->_internal_has_legacy_client_message()) { - total_size += 2 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( - *legacy_client_message_); + // double created_at = 3; + if (!(this->_internal_created_at() <= 0 && this->_internal_created_at() >= 0)) { + total_size += 1 + 8; + } + + // double pushed_at = 5; + if (!(this->_internal_pushed_at() <= 0 && this->_internal_pushed_at() >= 0)) { + total_size += 1 + 8; + } + + // double ttl = 6; + if (!(this->_internal_ttl() <= 0 && this->_internal_ttl() >= 0)) { + total_size += 1 + 8; } return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); @@ -845,14 +642,11 @@ void Task::MergeFrom(const Task& from) { (void) cached_has_bits; ancestry_.MergeFrom(from.ancestry_); - if (!from._internal_created_at().empty()) { - _internal_set_created_at(from._internal_created_at()); - } if (!from._internal_delivered_at().empty()) { _internal_set_delivered_at(from._internal_delivered_at()); } - if (!from._internal_ttl().empty()) { - _internal_set_ttl(from._internal_ttl()); + if (!from._internal_task_type().empty()) { + _internal_set_task_type(from._internal_task_type()); } if (from._internal_has_producer()) { _internal_mutable_producer()->::flwr::proto::Node::MergeFrom(from._internal_producer()); @@ -860,14 +654,20 @@ void Task::MergeFrom(const Task& from) { if 
(from._internal_has_consumer()) { _internal_mutable_consumer()->::flwr::proto::Node::MergeFrom(from._internal_consumer()); } - if (from._internal_has_sa()) { - _internal_mutable_sa()->::flwr::proto::SecureAggregation::MergeFrom(from._internal_sa()); + if (from._internal_has_recordset()) { + _internal_mutable_recordset()->::flwr::proto::RecordSet::MergeFrom(from._internal_recordset()); } - if (from._internal_has_legacy_server_message()) { - _internal_mutable_legacy_server_message()->::flwr::proto::ServerMessage::MergeFrom(from._internal_legacy_server_message()); + if (from._internal_has_error()) { + _internal_mutable_error()->::flwr::proto::Error::MergeFrom(from._internal_error()); } - if (from._internal_has_legacy_client_message()) { - _internal_mutable_legacy_client_message()->::flwr::proto::ClientMessage::MergeFrom(from._internal_legacy_client_message()); + if (!(from._internal_created_at() <= 0 && from._internal_created_at() >= 0)) { + _internal_set_created_at(from._internal_created_at()); + } + if (!(from._internal_pushed_at() <= 0 && from._internal_pushed_at() >= 0)) { + _internal_set_pushed_at(from._internal_pushed_at()); + } + if (!(from._internal_ttl() <= 0 && from._internal_ttl() >= 0)) { + _internal_set_ttl(from._internal_ttl()); } _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); } @@ -889,11 +689,6 @@ void Task::InternalSwap(Task* other) { auto* rhs_arena = other->GetArenaForAllocation(); _internal_metadata_.InternalSwap(&other->_internal_metadata_); ancestry_.InternalSwap(&other->ancestry_); - ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap( - &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), - &created_at_, lhs_arena, - &other->created_at_, rhs_arena - ); ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap( &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), &delivered_at_, lhs_arena, @@ -901,12 +696,12 @@ void Task::InternalSwap(Task* other) { 
); ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap( &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), - &ttl_, lhs_arena, - &other->ttl_, rhs_arena + &task_type_, lhs_arena, + &other->task_type_, rhs_arena ); ::PROTOBUF_NAMESPACE_ID::internal::memswap< - PROTOBUF_FIELD_OFFSET(Task, legacy_client_message_) - + sizeof(Task::legacy_client_message_) + PROTOBUF_FIELD_OFFSET(Task, ttl_) + + sizeof(Task::ttl_) - PROTOBUF_FIELD_OFFSET(Task, producer_)>( reinterpret_cast(&producer_), reinterpret_cast(&other->producer_)); @@ -956,7 +751,7 @@ TaskIns::TaskIns(const TaskIns& from) } else { task_ = nullptr; } - workload_id_ = from.workload_id_; + run_id_ = from.run_id_; // @@protoc_insertion_point(copy_constructor:flwr.proto.TaskIns) } @@ -965,8 +760,8 @@ task_id_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlre group_id_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); ::memset(reinterpret_cast(this) + static_cast( reinterpret_cast(&task_) - reinterpret_cast(this)), - 0, static_cast(reinterpret_cast(&workload_id_) - - reinterpret_cast(&task_)) + sizeof(workload_id_)); + 0, static_cast(reinterpret_cast(&run_id_) - + reinterpret_cast(&task_)) + sizeof(run_id_)); } TaskIns::~TaskIns() { @@ -1005,7 +800,7 @@ void TaskIns::Clear() { delete task_; } task_ = nullptr; - workload_id_ = int64_t{0}; + run_id_ = int64_t{0}; _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -1035,10 +830,10 @@ const char* TaskIns::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::in } else goto handle_unusual; continue; - // sint64 workload_id = 3; + // sint64 run_id = 3; case 3: if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) { - workload_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr); + run_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr); CHK_(ptr); } else goto handle_unusual; @@ -1100,10 +895,10 @@ 
::PROTOBUF_NAMESPACE_ID::uint8* TaskIns::_InternalSerialize( 2, this->_internal_group_id(), target); } - // sint64 workload_id = 3; - if (this->_internal_workload_id() != 0) { + // sint64 run_id = 3; + if (this->_internal_run_id() != 0) { target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteSInt64ToArray(3, this->_internal_workload_id(), target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteSInt64ToArray(3, this->_internal_run_id(), target); } // .flwr.proto.Task task = 4; @@ -1151,9 +946,9 @@ size_t TaskIns::ByteSizeLong() const { *task_); } - // sint64 workload_id = 3; - if (this->_internal_workload_id() != 0) { - total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SInt64SizePlusOne(this->_internal_workload_id()); + // sint64 run_id = 3; + if (this->_internal_run_id() != 0) { + total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SInt64SizePlusOne(this->_internal_run_id()); } return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); @@ -1187,8 +982,8 @@ void TaskIns::MergeFrom(const TaskIns& from) { if (from._internal_has_task()) { _internal_mutable_task()->::flwr::proto::Task::MergeFrom(from._internal_task()); } - if (from._internal_workload_id() != 0) { - _internal_set_workload_id(from._internal_workload_id()); + if (from._internal_run_id() != 0) { + _internal_set_run_id(from._internal_run_id()); } _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); } @@ -1220,8 +1015,8 @@ void TaskIns::InternalSwap(TaskIns* other) { &other->group_id_, rhs_arena ); ::PROTOBUF_NAMESPACE_ID::internal::memswap< - PROTOBUF_FIELD_OFFSET(TaskIns, workload_id_) - + sizeof(TaskIns::workload_id_) + PROTOBUF_FIELD_OFFSET(TaskIns, run_id_) + + sizeof(TaskIns::run_id_) - PROTOBUF_FIELD_OFFSET(TaskIns, task_)>( reinterpret_cast(&task_), reinterpret_cast(&other->task_)); @@ -1271,7 +1066,7 @@ TaskRes::TaskRes(const TaskRes& from) } else { 
task_ = nullptr; } - workload_id_ = from.workload_id_; + run_id_ = from.run_id_; // @@protoc_insertion_point(copy_constructor:flwr.proto.TaskRes) } @@ -1280,8 +1075,8 @@ task_id_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlre group_id_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); ::memset(reinterpret_cast(this) + static_cast( reinterpret_cast(&task_) - reinterpret_cast(this)), - 0, static_cast(reinterpret_cast(&workload_id_) - - reinterpret_cast(&task_)) + sizeof(workload_id_)); + 0, static_cast(reinterpret_cast(&run_id_) - + reinterpret_cast(&task_)) + sizeof(run_id_)); } TaskRes::~TaskRes() { @@ -1320,7 +1115,7 @@ void TaskRes::Clear() { delete task_; } task_ = nullptr; - workload_id_ = int64_t{0}; + run_id_ = int64_t{0}; _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -1350,10 +1145,10 @@ const char* TaskRes::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::in } else goto handle_unusual; continue; - // sint64 workload_id = 3; + // sint64 run_id = 3; case 3: if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) { - workload_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr); + run_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr); CHK_(ptr); } else goto handle_unusual; @@ -1415,10 +1210,10 @@ ::PROTOBUF_NAMESPACE_ID::uint8* TaskRes::_InternalSerialize( 2, this->_internal_group_id(), target); } - // sint64 workload_id = 3; - if (this->_internal_workload_id() != 0) { + // sint64 run_id = 3; + if (this->_internal_run_id() != 0) { target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteSInt64ToArray(3, this->_internal_workload_id(), target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteSInt64ToArray(3, this->_internal_run_id(), target); } // .flwr.proto.Task task = 4; @@ -1466,9 +1261,9 @@ size_t TaskRes::ByteSizeLong() const { *task_); } 
- // sint64 workload_id = 3; - if (this->_internal_workload_id() != 0) { - total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SInt64SizePlusOne(this->_internal_workload_id()); + // sint64 run_id = 3; + if (this->_internal_run_id() != 0) { + total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SInt64SizePlusOne(this->_internal_run_id()); } return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); @@ -1502,8 +1297,8 @@ void TaskRes::MergeFrom(const TaskRes& from) { if (from._internal_has_task()) { _internal_mutable_task()->::flwr::proto::Task::MergeFrom(from._internal_task()); } - if (from._internal_workload_id() != 0) { - _internal_set_workload_id(from._internal_workload_id()); + if (from._internal_run_id() != 0) { + _internal_set_run_id(from._internal_run_id()); } _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); } @@ -1535,8 +1330,8 @@ void TaskRes::InternalSwap(TaskRes* other) { &other->group_id_, rhs_arena ); ::PROTOBUF_NAMESPACE_ID::internal::memswap< - PROTOBUF_FIELD_OFFSET(TaskRes, workload_id_) - + sizeof(TaskRes::workload_id_) + PROTOBUF_FIELD_OFFSET(TaskRes, run_id_) + + sizeof(TaskRes::run_id_) - PROTOBUF_FIELD_OFFSET(TaskRes, task_)>( reinterpret_cast(&task_), reinterpret_cast(&other->task_)); @@ -1548,1856 +1343,19 @@ ::PROTOBUF_NAMESPACE_ID::Metadata TaskRes::GetMetadata() const { file_level_metadata_flwr_2fproto_2ftask_2eproto[2]); } -// =================================================================== - -class Value_DoubleList::_Internal { - public: -}; - -Value_DoubleList::Value_DoubleList(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), - vals_(arena) { - SharedCtor(); - if (!is_message_owned) { - RegisterArenaDtor(arena); - } - // @@protoc_insertion_point(arena_constructor:flwr.proto.Value.DoubleList) -} -Value_DoubleList::Value_DoubleList(const Value_DoubleList& from) - : 
::PROTOBUF_NAMESPACE_ID::Message(), - vals_(from.vals_) { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - // @@protoc_insertion_point(copy_constructor:flwr.proto.Value.DoubleList) -} - -void Value_DoubleList::SharedCtor() { -} - -Value_DoubleList::~Value_DoubleList() { - // @@protoc_insertion_point(destructor:flwr.proto.Value.DoubleList) - if (GetArenaForAllocation() != nullptr) return; - SharedDtor(); - _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -inline void Value_DoubleList::SharedDtor() { - GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); -} - -void Value_DoubleList::ArenaDtor(void* object) { - Value_DoubleList* _this = reinterpret_cast< Value_DoubleList* >(object); - (void)_this; -} -void Value_DoubleList::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { -} -void Value_DoubleList::SetCachedSize(int size) const { - _cached_size_.Set(size); -} - -void Value_DoubleList::Clear() { -// @@protoc_insertion_point(message_clear_start:flwr.proto.Value.DoubleList) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - vals_.Clear(); - _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -const char* Value_DoubleList::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { -#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure - while (!ctx->Done(&ptr)) { - ::PROTOBUF_NAMESPACE_ID::uint32 tag; - ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); - switch (tag >> 3) { - // repeated double vals = 1; - case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { - ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedDoubleParser(_internal_mutable_vals(), ptr, ctx); - CHK_(ptr); - } else if (static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 9) { - 
_internal_add_vals(::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr)); - ptr += sizeof(double); - } else - goto handle_unusual; - continue; - default: - goto handle_unusual; - } // switch - handle_unusual: - if ((tag == 0) || ((tag & 7) == 4)) { - CHK_(ptr); - ctx->SetLastTag(tag); - goto message_done; - } - ptr = UnknownFieldParse( - tag, - _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), - ptr, ctx); - CHK_(ptr != nullptr); - } // while -message_done: - return ptr; -failure: - ptr = nullptr; - goto message_done; -#undef CHK_ -} - -::PROTOBUF_NAMESPACE_ID::uint8* Value_DoubleList::_InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { - // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.Value.DoubleList) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - // repeated double vals = 1; - if (this->_internal_vals_size() > 0) { - target = stream->WriteFixedPacked(1, _internal_vals(), target); - } - - if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( - _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); - } - // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.Value.DoubleList) - return target; -} - -size_t Value_DoubleList::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:flwr.proto.Value.DoubleList) - size_t total_size = 0; - - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - // repeated double vals = 1; - { - unsigned int count = static_cast(this->_internal_vals_size()); - size_t data_size = 8UL * count; - if (data_size > 0) { - total_size += 1 + - 
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size( - static_cast<::PROTOBUF_NAMESPACE_ID::int32>(data_size)); - } - total_size += data_size; - } - - return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); -} - -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData Value_DoubleList::_class_data_ = { - ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, - Value_DoubleList::MergeImpl -}; -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*Value_DoubleList::GetClassData() const { return &_class_data_; } - -void Value_DoubleList::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, - const ::PROTOBUF_NAMESPACE_ID::Message& from) { - static_cast(to)->MergeFrom( - static_cast(from)); +// @@protoc_insertion_point(namespace_scope) +} // namespace proto +} // namespace flwr +PROTOBUF_NAMESPACE_OPEN +template<> PROTOBUF_NOINLINE ::flwr::proto::Task* Arena::CreateMaybeMessage< ::flwr::proto::Task >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::Task >(arena); } - - -void Value_DoubleList::MergeFrom(const Value_DoubleList& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.Value.DoubleList) - GOOGLE_DCHECK_NE(&from, this); - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - vals_.MergeFrom(from.vals_); - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); -} - -void Value_DoubleList::CopyFrom(const Value_DoubleList& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.Value.DoubleList) - if (&from == this) return; - Clear(); - MergeFrom(from); -} - -bool Value_DoubleList::IsInitialized() const { - return true; -} - -void Value_DoubleList::InternalSwap(Value_DoubleList* other) { - using std::swap; - _internal_metadata_.InternalSwap(&other->_internal_metadata_); - vals_.InternalSwap(&other->vals_); -} - -::PROTOBUF_NAMESPACE_ID::Metadata Value_DoubleList::GetMetadata() const { - return 
::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( - &descriptor_table_flwr_2fproto_2ftask_2eproto_getter, &descriptor_table_flwr_2fproto_2ftask_2eproto_once, - file_level_metadata_flwr_2fproto_2ftask_2eproto[3]); -} - -// =================================================================== - -class Value_Sint64List::_Internal { - public: -}; - -Value_Sint64List::Value_Sint64List(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), - vals_(arena) { - SharedCtor(); - if (!is_message_owned) { - RegisterArenaDtor(arena); - } - // @@protoc_insertion_point(arena_constructor:flwr.proto.Value.Sint64List) -} -Value_Sint64List::Value_Sint64List(const Value_Sint64List& from) - : ::PROTOBUF_NAMESPACE_ID::Message(), - vals_(from.vals_) { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - // @@protoc_insertion_point(copy_constructor:flwr.proto.Value.Sint64List) -} - -void Value_Sint64List::SharedCtor() { -} - -Value_Sint64List::~Value_Sint64List() { - // @@protoc_insertion_point(destructor:flwr.proto.Value.Sint64List) - if (GetArenaForAllocation() != nullptr) return; - SharedDtor(); - _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -inline void Value_Sint64List::SharedDtor() { - GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); -} - -void Value_Sint64List::ArenaDtor(void* object) { - Value_Sint64List* _this = reinterpret_cast< Value_Sint64List* >(object); - (void)_this; -} -void Value_Sint64List::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { -} -void Value_Sint64List::SetCachedSize(int size) const { - _cached_size_.Set(size); -} - -void Value_Sint64List::Clear() { -// @@protoc_insertion_point(message_clear_start:flwr.proto.Value.Sint64List) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - vals_.Clear(); - 
_internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -const char* Value_Sint64List::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { -#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure - while (!ctx->Done(&ptr)) { - ::PROTOBUF_NAMESPACE_ID::uint32 tag; - ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); - switch (tag >> 3) { - // repeated sint64 vals = 1; - case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { - ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedSInt64Parser(_internal_mutable_vals(), ptr, ctx); - CHK_(ptr); - } else if (static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8) { - _internal_add_vals(::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr)); - CHK_(ptr); - } else - goto handle_unusual; - continue; - default: - goto handle_unusual; - } // switch - handle_unusual: - if ((tag == 0) || ((tag & 7) == 4)) { - CHK_(ptr); - ctx->SetLastTag(tag); - goto message_done; - } - ptr = UnknownFieldParse( - tag, - _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), - ptr, ctx); - CHK_(ptr != nullptr); - } // while -message_done: - return ptr; -failure: - ptr = nullptr; - goto message_done; -#undef CHK_ -} - -::PROTOBUF_NAMESPACE_ID::uint8* Value_Sint64List::_InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { - // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.Value.Sint64List) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - // repeated sint64 vals = 1; - { - int byte_size = _vals_cached_byte_size_.load(std::memory_order_relaxed); - if (byte_size > 0) { - target = stream->WriteSInt64Packed( - 1, _internal_vals(), byte_size, target); - } - } - - if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { - target = 
::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( - _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); - } - // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.Value.Sint64List) - return target; -} - -size_t Value_Sint64List::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:flwr.proto.Value.Sint64List) - size_t total_size = 0; - - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - // repeated sint64 vals = 1; - { - size_t data_size = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - SInt64Size(this->vals_); - if (data_size > 0) { - total_size += 1 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size( - static_cast<::PROTOBUF_NAMESPACE_ID::int32>(data_size)); - } - int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(data_size); - _vals_cached_byte_size_.store(cached_size, - std::memory_order_relaxed); - total_size += data_size; - } - - return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); -} - -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData Value_Sint64List::_class_data_ = { - ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, - Value_Sint64List::MergeImpl -}; -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*Value_Sint64List::GetClassData() const { return &_class_data_; } - -void Value_Sint64List::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, - const ::PROTOBUF_NAMESPACE_ID::Message& from) { - static_cast(to)->MergeFrom( - static_cast(from)); -} - - -void Value_Sint64List::MergeFrom(const Value_Sint64List& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.Value.Sint64List) - GOOGLE_DCHECK_NE(&from, this); - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - 
vals_.MergeFrom(from.vals_); - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); -} - -void Value_Sint64List::CopyFrom(const Value_Sint64List& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.Value.Sint64List) - if (&from == this) return; - Clear(); - MergeFrom(from); -} - -bool Value_Sint64List::IsInitialized() const { - return true; -} - -void Value_Sint64List::InternalSwap(Value_Sint64List* other) { - using std::swap; - _internal_metadata_.InternalSwap(&other->_internal_metadata_); - vals_.InternalSwap(&other->vals_); -} - -::PROTOBUF_NAMESPACE_ID::Metadata Value_Sint64List::GetMetadata() const { - return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( - &descriptor_table_flwr_2fproto_2ftask_2eproto_getter, &descriptor_table_flwr_2fproto_2ftask_2eproto_once, - file_level_metadata_flwr_2fproto_2ftask_2eproto[4]); -} - -// =================================================================== - -class Value_BoolList::_Internal { - public: -}; - -Value_BoolList::Value_BoolList(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), - vals_(arena) { - SharedCtor(); - if (!is_message_owned) { - RegisterArenaDtor(arena); - } - // @@protoc_insertion_point(arena_constructor:flwr.proto.Value.BoolList) -} -Value_BoolList::Value_BoolList(const Value_BoolList& from) - : ::PROTOBUF_NAMESPACE_ID::Message(), - vals_(from.vals_) { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - // @@protoc_insertion_point(copy_constructor:flwr.proto.Value.BoolList) -} - -void Value_BoolList::SharedCtor() { -} - -Value_BoolList::~Value_BoolList() { - // @@protoc_insertion_point(destructor:flwr.proto.Value.BoolList) - if (GetArenaForAllocation() != nullptr) return; - SharedDtor(); - _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -inline void 
Value_BoolList::SharedDtor() { - GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); -} - -void Value_BoolList::ArenaDtor(void* object) { - Value_BoolList* _this = reinterpret_cast< Value_BoolList* >(object); - (void)_this; -} -void Value_BoolList::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { -} -void Value_BoolList::SetCachedSize(int size) const { - _cached_size_.Set(size); -} - -void Value_BoolList::Clear() { -// @@protoc_insertion_point(message_clear_start:flwr.proto.Value.BoolList) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - vals_.Clear(); - _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -const char* Value_BoolList::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { -#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure - while (!ctx->Done(&ptr)) { - ::PROTOBUF_NAMESPACE_ID::uint32 tag; - ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); - switch (tag >> 3) { - // repeated bool vals = 1; - case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { - ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedBoolParser(_internal_mutable_vals(), ptr, ctx); - CHK_(ptr); - } else if (static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8) { - _internal_add_vals(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr)); - CHK_(ptr); - } else - goto handle_unusual; - continue; - default: - goto handle_unusual; - } // switch - handle_unusual: - if ((tag == 0) || ((tag & 7) == 4)) { - CHK_(ptr); - ctx->SetLastTag(tag); - goto message_done; - } - ptr = UnknownFieldParse( - tag, - _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), - ptr, ctx); - CHK_(ptr != nullptr); - } // while -message_done: - return ptr; -failure: - ptr = nullptr; - goto message_done; -#undef CHK_ -} - -::PROTOBUF_NAMESPACE_ID::uint8* 
Value_BoolList::_InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { - // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.Value.BoolList) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - // repeated bool vals = 1; - if (this->_internal_vals_size() > 0) { - target = stream->WriteFixedPacked(1, _internal_vals(), target); - } - - if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( - _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); - } - // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.Value.BoolList) - return target; -} - -size_t Value_BoolList::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:flwr.proto.Value.BoolList) - size_t total_size = 0; - - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - // repeated bool vals = 1; - { - unsigned int count = static_cast(this->_internal_vals_size()); - size_t data_size = 1UL * count; - if (data_size > 0) { - total_size += 1 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size( - static_cast<::PROTOBUF_NAMESPACE_ID::int32>(data_size)); - } - total_size += data_size; - } - - return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); -} - -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData Value_BoolList::_class_data_ = { - ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, - Value_BoolList::MergeImpl -}; -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*Value_BoolList::GetClassData() const { return &_class_data_; } - -void Value_BoolList::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, - const ::PROTOBUF_NAMESPACE_ID::Message& 
from) { - static_cast(to)->MergeFrom( - static_cast(from)); -} - - -void Value_BoolList::MergeFrom(const Value_BoolList& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.Value.BoolList) - GOOGLE_DCHECK_NE(&from, this); - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - vals_.MergeFrom(from.vals_); - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); -} - -void Value_BoolList::CopyFrom(const Value_BoolList& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.Value.BoolList) - if (&from == this) return; - Clear(); - MergeFrom(from); -} - -bool Value_BoolList::IsInitialized() const { - return true; -} - -void Value_BoolList::InternalSwap(Value_BoolList* other) { - using std::swap; - _internal_metadata_.InternalSwap(&other->_internal_metadata_); - vals_.InternalSwap(&other->vals_); -} - -::PROTOBUF_NAMESPACE_ID::Metadata Value_BoolList::GetMetadata() const { - return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( - &descriptor_table_flwr_2fproto_2ftask_2eproto_getter, &descriptor_table_flwr_2fproto_2ftask_2eproto_once, - file_level_metadata_flwr_2fproto_2ftask_2eproto[5]); -} - -// =================================================================== - -class Value_StringList::_Internal { - public: -}; - -Value_StringList::Value_StringList(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), - vals_(arena) { - SharedCtor(); - if (!is_message_owned) { - RegisterArenaDtor(arena); - } - // @@protoc_insertion_point(arena_constructor:flwr.proto.Value.StringList) -} -Value_StringList::Value_StringList(const Value_StringList& from) - : ::PROTOBUF_NAMESPACE_ID::Message(), - vals_(from.vals_) { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - // 
@@protoc_insertion_point(copy_constructor:flwr.proto.Value.StringList) -} - -void Value_StringList::SharedCtor() { -} - -Value_StringList::~Value_StringList() { - // @@protoc_insertion_point(destructor:flwr.proto.Value.StringList) - if (GetArenaForAllocation() != nullptr) return; - SharedDtor(); - _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -inline void Value_StringList::SharedDtor() { - GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); -} - -void Value_StringList::ArenaDtor(void* object) { - Value_StringList* _this = reinterpret_cast< Value_StringList* >(object); - (void)_this; -} -void Value_StringList::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { -} -void Value_StringList::SetCachedSize(int size) const { - _cached_size_.Set(size); -} - -void Value_StringList::Clear() { -// @@protoc_insertion_point(message_clear_start:flwr.proto.Value.StringList) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - vals_.Clear(); - _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -const char* Value_StringList::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { -#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure - while (!ctx->Done(&ptr)) { - ::PROTOBUF_NAMESPACE_ID::uint32 tag; - ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); - switch (tag >> 3) { - // repeated string vals = 1; - case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { - ptr -= 1; - do { - ptr += 1; - auto str = _internal_add_vals(); - ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); - CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.Value.StringList.vals")); - CHK_(ptr); - if (!ctx->DataAvailable(ptr)) break; - } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); - } else - goto 
handle_unusual; - continue; - default: - goto handle_unusual; - } // switch - handle_unusual: - if ((tag == 0) || ((tag & 7) == 4)) { - CHK_(ptr); - ctx->SetLastTag(tag); - goto message_done; - } - ptr = UnknownFieldParse( - tag, - _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), - ptr, ctx); - CHK_(ptr != nullptr); - } // while -message_done: - return ptr; -failure: - ptr = nullptr; - goto message_done; -#undef CHK_ -} - -::PROTOBUF_NAMESPACE_ID::uint8* Value_StringList::_InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { - // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.Value.StringList) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - // repeated string vals = 1; - for (int i = 0, n = this->_internal_vals_size(); i < n; i++) { - const auto& s = this->_internal_vals(i); - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( - s.data(), static_cast(s.length()), - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, - "flwr.proto.Value.StringList.vals"); - target = stream->WriteString(1, s, target); - } - - if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( - _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); - } - // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.Value.StringList) - return target; -} - -size_t Value_StringList::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:flwr.proto.Value.StringList) - size_t total_size = 0; - - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - // repeated string vals = 1; - 
total_size += 1 * - ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(vals_.size()); - for (int i = 0, n = vals_.size(); i < n; i++) { - total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( - vals_.Get(i)); - } - - return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); -} - -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData Value_StringList::_class_data_ = { - ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, - Value_StringList::MergeImpl -}; -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*Value_StringList::GetClassData() const { return &_class_data_; } - -void Value_StringList::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, - const ::PROTOBUF_NAMESPACE_ID::Message& from) { - static_cast(to)->MergeFrom( - static_cast(from)); -} - - -void Value_StringList::MergeFrom(const Value_StringList& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.Value.StringList) - GOOGLE_DCHECK_NE(&from, this); - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - vals_.MergeFrom(from.vals_); - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); -} - -void Value_StringList::CopyFrom(const Value_StringList& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.Value.StringList) - if (&from == this) return; - Clear(); - MergeFrom(from); -} - -bool Value_StringList::IsInitialized() const { - return true; -} - -void Value_StringList::InternalSwap(Value_StringList* other) { - using std::swap; - _internal_metadata_.InternalSwap(&other->_internal_metadata_); - vals_.InternalSwap(&other->vals_); -} - -::PROTOBUF_NAMESPACE_ID::Metadata Value_StringList::GetMetadata() const { - return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( - &descriptor_table_flwr_2fproto_2ftask_2eproto_getter, &descriptor_table_flwr_2fproto_2ftask_2eproto_once, - file_level_metadata_flwr_2fproto_2ftask_2eproto[6]); -} - -// 
=================================================================== - -class Value_BytesList::_Internal { - public: -}; - -Value_BytesList::Value_BytesList(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), - vals_(arena) { - SharedCtor(); - if (!is_message_owned) { - RegisterArenaDtor(arena); - } - // @@protoc_insertion_point(arena_constructor:flwr.proto.Value.BytesList) -} -Value_BytesList::Value_BytesList(const Value_BytesList& from) - : ::PROTOBUF_NAMESPACE_ID::Message(), - vals_(from.vals_) { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - // @@protoc_insertion_point(copy_constructor:flwr.proto.Value.BytesList) -} - -void Value_BytesList::SharedCtor() { -} - -Value_BytesList::~Value_BytesList() { - // @@protoc_insertion_point(destructor:flwr.proto.Value.BytesList) - if (GetArenaForAllocation() != nullptr) return; - SharedDtor(); - _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -inline void Value_BytesList::SharedDtor() { - GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); -} - -void Value_BytesList::ArenaDtor(void* object) { - Value_BytesList* _this = reinterpret_cast< Value_BytesList* >(object); - (void)_this; -} -void Value_BytesList::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { -} -void Value_BytesList::SetCachedSize(int size) const { - _cached_size_.Set(size); -} - -void Value_BytesList::Clear() { -// @@protoc_insertion_point(message_clear_start:flwr.proto.Value.BytesList) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - vals_.Clear(); - _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -const char* Value_BytesList::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { -#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto 
failure - while (!ctx->Done(&ptr)) { - ::PROTOBUF_NAMESPACE_ID::uint32 tag; - ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); - switch (tag >> 3) { - // repeated bytes vals = 1; - case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { - ptr -= 1; - do { - ptr += 1; - auto str = _internal_add_vals(); - ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); - CHK_(ptr); - if (!ctx->DataAvailable(ptr)) break; - } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); - } else - goto handle_unusual; - continue; - default: - goto handle_unusual; - } // switch - handle_unusual: - if ((tag == 0) || ((tag & 7) == 4)) { - CHK_(ptr); - ctx->SetLastTag(tag); - goto message_done; - } - ptr = UnknownFieldParse( - tag, - _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), - ptr, ctx); - CHK_(ptr != nullptr); - } // while -message_done: - return ptr; -failure: - ptr = nullptr; - goto message_done; -#undef CHK_ -} - -::PROTOBUF_NAMESPACE_ID::uint8* Value_BytesList::_InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { - // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.Value.BytesList) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - // repeated bytes vals = 1; - for (int i = 0, n = this->_internal_vals_size(); i < n; i++) { - const auto& s = this->_internal_vals(i); - target = stream->WriteBytes(1, s, target); - } - - if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( - _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); - } - // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.Value.BytesList) - return 
target; -} - -size_t Value_BytesList::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:flwr.proto.Value.BytesList) - size_t total_size = 0; - - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - // repeated bytes vals = 1; - total_size += 1 * - ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(vals_.size()); - for (int i = 0, n = vals_.size(); i < n; i++) { - total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize( - vals_.Get(i)); - } - - return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); -} - -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData Value_BytesList::_class_data_ = { - ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, - Value_BytesList::MergeImpl -}; -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*Value_BytesList::GetClassData() const { return &_class_data_; } - -void Value_BytesList::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, - const ::PROTOBUF_NAMESPACE_ID::Message& from) { - static_cast(to)->MergeFrom( - static_cast(from)); -} - - -void Value_BytesList::MergeFrom(const Value_BytesList& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.Value.BytesList) - GOOGLE_DCHECK_NE(&from, this); - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - vals_.MergeFrom(from.vals_); - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); -} - -void Value_BytesList::CopyFrom(const Value_BytesList& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.Value.BytesList) - if (&from == this) return; - Clear(); - MergeFrom(from); -} - -bool Value_BytesList::IsInitialized() const { - return true; -} - -void Value_BytesList::InternalSwap(Value_BytesList* other) { - using std::swap; - _internal_metadata_.InternalSwap(&other->_internal_metadata_); - vals_.InternalSwap(&other->vals_); -} 
- -::PROTOBUF_NAMESPACE_ID::Metadata Value_BytesList::GetMetadata() const { - return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( - &descriptor_table_flwr_2fproto_2ftask_2eproto_getter, &descriptor_table_flwr_2fproto_2ftask_2eproto_once, - file_level_metadata_flwr_2fproto_2ftask_2eproto[7]); -} - -// =================================================================== - -class Value::_Internal { - public: - static const ::flwr::proto::Value_DoubleList& double_list(const Value* msg); - static const ::flwr::proto::Value_Sint64List& sint64_list(const Value* msg); - static const ::flwr::proto::Value_BoolList& bool_list(const Value* msg); - static const ::flwr::proto::Value_StringList& string_list(const Value* msg); - static const ::flwr::proto::Value_BytesList& bytes_list(const Value* msg); -}; - -const ::flwr::proto::Value_DoubleList& -Value::_Internal::double_list(const Value* msg) { - return *msg->value_.double_list_; -} -const ::flwr::proto::Value_Sint64List& -Value::_Internal::sint64_list(const Value* msg) { - return *msg->value_.sint64_list_; -} -const ::flwr::proto::Value_BoolList& -Value::_Internal::bool_list(const Value* msg) { - return *msg->value_.bool_list_; -} -const ::flwr::proto::Value_StringList& -Value::_Internal::string_list(const Value* msg) { - return *msg->value_.string_list_; -} -const ::flwr::proto::Value_BytesList& -Value::_Internal::bytes_list(const Value* msg) { - return *msg->value_.bytes_list_; -} -void Value::set_allocated_double_list(::flwr::proto::Value_DoubleList* double_list) { - ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); - clear_value(); - if (double_list) { - ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = - ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Value_DoubleList>::GetOwningArena(double_list); - if (message_arena != submessage_arena) { - double_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, double_list, submessage_arena); - } - 
set_has_double_list(); - value_.double_list_ = double_list; - } - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Value.double_list) -} -void Value::set_allocated_sint64_list(::flwr::proto::Value_Sint64List* sint64_list) { - ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); - clear_value(); - if (sint64_list) { - ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = - ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Value_Sint64List>::GetOwningArena(sint64_list); - if (message_arena != submessage_arena) { - sint64_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, sint64_list, submessage_arena); - } - set_has_sint64_list(); - value_.sint64_list_ = sint64_list; - } - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Value.sint64_list) -} -void Value::set_allocated_bool_list(::flwr::proto::Value_BoolList* bool_list) { - ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); - clear_value(); - if (bool_list) { - ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = - ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Value_BoolList>::GetOwningArena(bool_list); - if (message_arena != submessage_arena) { - bool_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, bool_list, submessage_arena); - } - set_has_bool_list(); - value_.bool_list_ = bool_list; - } - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Value.bool_list) -} -void Value::set_allocated_string_list(::flwr::proto::Value_StringList* string_list) { - ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); - clear_value(); - if (string_list) { - ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = - ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Value_StringList>::GetOwningArena(string_list); - if (message_arena != submessage_arena) { - string_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, string_list, 
submessage_arena); - } - set_has_string_list(); - value_.string_list_ = string_list; - } - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Value.string_list) -} -void Value::set_allocated_bytes_list(::flwr::proto::Value_BytesList* bytes_list) { - ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); - clear_value(); - if (bytes_list) { - ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = - ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Value_BytesList>::GetOwningArena(bytes_list); - if (message_arena != submessage_arena) { - bytes_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, bytes_list, submessage_arena); - } - set_has_bytes_list(); - value_.bytes_list_ = bytes_list; - } - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Value.bytes_list) -} -Value::Value(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(); - if (!is_message_owned) { - RegisterArenaDtor(arena); - } - // @@protoc_insertion_point(arena_constructor:flwr.proto.Value) -} -Value::Value(const Value& from) - : ::PROTOBUF_NAMESPACE_ID::Message() { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - clear_has_value(); - switch (from.value_case()) { - case kDouble: { - _internal_set_double_(from._internal_double_()); - break; - } - case kSint64: { - _internal_set_sint64(from._internal_sint64()); - break; - } - case kBool: { - _internal_set_bool_(from._internal_bool_()); - break; - } - case kString: { - _internal_set_string(from._internal_string()); - break; - } - case kBytes: { - _internal_set_bytes(from._internal_bytes()); - break; - } - case kDoubleList: { - _internal_mutable_double_list()->::flwr::proto::Value_DoubleList::MergeFrom(from._internal_double_list()); - break; - } - case kSint64List: { - 
_internal_mutable_sint64_list()->::flwr::proto::Value_Sint64List::MergeFrom(from._internal_sint64_list()); - break; - } - case kBoolList: { - _internal_mutable_bool_list()->::flwr::proto::Value_BoolList::MergeFrom(from._internal_bool_list()); - break; - } - case kStringList: { - _internal_mutable_string_list()->::flwr::proto::Value_StringList::MergeFrom(from._internal_string_list()); - break; - } - case kBytesList: { - _internal_mutable_bytes_list()->::flwr::proto::Value_BytesList::MergeFrom(from._internal_bytes_list()); - break; - } - case VALUE_NOT_SET: { - break; - } - } - // @@protoc_insertion_point(copy_constructor:flwr.proto.Value) -} - -void Value::SharedCtor() { -clear_has_value(); -} - -Value::~Value() { - // @@protoc_insertion_point(destructor:flwr.proto.Value) - if (GetArenaForAllocation() != nullptr) return; - SharedDtor(); - _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -inline void Value::SharedDtor() { - GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); - if (has_value()) { - clear_value(); - } -} - -void Value::ArenaDtor(void* object) { - Value* _this = reinterpret_cast< Value* >(object); - (void)_this; -} -void Value::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { -} -void Value::SetCachedSize(int size) const { - _cached_size_.Set(size); -} - -void Value::clear_value() { -// @@protoc_insertion_point(one_of_clear_start:flwr.proto.Value) - switch (value_case()) { - case kDouble: { - // No need to clear - break; - } - case kSint64: { - // No need to clear - break; - } - case kBool: { - // No need to clear - break; - } - case kString: { - value_.string_.Destroy(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); - break; - } - case kBytes: { - value_.bytes_.Destroy(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); - break; - } - case kDoubleList: { - if (GetArenaForAllocation() == nullptr) { - delete value_.double_list_; - } - 
break; - } - case kSint64List: { - if (GetArenaForAllocation() == nullptr) { - delete value_.sint64_list_; - } - break; - } - case kBoolList: { - if (GetArenaForAllocation() == nullptr) { - delete value_.bool_list_; - } - break; - } - case kStringList: { - if (GetArenaForAllocation() == nullptr) { - delete value_.string_list_; - } - break; - } - case kBytesList: { - if (GetArenaForAllocation() == nullptr) { - delete value_.bytes_list_; - } - break; - } - case VALUE_NOT_SET: { - break; - } - } - _oneof_case_[0] = VALUE_NOT_SET; -} - - -void Value::Clear() { -// @@protoc_insertion_point(message_clear_start:flwr.proto.Value) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - clear_value(); - _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -const char* Value::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { -#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure - while (!ctx->Done(&ptr)) { - ::PROTOBUF_NAMESPACE_ID::uint32 tag; - ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); - switch (tag >> 3) { - // double double = 1; - case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 9)) { - _internal_set_double_(::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr)); - ptr += sizeof(double); - } else - goto handle_unusual; - continue; - // sint64 sint64 = 2; - case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 16)) { - _internal_set_sint64(::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr)); - CHK_(ptr); - } else - goto handle_unusual; - continue; - // bool bool = 3; - case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) { - _internal_set_bool_(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr)); - CHK_(ptr); - } else - goto handle_unusual; - continue; - // string 
string = 4; - case 4: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 34)) { - auto str = _internal_mutable_string(); - ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); - CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.Value.string")); - CHK_(ptr); - } else - goto handle_unusual; - continue; - // bytes bytes = 5; - case 5: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 42)) { - auto str = _internal_mutable_bytes(); - ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); - CHK_(ptr); - } else - goto handle_unusual; - continue; - // .flwr.proto.Value.DoubleList double_list = 21; - case 21: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 170)) { - ptr = ctx->ParseMessage(_internal_mutable_double_list(), ptr); - CHK_(ptr); - } else - goto handle_unusual; - continue; - // .flwr.proto.Value.Sint64List sint64_list = 22; - case 22: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 178)) { - ptr = ctx->ParseMessage(_internal_mutable_sint64_list(), ptr); - CHK_(ptr); - } else - goto handle_unusual; - continue; - // .flwr.proto.Value.BoolList bool_list = 23; - case 23: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 186)) { - ptr = ctx->ParseMessage(_internal_mutable_bool_list(), ptr); - CHK_(ptr); - } else - goto handle_unusual; - continue; - // .flwr.proto.Value.StringList string_list = 24; - case 24: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 194)) { - ptr = ctx->ParseMessage(_internal_mutable_string_list(), ptr); - CHK_(ptr); - } else - goto handle_unusual; - continue; - // .flwr.proto.Value.BytesList bytes_list = 25; - case 25: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 202)) { - ptr = ctx->ParseMessage(_internal_mutable_bytes_list(), ptr); - CHK_(ptr); - } else 
- goto handle_unusual; - continue; - default: - goto handle_unusual; - } // switch - handle_unusual: - if ((tag == 0) || ((tag & 7) == 4)) { - CHK_(ptr); - ctx->SetLastTag(tag); - goto message_done; - } - ptr = UnknownFieldParse( - tag, - _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), - ptr, ctx); - CHK_(ptr != nullptr); - } // while -message_done: - return ptr; -failure: - ptr = nullptr; - goto message_done; -#undef CHK_ -} - -::PROTOBUF_NAMESPACE_ID::uint8* Value::_InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { - // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.Value) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - // double double = 1; - if (_internal_has_double_()) { - target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteDoubleToArray(1, this->_internal_double_(), target); - } - - // sint64 sint64 = 2; - if (_internal_has_sint64()) { - target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteSInt64ToArray(2, this->_internal_sint64(), target); - } - - // bool bool = 3; - if (_internal_has_bool_()) { - target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(3, this->_internal_bool_(), target); - } - - // string string = 4; - if (_internal_has_string()) { - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( - this->_internal_string().data(), static_cast(this->_internal_string().length()), - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, - "flwr.proto.Value.string"); - target = stream->WriteStringMaybeAliased( - 4, this->_internal_string(), target); - } - - // bytes bytes = 5; - if (_internal_has_bytes()) { - target = stream->WriteBytesMaybeAliased( - 5, this->_internal_bytes(), target); - } - - // 
.flwr.proto.Value.DoubleList double_list = 21; - if (_internal_has_double_list()) { - target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage( - 21, _Internal::double_list(this), target, stream); - } - - // .flwr.proto.Value.Sint64List sint64_list = 22; - if (_internal_has_sint64_list()) { - target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage( - 22, _Internal::sint64_list(this), target, stream); - } - - // .flwr.proto.Value.BoolList bool_list = 23; - if (_internal_has_bool_list()) { - target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage( - 23, _Internal::bool_list(this), target, stream); - } - - // .flwr.proto.Value.StringList string_list = 24; - if (_internal_has_string_list()) { - target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage( - 24, _Internal::string_list(this), target, stream); - } - - // .flwr.proto.Value.BytesList bytes_list = 25; - if (_internal_has_bytes_list()) { - target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage( - 25, _Internal::bytes_list(this), target, stream); - } - - if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( - _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); - } - // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.Value) - return target; -} - -size_t Value::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:flwr.proto.Value) - size_t total_size = 0; - - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler 
warnings about cached_has_bits being unused - (void) cached_has_bits; - - switch (value_case()) { - // double double = 1; - case kDouble: { - total_size += 1 + 8; - break; - } - // sint64 sint64 = 2; - case kSint64: { - total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SInt64SizePlusOne(this->_internal_sint64()); - break; - } - // bool bool = 3; - case kBool: { - total_size += 1 + 1; - break; - } - // string string = 4; - case kString: { - total_size += 1 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( - this->_internal_string()); - break; - } - // bytes bytes = 5; - case kBytes: { - total_size += 1 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize( - this->_internal_bytes()); - break; - } - // .flwr.proto.Value.DoubleList double_list = 21; - case kDoubleList: { - total_size += 2 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( - *value_.double_list_); - break; - } - // .flwr.proto.Value.Sint64List sint64_list = 22; - case kSint64List: { - total_size += 2 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( - *value_.sint64_list_); - break; - } - // .flwr.proto.Value.BoolList bool_list = 23; - case kBoolList: { - total_size += 2 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( - *value_.bool_list_); - break; - } - // .flwr.proto.Value.StringList string_list = 24; - case kStringList: { - total_size += 2 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( - *value_.string_list_); - break; - } - // .flwr.proto.Value.BytesList bytes_list = 25; - case kBytesList: { - total_size += 2 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( - *value_.bytes_list_); - break; - } - case VALUE_NOT_SET: { - break; - } - } - return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); -} - -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData Value::_class_data_ = { - ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, - Value::MergeImpl -}; -const 
::PROTOBUF_NAMESPACE_ID::Message::ClassData*Value::GetClassData() const { return &_class_data_; } - -void Value::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, - const ::PROTOBUF_NAMESPACE_ID::Message& from) { - static_cast(to)->MergeFrom( - static_cast(from)); -} - - -void Value::MergeFrom(const Value& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.Value) - GOOGLE_DCHECK_NE(&from, this); - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - switch (from.value_case()) { - case kDouble: { - _internal_set_double_(from._internal_double_()); - break; - } - case kSint64: { - _internal_set_sint64(from._internal_sint64()); - break; - } - case kBool: { - _internal_set_bool_(from._internal_bool_()); - break; - } - case kString: { - _internal_set_string(from._internal_string()); - break; - } - case kBytes: { - _internal_set_bytes(from._internal_bytes()); - break; - } - case kDoubleList: { - _internal_mutable_double_list()->::flwr::proto::Value_DoubleList::MergeFrom(from._internal_double_list()); - break; - } - case kSint64List: { - _internal_mutable_sint64_list()->::flwr::proto::Value_Sint64List::MergeFrom(from._internal_sint64_list()); - break; - } - case kBoolList: { - _internal_mutable_bool_list()->::flwr::proto::Value_BoolList::MergeFrom(from._internal_bool_list()); - break; - } - case kStringList: { - _internal_mutable_string_list()->::flwr::proto::Value_StringList::MergeFrom(from._internal_string_list()); - break; - } - case kBytesList: { - _internal_mutable_bytes_list()->::flwr::proto::Value_BytesList::MergeFrom(from._internal_bytes_list()); - break; - } - case VALUE_NOT_SET: { - break; - } - } - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); -} - -void Value::CopyFrom(const Value& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.Value) - if (&from == this) return; - Clear(); - MergeFrom(from); -} - -bool 
Value::IsInitialized() const { - return true; -} - -void Value::InternalSwap(Value* other) { - using std::swap; - _internal_metadata_.InternalSwap(&other->_internal_metadata_); - swap(value_, other->value_); - swap(_oneof_case_[0], other->_oneof_case_[0]); -} - -::PROTOBUF_NAMESPACE_ID::Metadata Value::GetMetadata() const { - return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( - &descriptor_table_flwr_2fproto_2ftask_2eproto_getter, &descriptor_table_flwr_2fproto_2ftask_2eproto_once, - file_level_metadata_flwr_2fproto_2ftask_2eproto[8]); -} - -// =================================================================== - -SecureAggregation_NamedValuesEntry_DoNotUse::SecureAggregation_NamedValuesEntry_DoNotUse() {} -SecureAggregation_NamedValuesEntry_DoNotUse::SecureAggregation_NamedValuesEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena) - : SuperType(arena) {} -void SecureAggregation_NamedValuesEntry_DoNotUse::MergeFrom(const SecureAggregation_NamedValuesEntry_DoNotUse& other) { - MergeFromInternal(other); -} -::PROTOBUF_NAMESPACE_ID::Metadata SecureAggregation_NamedValuesEntry_DoNotUse::GetMetadata() const { - return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( - &descriptor_table_flwr_2fproto_2ftask_2eproto_getter, &descriptor_table_flwr_2fproto_2ftask_2eproto_once, - file_level_metadata_flwr_2fproto_2ftask_2eproto[9]); -} - -// =================================================================== - -class SecureAggregation::_Internal { - public: -}; - -SecureAggregation::SecureAggregation(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), - named_values_(arena) { - SharedCtor(); - if (!is_message_owned) { - RegisterArenaDtor(arena); - } - // @@protoc_insertion_point(arena_constructor:flwr.proto.SecureAggregation) -} -SecureAggregation::SecureAggregation(const SecureAggregation& from) - : ::PROTOBUF_NAMESPACE_ID::Message() { - 
_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - named_values_.MergeFrom(from.named_values_); - // @@protoc_insertion_point(copy_constructor:flwr.proto.SecureAggregation) -} - -void SecureAggregation::SharedCtor() { -} - -SecureAggregation::~SecureAggregation() { - // @@protoc_insertion_point(destructor:flwr.proto.SecureAggregation) - if (GetArenaForAllocation() != nullptr) return; - SharedDtor(); - _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -inline void SecureAggregation::SharedDtor() { - GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); -} - -void SecureAggregation::ArenaDtor(void* object) { - SecureAggregation* _this = reinterpret_cast< SecureAggregation* >(object); - (void)_this; - _this->named_values_. ~MapField(); -} -inline void SecureAggregation::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena) { - if (arena != nullptr) { - arena->OwnCustomDestructor(this, &SecureAggregation::ArenaDtor); - } -} -void SecureAggregation::SetCachedSize(int size) const { - _cached_size_.Set(size); -} - -void SecureAggregation::Clear() { -// @@protoc_insertion_point(message_clear_start:flwr.proto.SecureAggregation) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - named_values_.Clear(); - _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -const char* SecureAggregation::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { -#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure - while (!ctx->Done(&ptr)) { - ::PROTOBUF_NAMESPACE_ID::uint32 tag; - ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); - switch (tag >> 3) { - // map named_values = 1; - case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { - ptr -= 1; - do { - ptr += 1; - ptr = ctx->ParseMessage(&named_values_, 
ptr); - CHK_(ptr); - if (!ctx->DataAvailable(ptr)) break; - } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); - } else - goto handle_unusual; - continue; - default: - goto handle_unusual; - } // switch - handle_unusual: - if ((tag == 0) || ((tag & 7) == 4)) { - CHK_(ptr); - ctx->SetLastTag(tag); - goto message_done; - } - ptr = UnknownFieldParse( - tag, - _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), - ptr, ctx); - CHK_(ptr != nullptr); - } // while -message_done: - return ptr; -failure: - ptr = nullptr; - goto message_done; -#undef CHK_ -} - -::PROTOBUF_NAMESPACE_ID::uint8* SecureAggregation::_InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { - // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.SecureAggregation) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - // map named_values = 1; - if (!this->_internal_named_values().empty()) { - typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >::const_pointer - ConstPtr; - typedef ConstPtr SortItem; - typedef ::PROTOBUF_NAMESPACE_ID::internal::CompareByDerefFirst Less; - struct Utf8Check { - static void Check(ConstPtr p) { - (void)p; - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( - p->first.data(), static_cast(p->first.length()), - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, - "flwr.proto.SecureAggregation.NamedValuesEntry.key"); - } - }; - - if (stream->IsSerializationDeterministic() && - this->_internal_named_values().size() > 1) { - ::std::unique_ptr items( - new SortItem[this->_internal_named_values().size()]); - typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >::size_type size_type; - size_type n = 0; - for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >::const_iterator - it = this->_internal_named_values().begin(); - it != 
this->_internal_named_values().end(); ++it, ++n) { - items[static_cast(n)] = SortItem(&*it); - } - ::std::sort(&items[0], &items[static_cast(n)], Less()); - for (size_type i = 0; i < n; i++) { - target = SecureAggregation_NamedValuesEntry_DoNotUse::Funcs::InternalSerialize(1, items[static_cast(i)]->first, items[static_cast(i)]->second, target, stream); - Utf8Check::Check(&(*items[static_cast(i)])); - } - } else { - for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >::const_iterator - it = this->_internal_named_values().begin(); - it != this->_internal_named_values().end(); ++it) { - target = SecureAggregation_NamedValuesEntry_DoNotUse::Funcs::InternalSerialize(1, it->first, it->second, target, stream); - Utf8Check::Check(&(*it)); - } - } - } - - if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( - _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); - } - // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.SecureAggregation) - return target; -} - -size_t SecureAggregation::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:flwr.proto.SecureAggregation) - size_t total_size = 0; - - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - // map named_values = 1; - total_size += 1 * - ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_named_values_size()); - for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >::const_iterator - it = this->_internal_named_values().begin(); - it != this->_internal_named_values().end(); ++it) { - total_size += SecureAggregation_NamedValuesEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second); - } - - return 
MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); -} - -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData SecureAggregation::_class_data_ = { - ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, - SecureAggregation::MergeImpl -}; -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*SecureAggregation::GetClassData() const { return &_class_data_; } - -void SecureAggregation::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, - const ::PROTOBUF_NAMESPACE_ID::Message& from) { - static_cast(to)->MergeFrom( - static_cast(from)); -} - - -void SecureAggregation::MergeFrom(const SecureAggregation& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.SecureAggregation) - GOOGLE_DCHECK_NE(&from, this); - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - named_values_.MergeFrom(from.named_values_); - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); -} - -void SecureAggregation::CopyFrom(const SecureAggregation& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.SecureAggregation) - if (&from == this) return; - Clear(); - MergeFrom(from); -} - -bool SecureAggregation::IsInitialized() const { - return true; -} - -void SecureAggregation::InternalSwap(SecureAggregation* other) { - using std::swap; - _internal_metadata_.InternalSwap(&other->_internal_metadata_); - named_values_.InternalSwap(&other->named_values_); -} - -::PROTOBUF_NAMESPACE_ID::Metadata SecureAggregation::GetMetadata() const { - return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( - &descriptor_table_flwr_2fproto_2ftask_2eproto_getter, &descriptor_table_flwr_2fproto_2ftask_2eproto_once, - file_level_metadata_flwr_2fproto_2ftask_2eproto[10]); -} - -// @@protoc_insertion_point(namespace_scope) -} // namespace proto -} // namespace flwr -PROTOBUF_NAMESPACE_OPEN -template<> PROTOBUF_NOINLINE ::flwr::proto::Task* Arena::CreateMaybeMessage< ::flwr::proto::Task >(Arena* 
arena) { - return Arena::CreateMessageInternal< ::flwr::proto::Task >(arena); -} -template<> PROTOBUF_NOINLINE ::flwr::proto::TaskIns* Arena::CreateMaybeMessage< ::flwr::proto::TaskIns >(Arena* arena) { - return Arena::CreateMessageInternal< ::flwr::proto::TaskIns >(arena); +template<> PROTOBUF_NOINLINE ::flwr::proto::TaskIns* Arena::CreateMaybeMessage< ::flwr::proto::TaskIns >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::TaskIns >(arena); } template<> PROTOBUF_NOINLINE ::flwr::proto::TaskRes* Arena::CreateMaybeMessage< ::flwr::proto::TaskRes >(Arena* arena) { return Arena::CreateMessageInternal< ::flwr::proto::TaskRes >(arena); } -template<> PROTOBUF_NOINLINE ::flwr::proto::Value_DoubleList* Arena::CreateMaybeMessage< ::flwr::proto::Value_DoubleList >(Arena* arena) { - return Arena::CreateMessageInternal< ::flwr::proto::Value_DoubleList >(arena); -} -template<> PROTOBUF_NOINLINE ::flwr::proto::Value_Sint64List* Arena::CreateMaybeMessage< ::flwr::proto::Value_Sint64List >(Arena* arena) { - return Arena::CreateMessageInternal< ::flwr::proto::Value_Sint64List >(arena); -} -template<> PROTOBUF_NOINLINE ::flwr::proto::Value_BoolList* Arena::CreateMaybeMessage< ::flwr::proto::Value_BoolList >(Arena* arena) { - return Arena::CreateMessageInternal< ::flwr::proto::Value_BoolList >(arena); -} -template<> PROTOBUF_NOINLINE ::flwr::proto::Value_StringList* Arena::CreateMaybeMessage< ::flwr::proto::Value_StringList >(Arena* arena) { - return Arena::CreateMessageInternal< ::flwr::proto::Value_StringList >(arena); -} -template<> PROTOBUF_NOINLINE ::flwr::proto::Value_BytesList* Arena::CreateMaybeMessage< ::flwr::proto::Value_BytesList >(Arena* arena) { - return Arena::CreateMessageInternal< ::flwr::proto::Value_BytesList >(arena); -} -template<> PROTOBUF_NOINLINE ::flwr::proto::Value* Arena::CreateMaybeMessage< ::flwr::proto::Value >(Arena* arena) { - return Arena::CreateMessageInternal< ::flwr::proto::Value >(arena); -} -template<> PROTOBUF_NOINLINE 
::flwr::proto::SecureAggregation_NamedValuesEntry_DoNotUse* Arena::CreateMaybeMessage< ::flwr::proto::SecureAggregation_NamedValuesEntry_DoNotUse >(Arena* arena) { - return Arena::CreateMessageInternal< ::flwr::proto::SecureAggregation_NamedValuesEntry_DoNotUse >(arena); -} -template<> PROTOBUF_NOINLINE ::flwr::proto::SecureAggregation* Arena::CreateMaybeMessage< ::flwr::proto::SecureAggregation >(Arena* arena) { - return Arena::CreateMessageInternal< ::flwr::proto::SecureAggregation >(arena); -} PROTOBUF_NAMESPACE_CLOSE // @@protoc_insertion_point(global_scope) diff --git a/src/cc/flwr/include/flwr/proto/task.pb.h b/src/cc/flwr/include/flwr/proto/task.pb.h index 0c2c94c64938..3dc421e2f8ab 100644 --- a/src/cc/flwr/include/flwr/proto/task.pb.h +++ b/src/cc/flwr/include/flwr/proto/task.pb.h @@ -30,12 +30,11 @@ #include #include // IWYU pragma: export #include // IWYU pragma: export -#include // IWYU pragma: export -#include -#include #include #include "flwr/proto/node.pb.h" +#include "flwr/proto/recordset.pb.h" #include "flwr/proto/transport.pb.h" +#include "flwr/proto/error.pb.h" // @@protoc_insertion_point(includes) #include #define PROTOBUF_INTERNAL_EXPORT_flwr_2fproto_2ftask_2eproto @@ -51,7 +50,7 @@ struct TableStruct_flwr_2fproto_2ftask_2eproto { PROTOBUF_SECTION_VARIABLE(protodesc_cold); static const ::PROTOBUF_NAMESPACE_ID::internal::AuxiliaryParseTableField aux[] PROTOBUF_SECTION_VARIABLE(protodesc_cold); - static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[11] + static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[3] PROTOBUF_SECTION_VARIABLE(protodesc_cold); static const ::PROTOBUF_NAMESPACE_ID::internal::FieldMetadata field_metadata[]; static const ::PROTOBUF_NAMESPACE_ID::internal::SerializationTable serialization_table[]; @@ -60,12 +59,6 @@ struct TableStruct_flwr_2fproto_2ftask_2eproto { extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_flwr_2fproto_2ftask_2eproto; namespace flwr { namespace 
proto { -class SecureAggregation; -struct SecureAggregationDefaultTypeInternal; -extern SecureAggregationDefaultTypeInternal _SecureAggregation_default_instance_; -class SecureAggregation_NamedValuesEntry_DoNotUse; -struct SecureAggregation_NamedValuesEntry_DoNotUseDefaultTypeInternal; -extern SecureAggregation_NamedValuesEntry_DoNotUseDefaultTypeInternal _SecureAggregation_NamedValuesEntry_DoNotUse_default_instance_; class Task; struct TaskDefaultTypeInternal; extern TaskDefaultTypeInternal _Task_default_instance_; @@ -75,38 +68,12 @@ extern TaskInsDefaultTypeInternal _TaskIns_default_instance_; class TaskRes; struct TaskResDefaultTypeInternal; extern TaskResDefaultTypeInternal _TaskRes_default_instance_; -class Value; -struct ValueDefaultTypeInternal; -extern ValueDefaultTypeInternal _Value_default_instance_; -class Value_BoolList; -struct Value_BoolListDefaultTypeInternal; -extern Value_BoolListDefaultTypeInternal _Value_BoolList_default_instance_; -class Value_BytesList; -struct Value_BytesListDefaultTypeInternal; -extern Value_BytesListDefaultTypeInternal _Value_BytesList_default_instance_; -class Value_DoubleList; -struct Value_DoubleListDefaultTypeInternal; -extern Value_DoubleListDefaultTypeInternal _Value_DoubleList_default_instance_; -class Value_Sint64List; -struct Value_Sint64ListDefaultTypeInternal; -extern Value_Sint64ListDefaultTypeInternal _Value_Sint64List_default_instance_; -class Value_StringList; -struct Value_StringListDefaultTypeInternal; -extern Value_StringListDefaultTypeInternal _Value_StringList_default_instance_; } // namespace proto } // namespace flwr PROTOBUF_NAMESPACE_OPEN -template<> ::flwr::proto::SecureAggregation* Arena::CreateMaybeMessage<::flwr::proto::SecureAggregation>(Arena*); -template<> ::flwr::proto::SecureAggregation_NamedValuesEntry_DoNotUse* Arena::CreateMaybeMessage<::flwr::proto::SecureAggregation_NamedValuesEntry_DoNotUse>(Arena*); template<> ::flwr::proto::Task* 
Arena::CreateMaybeMessage<::flwr::proto::Task>(Arena*); template<> ::flwr::proto::TaskIns* Arena::CreateMaybeMessage<::flwr::proto::TaskIns>(Arena*); template<> ::flwr::proto::TaskRes* Arena::CreateMaybeMessage<::flwr::proto::TaskRes>(Arena*); -template<> ::flwr::proto::Value* Arena::CreateMaybeMessage<::flwr::proto::Value>(Arena*); -template<> ::flwr::proto::Value_BoolList* Arena::CreateMaybeMessage<::flwr::proto::Value_BoolList>(Arena*); -template<> ::flwr::proto::Value_BytesList* Arena::CreateMaybeMessage<::flwr::proto::Value_BytesList>(Arena*); -template<> ::flwr::proto::Value_DoubleList* Arena::CreateMaybeMessage<::flwr::proto::Value_DoubleList>(Arena*); -template<> ::flwr::proto::Value_Sint64List* Arena::CreateMaybeMessage<::flwr::proto::Value_Sint64List>(Arena*); -template<> ::flwr::proto::Value_StringList* Arena::CreateMaybeMessage<::flwr::proto::Value_StringList>(Arena*); PROTOBUF_NAMESPACE_CLOSE namespace flwr { namespace proto { @@ -232,17 +199,18 @@ class Task final : // accessors ------------------------------------------------------- enum : int { - kAncestryFieldNumber = 6, - kCreatedAtFieldNumber = 3, + kAncestryFieldNumber = 7, kDeliveredAtFieldNumber = 4, - kTtlFieldNumber = 5, + kTaskTypeFieldNumber = 8, kProducerFieldNumber = 1, kConsumerFieldNumber = 2, - kSaFieldNumber = 7, - kLegacyServerMessageFieldNumber = 101, - kLegacyClientMessageFieldNumber = 102, + kRecordsetFieldNumber = 9, + kErrorFieldNumber = 10, + kCreatedAtFieldNumber = 3, + kPushedAtFieldNumber = 5, + kTtlFieldNumber = 6, }; - // repeated string ancestry = 6; + // repeated string ancestry = 7; int ancestry_size() const; private: int _internal_ancestry_size() const; @@ -266,20 +234,6 @@ class Task final : std::string* _internal_add_ancestry(); public: - // string created_at = 3; - void clear_created_at(); - const std::string& created_at() const; - template - void set_created_at(ArgT0&& arg0, ArgT... 
args); - std::string* mutable_created_at(); - PROTOBUF_MUST_USE_RESULT std::string* release_created_at(); - void set_allocated_created_at(std::string* created_at); - private: - const std::string& _internal_created_at() const; - inline PROTOBUF_ALWAYS_INLINE void _internal_set_created_at(const std::string& value); - std::string* _internal_mutable_created_at(); - public: - // string delivered_at = 4; void clear_delivered_at(); const std::string& delivered_at() const; @@ -294,18 +248,18 @@ class Task final : std::string* _internal_mutable_delivered_at(); public: - // string ttl = 5; - void clear_ttl(); - const std::string& ttl() const; + // string task_type = 8; + void clear_task_type(); + const std::string& task_type() const; template - void set_ttl(ArgT0&& arg0, ArgT... args); - std::string* mutable_ttl(); - PROTOBUF_MUST_USE_RESULT std::string* release_ttl(); - void set_allocated_ttl(std::string* ttl); + void set_task_type(ArgT0&& arg0, ArgT... args); + std::string* mutable_task_type(); + PROTOBUF_MUST_USE_RESULT std::string* release_task_type(); + void set_allocated_task_type(std::string* task_type); private: - const std::string& _internal_ttl() const; - inline PROTOBUF_ALWAYS_INLINE void _internal_set_ttl(const std::string& value); - std::string* _internal_mutable_ttl(); + const std::string& _internal_task_type() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_task_type(const std::string& value); + std::string* _internal_mutable_task_type(); public: // .flwr.proto.Node producer = 1; @@ -344,59 +298,68 @@ class Task final : ::flwr::proto::Node* consumer); ::flwr::proto::Node* unsafe_arena_release_consumer(); - // .flwr.proto.SecureAggregation sa = 7; - bool has_sa() const; + // .flwr.proto.RecordSet recordset = 9; + bool has_recordset() const; private: - bool _internal_has_sa() const; + bool _internal_has_recordset() const; public: - void clear_sa(); - const ::flwr::proto::SecureAggregation& sa() const; - PROTOBUF_MUST_USE_RESULT 
::flwr::proto::SecureAggregation* release_sa(); - ::flwr::proto::SecureAggregation* mutable_sa(); - void set_allocated_sa(::flwr::proto::SecureAggregation* sa); + void clear_recordset(); + const ::flwr::proto::RecordSet& recordset() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::RecordSet* release_recordset(); + ::flwr::proto::RecordSet* mutable_recordset(); + void set_allocated_recordset(::flwr::proto::RecordSet* recordset); private: - const ::flwr::proto::SecureAggregation& _internal_sa() const; - ::flwr::proto::SecureAggregation* _internal_mutable_sa(); + const ::flwr::proto::RecordSet& _internal_recordset() const; + ::flwr::proto::RecordSet* _internal_mutable_recordset(); public: - void unsafe_arena_set_allocated_sa( - ::flwr::proto::SecureAggregation* sa); - ::flwr::proto::SecureAggregation* unsafe_arena_release_sa(); + void unsafe_arena_set_allocated_recordset( + ::flwr::proto::RecordSet* recordset); + ::flwr::proto::RecordSet* unsafe_arena_release_recordset(); - // .flwr.proto.ServerMessage legacy_server_message = 101 [deprecated = true]; - PROTOBUF_DEPRECATED bool has_legacy_server_message() const; + // .flwr.proto.Error error = 10; + bool has_error() const; private: - bool _internal_has_legacy_server_message() const; + bool _internal_has_error() const; public: - PROTOBUF_DEPRECATED void clear_legacy_server_message(); - PROTOBUF_DEPRECATED const ::flwr::proto::ServerMessage& legacy_server_message() const; - PROTOBUF_MUST_USE_RESULT PROTOBUF_DEPRECATED ::flwr::proto::ServerMessage* release_legacy_server_message(); - PROTOBUF_DEPRECATED ::flwr::proto::ServerMessage* mutable_legacy_server_message(); - PROTOBUF_DEPRECATED void set_allocated_legacy_server_message(::flwr::proto::ServerMessage* legacy_server_message); + void clear_error(); + const ::flwr::proto::Error& error() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::Error* release_error(); + ::flwr::proto::Error* mutable_error(); + void set_allocated_error(::flwr::proto::Error* error); private: - const 
::flwr::proto::ServerMessage& _internal_legacy_server_message() const; - ::flwr::proto::ServerMessage* _internal_mutable_legacy_server_message(); + const ::flwr::proto::Error& _internal_error() const; + ::flwr::proto::Error* _internal_mutable_error(); public: - PROTOBUF_DEPRECATED void unsafe_arena_set_allocated_legacy_server_message( - ::flwr::proto::ServerMessage* legacy_server_message); - PROTOBUF_DEPRECATED ::flwr::proto::ServerMessage* unsafe_arena_release_legacy_server_message(); + void unsafe_arena_set_allocated_error( + ::flwr::proto::Error* error); + ::flwr::proto::Error* unsafe_arena_release_error(); - // .flwr.proto.ClientMessage legacy_client_message = 102 [deprecated = true]; - PROTOBUF_DEPRECATED bool has_legacy_client_message() const; + // double created_at = 3; + void clear_created_at(); + double created_at() const; + void set_created_at(double value); + private: + double _internal_created_at() const; + void _internal_set_created_at(double value); + public: + + // double pushed_at = 5; + void clear_pushed_at(); + double pushed_at() const; + void set_pushed_at(double value); private: - bool _internal_has_legacy_client_message() const; + double _internal_pushed_at() const; + void _internal_set_pushed_at(double value); public: - PROTOBUF_DEPRECATED void clear_legacy_client_message(); - PROTOBUF_DEPRECATED const ::flwr::proto::ClientMessage& legacy_client_message() const; - PROTOBUF_MUST_USE_RESULT PROTOBUF_DEPRECATED ::flwr::proto::ClientMessage* release_legacy_client_message(); - PROTOBUF_DEPRECATED ::flwr::proto::ClientMessage* mutable_legacy_client_message(); - PROTOBUF_DEPRECATED void set_allocated_legacy_client_message(::flwr::proto::ClientMessage* legacy_client_message); + + // double ttl = 6; + void clear_ttl(); + double ttl() const; + void set_ttl(double value); private: - const ::flwr::proto::ClientMessage& _internal_legacy_client_message() const; - ::flwr::proto::ClientMessage* _internal_mutable_legacy_client_message(); + double 
_internal_ttl() const; + void _internal_set_ttl(double value); public: - PROTOBUF_DEPRECATED void unsafe_arena_set_allocated_legacy_client_message( - ::flwr::proto::ClientMessage* legacy_client_message); - PROTOBUF_DEPRECATED ::flwr::proto::ClientMessage* unsafe_arena_release_legacy_client_message(); // @@protoc_insertion_point(class_scope:flwr.proto.Task) private: @@ -406,14 +369,15 @@ class Task final : typedef void InternalArenaConstructable_; typedef void DestructorSkippable_; ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField ancestry_; - ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr created_at_; ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr delivered_at_; - ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr ttl_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr task_type_; ::flwr::proto::Node* producer_; ::flwr::proto::Node* consumer_; - ::flwr::proto::SecureAggregation* sa_; - ::flwr::proto::ServerMessage* legacy_server_message_; - ::flwr::proto::ClientMessage* legacy_client_message_; + ::flwr::proto::RecordSet* recordset_; + ::flwr::proto::Error* error_; + double created_at_; + double pushed_at_; + double ttl_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; friend struct ::TableStruct_flwr_2fproto_2ftask_2eproto; }; @@ -541,7 +505,7 @@ class TaskIns final : kTaskIdFieldNumber = 1, kGroupIdFieldNumber = 2, kTaskFieldNumber = 4, - kWorkloadIdFieldNumber = 3, + kRunIdFieldNumber = 3, }; // string task_id = 1; void clear_task_id(); @@ -589,13 +553,13 @@ class TaskIns final : ::flwr::proto::Task* task); ::flwr::proto::Task* unsafe_arena_release_task(); - // sint64 workload_id = 3; - void clear_workload_id(); - ::PROTOBUF_NAMESPACE_ID::int64 workload_id() const; - void set_workload_id(::PROTOBUF_NAMESPACE_ID::int64 value); + // sint64 run_id = 3; + void clear_run_id(); + ::PROTOBUF_NAMESPACE_ID::int64 run_id() const; + void set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value); private: - ::PROTOBUF_NAMESPACE_ID::int64 _internal_workload_id() 
const; - void _internal_set_workload_id(::PROTOBUF_NAMESPACE_ID::int64 value); + ::PROTOBUF_NAMESPACE_ID::int64 _internal_run_id() const; + void _internal_set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value); public: // @@protoc_insertion_point(class_scope:flwr.proto.TaskIns) @@ -608,7 +572,7 @@ class TaskIns final : ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr task_id_; ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr group_id_; ::flwr::proto::Task* task_; - ::PROTOBUF_NAMESPACE_ID::int64 workload_id_; + ::PROTOBUF_NAMESPACE_ID::int64 run_id_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; friend struct ::TableStruct_flwr_2fproto_2ftask_2eproto; }; @@ -736,7 +700,7 @@ class TaskRes final : kTaskIdFieldNumber = 1, kGroupIdFieldNumber = 2, kTaskFieldNumber = 4, - kWorkloadIdFieldNumber = 3, + kRunIdFieldNumber = 3, }; // string task_id = 1; void clear_task_id(); @@ -784,13 +748,13 @@ class TaskRes final : ::flwr::proto::Task* task); ::flwr::proto::Task* unsafe_arena_release_task(); - // sint64 workload_id = 3; - void clear_workload_id(); - ::PROTOBUF_NAMESPACE_ID::int64 workload_id() const; - void set_workload_id(::PROTOBUF_NAMESPACE_ID::int64 value); + // sint64 run_id = 3; + void clear_run_id(); + ::PROTOBUF_NAMESPACE_ID::int64 run_id() const; + void set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value); private: - ::PROTOBUF_NAMESPACE_ID::int64 _internal_workload_id() const; - void _internal_set_workload_id(::PROTOBUF_NAMESPACE_ID::int64 value); + ::PROTOBUF_NAMESPACE_ID::int64 _internal_run_id() const; + void _internal_set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value); public: // @@protoc_insertion_point(class_scope:flwr.proto.TaskRes) @@ -803,1561 +767,211 @@ class TaskRes final : ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr task_id_; ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr group_id_; ::flwr::proto::Task* task_; - ::PROTOBUF_NAMESPACE_ID::int64 workload_id_; + ::PROTOBUF_NAMESPACE_ID::int64 run_id_; mutable 
::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; friend struct ::TableStruct_flwr_2fproto_2ftask_2eproto; }; -// ------------------------------------------------------------------- +// =================================================================== -class Value_DoubleList final : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.Value.DoubleList) */ { - public: - inline Value_DoubleList() : Value_DoubleList(nullptr) {} - ~Value_DoubleList() override; - explicit constexpr Value_DoubleList(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - Value_DoubleList(const Value_DoubleList& from); - Value_DoubleList(Value_DoubleList&& from) noexcept - : Value_DoubleList() { - *this = ::std::move(from); - } +// =================================================================== - inline Value_DoubleList& operator=(const Value_DoubleList& from) { - CopyFrom(from); - return *this; - } - inline Value_DoubleList& operator=(Value_DoubleList&& from) noexcept { - if (this == &from) return *this; - if (GetOwningArena() == from.GetOwningArena() - #ifdef PROTOBUF_FORCE_COPY_IN_MOVE - && GetOwningArena() != nullptr - #endif // !PROTOBUF_FORCE_COPY_IN_MOVE - ) { - InternalSwap(&from); - } else { - CopyFrom(from); - } - return *this; - } +#ifdef __GNUC__ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif // __GNUC__ +// Task - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { - return GetDescriptor(); +// .flwr.proto.Node producer = 1; +inline bool Task::_internal_has_producer() const { + return this != internal_default_instance() && producer_ != nullptr; +} +inline bool Task::has_producer() const { + return _internal_has_producer(); +} +inline const ::flwr::proto::Node& Task::_internal_producer() const { + const ::flwr::proto::Node* p = producer_; + return p != nullptr ? 
*p : reinterpret_cast( + ::flwr::proto::_Node_default_instance_); +} +inline const ::flwr::proto::Node& Task::producer() const { + // @@protoc_insertion_point(field_get:flwr.proto.Task.producer) + return _internal_producer(); +} +inline void Task::unsafe_arena_set_allocated_producer( + ::flwr::proto::Node* producer) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(producer_); } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { - return default_instance().GetMetadata().descriptor; + producer_ = producer; + if (producer) { + + } else { + } - static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { - return default_instance().GetMetadata().reflection; + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Task.producer) +} +inline ::flwr::proto::Node* Task::release_producer() { + + ::flwr::proto::Node* temp = producer_; + producer_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); } - static const Value_DoubleList& default_instance() { - return *internal_default_instance(); +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::flwr::proto::Node* Task::unsafe_arena_release_producer() { + // @@protoc_insertion_point(field_release:flwr.proto.Task.producer) + + ::flwr::proto::Node* temp = producer_; + producer_ = nullptr; + return temp; +} +inline ::flwr::proto::Node* Task::_internal_mutable_producer() { + + if (producer_ == nullptr) { + auto* p = CreateMaybeMessage<::flwr::proto::Node>(GetArenaForAllocation()); + producer_ = p; } - static inline const Value_DoubleList* 
internal_default_instance() { - return reinterpret_cast( - &_Value_DoubleList_default_instance_); + return producer_; +} +inline ::flwr::proto::Node* Task::mutable_producer() { + ::flwr::proto::Node* _msg = _internal_mutable_producer(); + // @@protoc_insertion_point(field_mutable:flwr.proto.Task.producer) + return _msg; +} +inline void Task::set_allocated_producer(::flwr::proto::Node* producer) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(producer_); } - static constexpr int kIndexInFileMessages = - 3; + if (producer) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper< + ::PROTOBUF_NAMESPACE_ID::MessageLite>::GetOwningArena( + reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(producer)); + if (message_arena != submessage_arena) { + producer = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, producer, submessage_arena); + } + + } else { + + } + producer_ = producer; + // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.producer) +} - friend void swap(Value_DoubleList& a, Value_DoubleList& b) { - a.Swap(&b); +// .flwr.proto.Node consumer = 2; +inline bool Task::_internal_has_consumer() const { + return this != internal_default_instance() && consumer_ != nullptr; +} +inline bool Task::has_consumer() const { + return _internal_has_consumer(); +} +inline const ::flwr::proto::Node& Task::_internal_consumer() const { + const ::flwr::proto::Node* p = consumer_; + return p != nullptr ? 
*p : reinterpret_cast( + ::flwr::proto::_Node_default_instance_); +} +inline const ::flwr::proto::Node& Task::consumer() const { + // @@protoc_insertion_point(field_get:flwr.proto.Task.consumer) + return _internal_consumer(); +} +inline void Task::unsafe_arena_set_allocated_consumer( + ::flwr::proto::Node* consumer) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(consumer_); } - inline void Swap(Value_DoubleList* other) { - if (other == this) return; - if (GetOwningArena() == other->GetOwningArena()) { - InternalSwap(other); - } else { - ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); - } + consumer_ = consumer; + if (consumer) { + + } else { + } - void UnsafeArenaSwap(Value_DoubleList* other) { - if (other == this) return; - GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); - InternalSwap(other); + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Task.consumer) +} +inline ::flwr::proto::Node* Task::release_consumer() { + + ::flwr::proto::Node* temp = consumer_; + consumer_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); } - - // implements Message ---------------------------------------------- - - inline Value_DoubleList* New() const final { - return new Value_DoubleList(); +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::flwr::proto::Node* Task::unsafe_arena_release_consumer() { + // @@protoc_insertion_point(field_release:flwr.proto.Task.consumer) + + ::flwr::proto::Node* temp = consumer_; + consumer_ = nullptr; + return temp; +} +inline ::flwr::proto::Node* 
Task::_internal_mutable_consumer() { + + if (consumer_ == nullptr) { + auto* p = CreateMaybeMessage<::flwr::proto::Node>(GetArenaForAllocation()); + consumer_ = p; } - - Value_DoubleList* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); + return consumer_; +} +inline ::flwr::proto::Node* Task::mutable_consumer() { + ::flwr::proto::Node* _msg = _internal_mutable_consumer(); + // @@protoc_insertion_point(field_mutable:flwr.proto.Task.consumer) + return _msg; +} +inline void Task::set_allocated_consumer(::flwr::proto::Node* consumer) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(consumer_); } - using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; - void CopyFrom(const Value_DoubleList& from); - using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - void MergeFrom(const Value_DoubleList& from); - private: - static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); - public: - PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; - bool IsInitialized() const final; - - size_t ByteSizeLong() const final; - const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; - int GetCachedSize() const final { return _cached_size_.Get(); } - - private: - void SharedCtor(); - void SharedDtor(); - void SetCachedSize(int size) const final; - void InternalSwap(Value_DoubleList* other); - friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "flwr.proto.Value.DoubleList"; + if (consumer) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + 
::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper< + ::PROTOBUF_NAMESPACE_ID::MessageLite>::GetOwningArena( + reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(consumer)); + if (message_arena != submessage_arena) { + consumer = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, consumer, submessage_arena); + } + + } else { + } - protected: - explicit Value_DoubleList(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned = false); - private: - static void ArenaDtor(void* object); - inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); - public: - - static const ClassData _class_data_; - const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; - - ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; - - // nested types ---------------------------------------------------- - - // accessors ------------------------------------------------------- + consumer_ = consumer; + // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.consumer) +} - enum : int { - kValsFieldNumber = 1, - }; - // repeated double vals = 1; - int vals_size() const; - private: - int _internal_vals_size() const; - public: - void clear_vals(); - private: - double _internal_vals(int index) const; - const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& - _internal_vals() const; - void _internal_add_vals(double value); - ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* - _internal_mutable_vals(); - public: - double vals(int index) const; - void set_vals(int index, double value); - void add_vals(double value); - const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& - vals() const; - ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* - mutable_vals(); - - // @@protoc_insertion_point(class_scope:flwr.proto.Value.DoubleList) - private: - class _Internal; - - template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; - typedef void InternalArenaConstructable_; - typedef void DestructorSkippable_; - 
::PROTOBUF_NAMESPACE_ID::RepeatedField< double > vals_; - mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; - friend struct ::TableStruct_flwr_2fproto_2ftask_2eproto; -}; -// ------------------------------------------------------------------- - -class Value_Sint64List final : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.Value.Sint64List) */ { - public: - inline Value_Sint64List() : Value_Sint64List(nullptr) {} - ~Value_Sint64List() override; - explicit constexpr Value_Sint64List(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - - Value_Sint64List(const Value_Sint64List& from); - Value_Sint64List(Value_Sint64List&& from) noexcept - : Value_Sint64List() { - *this = ::std::move(from); - } - - inline Value_Sint64List& operator=(const Value_Sint64List& from) { - CopyFrom(from); - return *this; - } - inline Value_Sint64List& operator=(Value_Sint64List&& from) noexcept { - if (this == &from) return *this; - if (GetOwningArena() == from.GetOwningArena() - #ifdef PROTOBUF_FORCE_COPY_IN_MOVE - && GetOwningArena() != nullptr - #endif // !PROTOBUF_FORCE_COPY_IN_MOVE - ) { - InternalSwap(&from); - } else { - CopyFrom(from); - } - return *this; - } - - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { - return GetDescriptor(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { - return default_instance().GetMetadata().descriptor; - } - static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { - return default_instance().GetMetadata().reflection; - } - static const Value_Sint64List& default_instance() { - return *internal_default_instance(); - } - static inline const Value_Sint64List* internal_default_instance() { - return reinterpret_cast( - &_Value_Sint64List_default_instance_); - } - static constexpr int kIndexInFileMessages = - 4; - - friend void swap(Value_Sint64List& a, Value_Sint64List& b) { - a.Swap(&b); - } - inline void Swap(Value_Sint64List* 
other) { - if (other == this) return; - if (GetOwningArena() == other->GetOwningArena()) { - InternalSwap(other); - } else { - ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); - } - } - void UnsafeArenaSwap(Value_Sint64List* other) { - if (other == this) return; - GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); - InternalSwap(other); - } - - // implements Message ---------------------------------------------- - - inline Value_Sint64List* New() const final { - return new Value_Sint64List(); - } - - Value_Sint64List* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); - } - using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; - void CopyFrom(const Value_Sint64List& from); - using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - void MergeFrom(const Value_Sint64List& from); - private: - static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); - public: - PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; - bool IsInitialized() const final; - - size_t ByteSizeLong() const final; - const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; - int GetCachedSize() const final { return _cached_size_.Get(); } - - private: - void SharedCtor(); - void SharedDtor(); - void SetCachedSize(int size) const final; - void InternalSwap(Value_Sint64List* other); - friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "flwr.proto.Value.Sint64List"; - } - protected: - explicit Value_Sint64List(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned = false); - private: - static void ArenaDtor(void* object); - inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); - 
public: - - static const ClassData _class_data_; - const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; - - ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; - - // nested types ---------------------------------------------------- - - // accessors ------------------------------------------------------- - - enum : int { - kValsFieldNumber = 1, - }; - // repeated sint64 vals = 1; - int vals_size() const; - private: - int _internal_vals_size() const; - public: - void clear_vals(); - private: - ::PROTOBUF_NAMESPACE_ID::int64 _internal_vals(int index) const; - const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >& - _internal_vals() const; - void _internal_add_vals(::PROTOBUF_NAMESPACE_ID::int64 value); - ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >* - _internal_mutable_vals(); - public: - ::PROTOBUF_NAMESPACE_ID::int64 vals(int index) const; - void set_vals(int index, ::PROTOBUF_NAMESPACE_ID::int64 value); - void add_vals(::PROTOBUF_NAMESPACE_ID::int64 value); - const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >& - vals() const; - ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >* - mutable_vals(); - - // @@protoc_insertion_point(class_scope:flwr.proto.Value.Sint64List) - private: - class _Internal; - - template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; - typedef void InternalArenaConstructable_; - typedef void DestructorSkippable_; - ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 > vals_; - mutable std::atomic _vals_cached_byte_size_; - mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; - friend struct ::TableStruct_flwr_2fproto_2ftask_2eproto; -}; -// ------------------------------------------------------------------- - -class Value_BoolList final : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.Value.BoolList) */ { - 
public: - inline Value_BoolList() : Value_BoolList(nullptr) {} - ~Value_BoolList() override; - explicit constexpr Value_BoolList(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - - Value_BoolList(const Value_BoolList& from); - Value_BoolList(Value_BoolList&& from) noexcept - : Value_BoolList() { - *this = ::std::move(from); - } - - inline Value_BoolList& operator=(const Value_BoolList& from) { - CopyFrom(from); - return *this; - } - inline Value_BoolList& operator=(Value_BoolList&& from) noexcept { - if (this == &from) return *this; - if (GetOwningArena() == from.GetOwningArena() - #ifdef PROTOBUF_FORCE_COPY_IN_MOVE - && GetOwningArena() != nullptr - #endif // !PROTOBUF_FORCE_COPY_IN_MOVE - ) { - InternalSwap(&from); - } else { - CopyFrom(from); - } - return *this; - } - - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { - return GetDescriptor(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { - return default_instance().GetMetadata().descriptor; - } - static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { - return default_instance().GetMetadata().reflection; - } - static const Value_BoolList& default_instance() { - return *internal_default_instance(); - } - static inline const Value_BoolList* internal_default_instance() { - return reinterpret_cast( - &_Value_BoolList_default_instance_); - } - static constexpr int kIndexInFileMessages = - 5; - - friend void swap(Value_BoolList& a, Value_BoolList& b) { - a.Swap(&b); - } - inline void Swap(Value_BoolList* other) { - if (other == this) return; - if (GetOwningArena() == other->GetOwningArena()) { - InternalSwap(other); - } else { - ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); - } - } - void UnsafeArenaSwap(Value_BoolList* other) { - if (other == this) return; - GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); - InternalSwap(other); - } - - // implements Message ---------------------------------------------- - - inline 
Value_BoolList* New() const final { - return new Value_BoolList(); - } - - Value_BoolList* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); - } - using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; - void CopyFrom(const Value_BoolList& from); - using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - void MergeFrom(const Value_BoolList& from); - private: - static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); - public: - PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; - bool IsInitialized() const final; - - size_t ByteSizeLong() const final; - const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; - int GetCachedSize() const final { return _cached_size_.Get(); } - - private: - void SharedCtor(); - void SharedDtor(); - void SetCachedSize(int size) const final; - void InternalSwap(Value_BoolList* other); - friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "flwr.proto.Value.BoolList"; - } - protected: - explicit Value_BoolList(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned = false); - private: - static void ArenaDtor(void* object); - inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); - public: - - static const ClassData _class_data_; - const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; - - ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; - - // nested types ---------------------------------------------------- - - // accessors ------------------------------------------------------- - - enum : int { - kValsFieldNumber = 1, - }; - // repeated bool vals = 1; - int vals_size() const; - private: - int 
_internal_vals_size() const; - public: - void clear_vals(); - private: - bool _internal_vals(int index) const; - const ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >& - _internal_vals() const; - void _internal_add_vals(bool value); - ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >* - _internal_mutable_vals(); - public: - bool vals(int index) const; - void set_vals(int index, bool value); - void add_vals(bool value); - const ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >& - vals() const; - ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >* - mutable_vals(); - - // @@protoc_insertion_point(class_scope:flwr.proto.Value.BoolList) - private: - class _Internal; - - template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; - typedef void InternalArenaConstructable_; - typedef void DestructorSkippable_; - ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool > vals_; - mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; - friend struct ::TableStruct_flwr_2fproto_2ftask_2eproto; -}; -// ------------------------------------------------------------------- - -class Value_StringList final : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.Value.StringList) */ { - public: - inline Value_StringList() : Value_StringList(nullptr) {} - ~Value_StringList() override; - explicit constexpr Value_StringList(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - - Value_StringList(const Value_StringList& from); - Value_StringList(Value_StringList&& from) noexcept - : Value_StringList() { - *this = ::std::move(from); - } - - inline Value_StringList& operator=(const Value_StringList& from) { - CopyFrom(from); - return *this; - } - inline Value_StringList& operator=(Value_StringList&& from) noexcept { - if (this == &from) return *this; - if (GetOwningArena() == from.GetOwningArena() - #ifdef PROTOBUF_FORCE_COPY_IN_MOVE - && GetOwningArena() != nullptr - #endif // !PROTOBUF_FORCE_COPY_IN_MOVE - ) { - InternalSwap(&from); - } 
else { - CopyFrom(from); - } - return *this; - } - - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { - return GetDescriptor(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { - return default_instance().GetMetadata().descriptor; - } - static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { - return default_instance().GetMetadata().reflection; - } - static const Value_StringList& default_instance() { - return *internal_default_instance(); - } - static inline const Value_StringList* internal_default_instance() { - return reinterpret_cast( - &_Value_StringList_default_instance_); - } - static constexpr int kIndexInFileMessages = - 6; - - friend void swap(Value_StringList& a, Value_StringList& b) { - a.Swap(&b); - } - inline void Swap(Value_StringList* other) { - if (other == this) return; - if (GetOwningArena() == other->GetOwningArena()) { - InternalSwap(other); - } else { - ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); - } - } - void UnsafeArenaSwap(Value_StringList* other) { - if (other == this) return; - GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); - InternalSwap(other); - } - - // implements Message ---------------------------------------------- - - inline Value_StringList* New() const final { - return new Value_StringList(); - } - - Value_StringList* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); - } - using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; - void CopyFrom(const Value_StringList& from); - using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - void MergeFrom(const Value_StringList& from); - private: - static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); - public: - PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; - bool IsInitialized() const final; - - size_t ByteSizeLong() const final; - const char* _InternalParse(const char* ptr, 
::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; - int GetCachedSize() const final { return _cached_size_.Get(); } - - private: - void SharedCtor(); - void SharedDtor(); - void SetCachedSize(int size) const final; - void InternalSwap(Value_StringList* other); - friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "flwr.proto.Value.StringList"; - } - protected: - explicit Value_StringList(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned = false); - private: - static void ArenaDtor(void* object); - inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); - public: - - static const ClassData _class_data_; - const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; - - ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; - - // nested types ---------------------------------------------------- - - // accessors ------------------------------------------------------- - - enum : int { - kValsFieldNumber = 1, - }; - // repeated string vals = 1; - int vals_size() const; - private: - int _internal_vals_size() const; - public: - void clear_vals(); - const std::string& vals(int index) const; - std::string* mutable_vals(int index); - void set_vals(int index, const std::string& value); - void set_vals(int index, std::string&& value); - void set_vals(int index, const char* value); - void set_vals(int index, const char* value, size_t size); - std::string* add_vals(); - void add_vals(const std::string& value); - void add_vals(std::string&& value); - void add_vals(const char* value); - void add_vals(const char* value, size_t size); - const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& vals() const; - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* mutable_vals(); - private: - 
const std::string& _internal_vals(int index) const; - std::string* _internal_add_vals(); - public: - - // @@protoc_insertion_point(class_scope:flwr.proto.Value.StringList) - private: - class _Internal; - - template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; - typedef void InternalArenaConstructable_; - typedef void DestructorSkippable_; - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField vals_; - mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; - friend struct ::TableStruct_flwr_2fproto_2ftask_2eproto; -}; -// ------------------------------------------------------------------- - -class Value_BytesList final : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.Value.BytesList) */ { - public: - inline Value_BytesList() : Value_BytesList(nullptr) {} - ~Value_BytesList() override; - explicit constexpr Value_BytesList(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - - Value_BytesList(const Value_BytesList& from); - Value_BytesList(Value_BytesList&& from) noexcept - : Value_BytesList() { - *this = ::std::move(from); - } - - inline Value_BytesList& operator=(const Value_BytesList& from) { - CopyFrom(from); - return *this; - } - inline Value_BytesList& operator=(Value_BytesList&& from) noexcept { - if (this == &from) return *this; - if (GetOwningArena() == from.GetOwningArena() - #ifdef PROTOBUF_FORCE_COPY_IN_MOVE - && GetOwningArena() != nullptr - #endif // !PROTOBUF_FORCE_COPY_IN_MOVE - ) { - InternalSwap(&from); - } else { - CopyFrom(from); - } - return *this; - } - - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { - return GetDescriptor(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { - return default_instance().GetMetadata().descriptor; - } - static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { - return default_instance().GetMetadata().reflection; - } - static const Value_BytesList& default_instance() { - return 
*internal_default_instance(); - } - static inline const Value_BytesList* internal_default_instance() { - return reinterpret_cast( - &_Value_BytesList_default_instance_); - } - static constexpr int kIndexInFileMessages = - 7; - - friend void swap(Value_BytesList& a, Value_BytesList& b) { - a.Swap(&b); - } - inline void Swap(Value_BytesList* other) { - if (other == this) return; - if (GetOwningArena() == other->GetOwningArena()) { - InternalSwap(other); - } else { - ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); - } - } - void UnsafeArenaSwap(Value_BytesList* other) { - if (other == this) return; - GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); - InternalSwap(other); - } - - // implements Message ---------------------------------------------- - - inline Value_BytesList* New() const final { - return new Value_BytesList(); - } - - Value_BytesList* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); - } - using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; - void CopyFrom(const Value_BytesList& from); - using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - void MergeFrom(const Value_BytesList& from); - private: - static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); - public: - PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; - bool IsInitialized() const final; - - size_t ByteSizeLong() const final; - const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; - int GetCachedSize() const final { return _cached_size_.Get(); } - - private: - void SharedCtor(); - void SharedDtor(); - void SetCachedSize(int size) const final; - void InternalSwap(Value_BytesList* other); - friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static 
::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "flwr.proto.Value.BytesList"; - } - protected: - explicit Value_BytesList(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned = false); - private: - static void ArenaDtor(void* object); - inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); - public: - - static const ClassData _class_data_; - const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; - - ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; - - // nested types ---------------------------------------------------- - - // accessors ------------------------------------------------------- - - enum : int { - kValsFieldNumber = 1, - }; - // repeated bytes vals = 1; - int vals_size() const; - private: - int _internal_vals_size() const; - public: - void clear_vals(); - const std::string& vals(int index) const; - std::string* mutable_vals(int index); - void set_vals(int index, const std::string& value); - void set_vals(int index, std::string&& value); - void set_vals(int index, const char* value); - void set_vals(int index, const void* value, size_t size); - std::string* add_vals(); - void add_vals(const std::string& value); - void add_vals(std::string&& value); - void add_vals(const char* value); - void add_vals(const void* value, size_t size); - const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& vals() const; - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* mutable_vals(); - private: - const std::string& _internal_vals(int index) const; - std::string* _internal_add_vals(); - public: - - // @@protoc_insertion_point(class_scope:flwr.proto.Value.BytesList) - private: - class _Internal; - - template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; - typedef void InternalArenaConstructable_; - typedef void DestructorSkippable_; - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField vals_; - mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; - friend struct 
::TableStruct_flwr_2fproto_2ftask_2eproto; -}; -// ------------------------------------------------------------------- - -class Value final : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.Value) */ { - public: - inline Value() : Value(nullptr) {} - ~Value() override; - explicit constexpr Value(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - - Value(const Value& from); - Value(Value&& from) noexcept - : Value() { - *this = ::std::move(from); - } - - inline Value& operator=(const Value& from) { - CopyFrom(from); - return *this; - } - inline Value& operator=(Value&& from) noexcept { - if (this == &from) return *this; - if (GetOwningArena() == from.GetOwningArena() - #ifdef PROTOBUF_FORCE_COPY_IN_MOVE - && GetOwningArena() != nullptr - #endif // !PROTOBUF_FORCE_COPY_IN_MOVE - ) { - InternalSwap(&from); - } else { - CopyFrom(from); - } - return *this; - } - - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { - return GetDescriptor(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { - return default_instance().GetMetadata().descriptor; - } - static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { - return default_instance().GetMetadata().reflection; - } - static const Value& default_instance() { - return *internal_default_instance(); - } - enum ValueCase { - kDouble = 1, - kSint64 = 2, - kBool = 3, - kString = 4, - kBytes = 5, - kDoubleList = 21, - kSint64List = 22, - kBoolList = 23, - kStringList = 24, - kBytesList = 25, - VALUE_NOT_SET = 0, - }; - - static inline const Value* internal_default_instance() { - return reinterpret_cast( - &_Value_default_instance_); - } - static constexpr int kIndexInFileMessages = - 8; - - friend void swap(Value& a, Value& b) { - a.Swap(&b); - } - inline void Swap(Value* other) { - if (other == this) return; - if (GetOwningArena() == other->GetOwningArena()) { - InternalSwap(other); - } else { - 
::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); - } - } - void UnsafeArenaSwap(Value* other) { - if (other == this) return; - GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); - InternalSwap(other); - } - - // implements Message ---------------------------------------------- - - inline Value* New() const final { - return new Value(); - } - - Value* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); - } - using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; - void CopyFrom(const Value& from); - using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - void MergeFrom(const Value& from); - private: - static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); - public: - PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; - bool IsInitialized() const final; - - size_t ByteSizeLong() const final; - const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; - int GetCachedSize() const final { return _cached_size_.Get(); } - - private: - void SharedCtor(); - void SharedDtor(); - void SetCachedSize(int size) const final; - void InternalSwap(Value* other); - friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "flwr.proto.Value"; - } - protected: - explicit Value(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned = false); - private: - static void ArenaDtor(void* object); - inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); - public: - - static const ClassData _class_data_; - const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; - - ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; - - // nested types 
---------------------------------------------------- - - typedef Value_DoubleList DoubleList; - typedef Value_Sint64List Sint64List; - typedef Value_BoolList BoolList; - typedef Value_StringList StringList; - typedef Value_BytesList BytesList; - - // accessors ------------------------------------------------------- - - enum : int { - kDoubleFieldNumber = 1, - kSint64FieldNumber = 2, - kBoolFieldNumber = 3, - kStringFieldNumber = 4, - kBytesFieldNumber = 5, - kDoubleListFieldNumber = 21, - kSint64ListFieldNumber = 22, - kBoolListFieldNumber = 23, - kStringListFieldNumber = 24, - kBytesListFieldNumber = 25, - }; - // double double = 1; - bool has_double_() const; - private: - bool _internal_has_double_() const; - public: - void clear_double_(); - double double_() const; - void set_double_(double value); - private: - double _internal_double_() const; - void _internal_set_double_(double value); - public: - - // sint64 sint64 = 2; - bool has_sint64() const; - private: - bool _internal_has_sint64() const; - public: - void clear_sint64(); - ::PROTOBUF_NAMESPACE_ID::int64 sint64() const; - void set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value); - private: - ::PROTOBUF_NAMESPACE_ID::int64 _internal_sint64() const; - void _internal_set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value); - public: - - // bool bool = 3; - bool has_bool_() const; - private: - bool _internal_has_bool_() const; - public: - void clear_bool_(); - bool bool_() const; - void set_bool_(bool value); - private: - bool _internal_bool_() const; - void _internal_set_bool_(bool value); - public: - - // string string = 4; - bool has_string() const; - private: - bool _internal_has_string() const; - public: - void clear_string(); - const std::string& string() const; - template - void set_string(ArgT0&& arg0, ArgT... 
args); - std::string* mutable_string(); - PROTOBUF_MUST_USE_RESULT std::string* release_string(); - void set_allocated_string(std::string* string); - private: - const std::string& _internal_string() const; - inline PROTOBUF_ALWAYS_INLINE void _internal_set_string(const std::string& value); - std::string* _internal_mutable_string(); - public: - - // bytes bytes = 5; - bool has_bytes() const; - private: - bool _internal_has_bytes() const; - public: - void clear_bytes(); - const std::string& bytes() const; - template - void set_bytes(ArgT0&& arg0, ArgT... args); - std::string* mutable_bytes(); - PROTOBUF_MUST_USE_RESULT std::string* release_bytes(); - void set_allocated_bytes(std::string* bytes); - private: - const std::string& _internal_bytes() const; - inline PROTOBUF_ALWAYS_INLINE void _internal_set_bytes(const std::string& value); - std::string* _internal_mutable_bytes(); - public: - - // .flwr.proto.Value.DoubleList double_list = 21; - bool has_double_list() const; - private: - bool _internal_has_double_list() const; - public: - void clear_double_list(); - const ::flwr::proto::Value_DoubleList& double_list() const; - PROTOBUF_MUST_USE_RESULT ::flwr::proto::Value_DoubleList* release_double_list(); - ::flwr::proto::Value_DoubleList* mutable_double_list(); - void set_allocated_double_list(::flwr::proto::Value_DoubleList* double_list); - private: - const ::flwr::proto::Value_DoubleList& _internal_double_list() const; - ::flwr::proto::Value_DoubleList* _internal_mutable_double_list(); - public: - void unsafe_arena_set_allocated_double_list( - ::flwr::proto::Value_DoubleList* double_list); - ::flwr::proto::Value_DoubleList* unsafe_arena_release_double_list(); - - // .flwr.proto.Value.Sint64List sint64_list = 22; - bool has_sint64_list() const; - private: - bool _internal_has_sint64_list() const; - public: - void clear_sint64_list(); - const ::flwr::proto::Value_Sint64List& sint64_list() const; - PROTOBUF_MUST_USE_RESULT ::flwr::proto::Value_Sint64List* 
release_sint64_list(); - ::flwr::proto::Value_Sint64List* mutable_sint64_list(); - void set_allocated_sint64_list(::flwr::proto::Value_Sint64List* sint64_list); - private: - const ::flwr::proto::Value_Sint64List& _internal_sint64_list() const; - ::flwr::proto::Value_Sint64List* _internal_mutable_sint64_list(); - public: - void unsafe_arena_set_allocated_sint64_list( - ::flwr::proto::Value_Sint64List* sint64_list); - ::flwr::proto::Value_Sint64List* unsafe_arena_release_sint64_list(); - - // .flwr.proto.Value.BoolList bool_list = 23; - bool has_bool_list() const; - private: - bool _internal_has_bool_list() const; - public: - void clear_bool_list(); - const ::flwr::proto::Value_BoolList& bool_list() const; - PROTOBUF_MUST_USE_RESULT ::flwr::proto::Value_BoolList* release_bool_list(); - ::flwr::proto::Value_BoolList* mutable_bool_list(); - void set_allocated_bool_list(::flwr::proto::Value_BoolList* bool_list); - private: - const ::flwr::proto::Value_BoolList& _internal_bool_list() const; - ::flwr::proto::Value_BoolList* _internal_mutable_bool_list(); - public: - void unsafe_arena_set_allocated_bool_list( - ::flwr::proto::Value_BoolList* bool_list); - ::flwr::proto::Value_BoolList* unsafe_arena_release_bool_list(); - - // .flwr.proto.Value.StringList string_list = 24; - bool has_string_list() const; - private: - bool _internal_has_string_list() const; - public: - void clear_string_list(); - const ::flwr::proto::Value_StringList& string_list() const; - PROTOBUF_MUST_USE_RESULT ::flwr::proto::Value_StringList* release_string_list(); - ::flwr::proto::Value_StringList* mutable_string_list(); - void set_allocated_string_list(::flwr::proto::Value_StringList* string_list); - private: - const ::flwr::proto::Value_StringList& _internal_string_list() const; - ::flwr::proto::Value_StringList* _internal_mutable_string_list(); - public: - void unsafe_arena_set_allocated_string_list( - ::flwr::proto::Value_StringList* string_list); - ::flwr::proto::Value_StringList* 
unsafe_arena_release_string_list(); - - // .flwr.proto.Value.BytesList bytes_list = 25; - bool has_bytes_list() const; - private: - bool _internal_has_bytes_list() const; - public: - void clear_bytes_list(); - const ::flwr::proto::Value_BytesList& bytes_list() const; - PROTOBUF_MUST_USE_RESULT ::flwr::proto::Value_BytesList* release_bytes_list(); - ::flwr::proto::Value_BytesList* mutable_bytes_list(); - void set_allocated_bytes_list(::flwr::proto::Value_BytesList* bytes_list); - private: - const ::flwr::proto::Value_BytesList& _internal_bytes_list() const; - ::flwr::proto::Value_BytesList* _internal_mutable_bytes_list(); - public: - void unsafe_arena_set_allocated_bytes_list( - ::flwr::proto::Value_BytesList* bytes_list); - ::flwr::proto::Value_BytesList* unsafe_arena_release_bytes_list(); - - void clear_value(); - ValueCase value_case() const; - // @@protoc_insertion_point(class_scope:flwr.proto.Value) - private: - class _Internal; - void set_has_double_(); - void set_has_sint64(); - void set_has_bool_(); - void set_has_string(); - void set_has_bytes(); - void set_has_double_list(); - void set_has_sint64_list(); - void set_has_bool_list(); - void set_has_string_list(); - void set_has_bytes_list(); - - inline bool has_value() const; - inline void clear_has_value(); - - template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; - typedef void InternalArenaConstructable_; - typedef void DestructorSkippable_; - union ValueUnion { - constexpr ValueUnion() : _constinit_{} {} - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_; - double double__; - ::PROTOBUF_NAMESPACE_ID::int64 sint64_; - bool bool__; - ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr string_; - ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr bytes_; - ::flwr::proto::Value_DoubleList* double_list_; - ::flwr::proto::Value_Sint64List* sint64_list_; - ::flwr::proto::Value_BoolList* bool_list_; - ::flwr::proto::Value_StringList* string_list_; - ::flwr::proto::Value_BytesList* 
bytes_list_; - } value_; - mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; - ::PROTOBUF_NAMESPACE_ID::uint32 _oneof_case_[1]; - - friend struct ::TableStruct_flwr_2fproto_2ftask_2eproto; -}; -// ------------------------------------------------------------------- - -class SecureAggregation_NamedValuesEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntry { -public: - typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntry SuperType; - SecureAggregation_NamedValuesEntry_DoNotUse(); - explicit constexpr SecureAggregation_NamedValuesEntry_DoNotUse( - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - explicit SecureAggregation_NamedValuesEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena); - void MergeFrom(const SecureAggregation_NamedValuesEntry_DoNotUse& other); - static const SecureAggregation_NamedValuesEntry_DoNotUse* internal_default_instance() { return reinterpret_cast(&_SecureAggregation_NamedValuesEntry_DoNotUse_default_instance_); } - static bool ValidateKey(std::string* s) { - return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "flwr.proto.SecureAggregation.NamedValuesEntry.key"); - } - static bool ValidateValue(void*) { return true; } - using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; -}; - -// ------------------------------------------------------------------- - -class SecureAggregation final : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.SecureAggregation) */ { - public: - inline SecureAggregation() : SecureAggregation(nullptr) {} - ~SecureAggregation() override; - explicit constexpr SecureAggregation(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - - SecureAggregation(const SecureAggregation& from); - SecureAggregation(SecureAggregation&& from) noexcept - : 
SecureAggregation() { - *this = ::std::move(from); - } - - inline SecureAggregation& operator=(const SecureAggregation& from) { - CopyFrom(from); - return *this; - } - inline SecureAggregation& operator=(SecureAggregation&& from) noexcept { - if (this == &from) return *this; - if (GetOwningArena() == from.GetOwningArena() - #ifdef PROTOBUF_FORCE_COPY_IN_MOVE - && GetOwningArena() != nullptr - #endif // !PROTOBUF_FORCE_COPY_IN_MOVE - ) { - InternalSwap(&from); - } else { - CopyFrom(from); - } - return *this; - } - - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { - return GetDescriptor(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { - return default_instance().GetMetadata().descriptor; - } - static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { - return default_instance().GetMetadata().reflection; - } - static const SecureAggregation& default_instance() { - return *internal_default_instance(); - } - static inline const SecureAggregation* internal_default_instance() { - return reinterpret_cast( - &_SecureAggregation_default_instance_); - } - static constexpr int kIndexInFileMessages = - 10; - - friend void swap(SecureAggregation& a, SecureAggregation& b) { - a.Swap(&b); - } - inline void Swap(SecureAggregation* other) { - if (other == this) return; - if (GetOwningArena() == other->GetOwningArena()) { - InternalSwap(other); - } else { - ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); - } - } - void UnsafeArenaSwap(SecureAggregation* other) { - if (other == this) return; - GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); - InternalSwap(other); - } - - // implements Message ---------------------------------------------- - - inline SecureAggregation* New() const final { - return new SecureAggregation(); - } - - SecureAggregation* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); - } - using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; - void 
CopyFrom(const SecureAggregation& from); - using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - void MergeFrom(const SecureAggregation& from); - private: - static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); - public: - PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; - bool IsInitialized() const final; - - size_t ByteSizeLong() const final; - const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; - int GetCachedSize() const final { return _cached_size_.Get(); } - - private: - void SharedCtor(); - void SharedDtor(); - void SetCachedSize(int size) const final; - void InternalSwap(SecureAggregation* other); - friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "flwr.proto.SecureAggregation"; - } - protected: - explicit SecureAggregation(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned = false); - private: - static void ArenaDtor(void* object); - inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); - public: - - static const ClassData _class_data_; - const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; - - ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; - - // nested types ---------------------------------------------------- - - - // accessors ------------------------------------------------------- - - enum : int { - kNamedValuesFieldNumber = 1, - }; - // map named_values = 1; - int named_values_size() const; - private: - int _internal_named_values_size() const; - public: - void clear_named_values(); - private: - const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >& - _internal_named_values() const; - 
::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >* - _internal_mutable_named_values(); - public: - const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >& - named_values() const; - ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >* - mutable_named_values(); - - // @@protoc_insertion_point(class_scope:flwr.proto.SecureAggregation) - private: - class _Internal; - - template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; - typedef void InternalArenaConstructable_; - typedef void DestructorSkippable_; - ::PROTOBUF_NAMESPACE_ID::internal::MapField< - SecureAggregation_NamedValuesEntry_DoNotUse, - std::string, ::flwr::proto::Value, - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING, - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_MESSAGE> named_values_; - mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; - friend struct ::TableStruct_flwr_2fproto_2ftask_2eproto; -}; -// =================================================================== - - -// =================================================================== - -#ifdef __GNUC__ - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wstrict-aliasing" -#endif // __GNUC__ -// Task - -// .flwr.proto.Node producer = 1; -inline bool Task::_internal_has_producer() const { - return this != internal_default_instance() && producer_ != nullptr; -} -inline bool Task::has_producer() const { - return _internal_has_producer(); -} -inline const ::flwr::proto::Node& Task::_internal_producer() const { - const ::flwr::proto::Node* p = producer_; - return p != nullptr ? 
*p : reinterpret_cast( - ::flwr::proto::_Node_default_instance_); -} -inline const ::flwr::proto::Node& Task::producer() const { - // @@protoc_insertion_point(field_get:flwr.proto.Task.producer) - return _internal_producer(); -} -inline void Task::unsafe_arena_set_allocated_producer( - ::flwr::proto::Node* producer) { - if (GetArenaForAllocation() == nullptr) { - delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(producer_); - } - producer_ = producer; - if (producer) { - - } else { - - } - // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Task.producer) -} -inline ::flwr::proto::Node* Task::release_producer() { - - ::flwr::proto::Node* temp = producer_; - producer_ = nullptr; -#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE - auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - if (GetArenaForAllocation() == nullptr) { delete old; } -#else // PROTOBUF_FORCE_COPY_IN_RELEASE - if (GetArenaForAllocation() != nullptr) { - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - } -#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE - return temp; -} -inline ::flwr::proto::Node* Task::unsafe_arena_release_producer() { - // @@protoc_insertion_point(field_release:flwr.proto.Task.producer) - - ::flwr::proto::Node* temp = producer_; - producer_ = nullptr; - return temp; -} -inline ::flwr::proto::Node* Task::_internal_mutable_producer() { - - if (producer_ == nullptr) { - auto* p = CreateMaybeMessage<::flwr::proto::Node>(GetArenaForAllocation()); - producer_ = p; - } - return producer_; -} -inline ::flwr::proto::Node* Task::mutable_producer() { - ::flwr::proto::Node* _msg = _internal_mutable_producer(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Task.producer) - return _msg; -} -inline void Task::set_allocated_producer(::flwr::proto::Node* producer) { - ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); - if 
(message_arena == nullptr) { - delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(producer_); - } - if (producer) { - ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = - ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper< - ::PROTOBUF_NAMESPACE_ID::MessageLite>::GetOwningArena( - reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(producer)); - if (message_arena != submessage_arena) { - producer = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, producer, submessage_arena); - } - - } else { - - } - producer_ = producer; - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.producer) -} - -// .flwr.proto.Node consumer = 2; -inline bool Task::_internal_has_consumer() const { - return this != internal_default_instance() && consumer_ != nullptr; -} -inline bool Task::has_consumer() const { - return _internal_has_consumer(); -} -inline const ::flwr::proto::Node& Task::_internal_consumer() const { - const ::flwr::proto::Node* p = consumer_; - return p != nullptr ? 
*p : reinterpret_cast( - ::flwr::proto::_Node_default_instance_); -} -inline const ::flwr::proto::Node& Task::consumer() const { - // @@protoc_insertion_point(field_get:flwr.proto.Task.consumer) - return _internal_consumer(); -} -inline void Task::unsafe_arena_set_allocated_consumer( - ::flwr::proto::Node* consumer) { - if (GetArenaForAllocation() == nullptr) { - delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(consumer_); - } - consumer_ = consumer; - if (consumer) { - - } else { - - } - // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Task.consumer) -} -inline ::flwr::proto::Node* Task::release_consumer() { - - ::flwr::proto::Node* temp = consumer_; - consumer_ = nullptr; -#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE - auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - if (GetArenaForAllocation() == nullptr) { delete old; } -#else // PROTOBUF_FORCE_COPY_IN_RELEASE - if (GetArenaForAllocation() != nullptr) { - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - } -#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE - return temp; -} -inline ::flwr::proto::Node* Task::unsafe_arena_release_consumer() { - // @@protoc_insertion_point(field_release:flwr.proto.Task.consumer) - - ::flwr::proto::Node* temp = consumer_; - consumer_ = nullptr; - return temp; -} -inline ::flwr::proto::Node* Task::_internal_mutable_consumer() { - - if (consumer_ == nullptr) { - auto* p = CreateMaybeMessage<::flwr::proto::Node>(GetArenaForAllocation()); - consumer_ = p; - } - return consumer_; -} -inline ::flwr::proto::Node* Task::mutable_consumer() { - ::flwr::proto::Node* _msg = _internal_mutable_consumer(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Task.consumer) - return _msg; -} -inline void Task::set_allocated_consumer(::flwr::proto::Node* consumer) { - ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); - if 
(message_arena == nullptr) { - delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(consumer_); - } - if (consumer) { - ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = - ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper< - ::PROTOBUF_NAMESPACE_ID::MessageLite>::GetOwningArena( - reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(consumer)); - if (message_arena != submessage_arena) { - consumer = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, consumer, submessage_arena); - } - - } else { - - } - consumer_ = consumer; - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.consumer) -} - -// string created_at = 3; +// double created_at = 3; inline void Task::clear_created_at() { - created_at_.ClearToEmpty(); + created_at_ = 0; } -inline const std::string& Task::created_at() const { +inline double Task::_internal_created_at() const { + return created_at_; +} +inline double Task::created_at() const { // @@protoc_insertion_point(field_get:flwr.proto.Task.created_at) return _internal_created_at(); } -template -inline PROTOBUF_ALWAYS_INLINE -void Task::set_created_at(ArgT0&& arg0, ArgT... 
args) { - - created_at_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); - // @@protoc_insertion_point(field_set:flwr.proto.Task.created_at) -} -inline std::string* Task::mutable_created_at() { - std::string* _s = _internal_mutable_created_at(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Task.created_at) - return _s; -} -inline const std::string& Task::_internal_created_at() const { - return created_at_.Get(); -} -inline void Task::_internal_set_created_at(const std::string& value) { - - created_at_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); -} -inline std::string* Task::_internal_mutable_created_at() { +inline void Task::_internal_set_created_at(double value) { - return created_at_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); -} -inline std::string* Task::release_created_at() { - // @@protoc_insertion_point(field_release:flwr.proto.Task.created_at) - return created_at_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); + created_at_ = value; } -inline void Task::set_allocated_created_at(std::string* created_at) { - if (created_at != nullptr) { - - } else { - - } - created_at_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), created_at, - GetArenaForAllocation()); - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.created_at) +inline void Task::set_created_at(double value) { + _internal_set_created_at(value); + // @@protoc_insertion_point(field_set:flwr.proto.Task.created_at) } // string delivered_at = 4; @@ -2406,53 +1020,47 @@ inline void Task::set_allocated_delivered_at(std::string* delivered_at) { // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.delivered_at) } -// string ttl = 5; -inline void Task::clear_ttl() { - ttl_.ClearToEmpty(); +// double 
pushed_at = 5; +inline void Task::clear_pushed_at() { + pushed_at_ = 0; } -inline const std::string& Task::ttl() const { - // @@protoc_insertion_point(field_get:flwr.proto.Task.ttl) - return _internal_ttl(); +inline double Task::_internal_pushed_at() const { + return pushed_at_; } -template -inline PROTOBUF_ALWAYS_INLINE -void Task::set_ttl(ArgT0&& arg0, ArgT... args) { - - ttl_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); - // @@protoc_insertion_point(field_set:flwr.proto.Task.ttl) +inline double Task::pushed_at() const { + // @@protoc_insertion_point(field_get:flwr.proto.Task.pushed_at) + return _internal_pushed_at(); } -inline std::string* Task::mutable_ttl() { - std::string* _s = _internal_mutable_ttl(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Task.ttl) - return _s; +inline void Task::_internal_set_pushed_at(double value) { + + pushed_at_ = value; } -inline const std::string& Task::_internal_ttl() const { - return ttl_.Get(); +inline void Task::set_pushed_at(double value) { + _internal_set_pushed_at(value); + // @@protoc_insertion_point(field_set:flwr.proto.Task.pushed_at) } -inline void Task::_internal_set_ttl(const std::string& value) { - - ttl_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); + +// double ttl = 6; +inline void Task::clear_ttl() { + ttl_ = 0; } -inline std::string* Task::_internal_mutable_ttl() { - - return ttl_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); +inline double Task::_internal_ttl() const { + return ttl_; } -inline std::string* Task::release_ttl() { - // @@protoc_insertion_point(field_release:flwr.proto.Task.ttl) - return ttl_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); +inline double Task::ttl() const { + // @@protoc_insertion_point(field_get:flwr.proto.Task.ttl) + return 
_internal_ttl(); } -inline void Task::set_allocated_ttl(std::string* ttl) { - if (ttl != nullptr) { - - } else { - - } - ttl_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ttl, - GetArenaForAllocation()); - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.ttl) +inline void Task::_internal_set_ttl(double value) { + + ttl_ = value; +} +inline void Task::set_ttl(double value) { + _internal_set_ttl(value); + // @@protoc_insertion_point(field_set:flwr.proto.Task.ttl) } -// repeated string ancestry = 6; +// repeated string ancestry = 7; inline int Task::_internal_ancestry_size() const { return ancestry_.size(); } @@ -2527,129 +1135,85 @@ Task::mutable_ancestry() { return &ancestry_; } -// .flwr.proto.SecureAggregation sa = 7; -inline bool Task::_internal_has_sa() const { - return this != internal_default_instance() && sa_ != nullptr; -} -inline bool Task::has_sa() const { - return _internal_has_sa(); -} -inline void Task::clear_sa() { - if (GetArenaForAllocation() == nullptr && sa_ != nullptr) { - delete sa_; - } - sa_ = nullptr; +// string task_type = 8; +inline void Task::clear_task_type() { + task_type_.ClearToEmpty(); } -inline const ::flwr::proto::SecureAggregation& Task::_internal_sa() const { - const ::flwr::proto::SecureAggregation* p = sa_; - return p != nullptr ? *p : reinterpret_cast( - ::flwr::proto::_SecureAggregation_default_instance_); +inline const std::string& Task::task_type() const { + // @@protoc_insertion_point(field_get:flwr.proto.Task.task_type) + return _internal_task_type(); } -inline const ::flwr::proto::SecureAggregation& Task::sa() const { - // @@protoc_insertion_point(field_get:flwr.proto.Task.sa) - return _internal_sa(); +template +inline PROTOBUF_ALWAYS_INLINE +void Task::set_task_type(ArgT0&& arg0, ArgT... 
args) { + + task_type_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:flwr.proto.Task.task_type) } -inline void Task::unsafe_arena_set_allocated_sa( - ::flwr::proto::SecureAggregation* sa) { - if (GetArenaForAllocation() == nullptr) { - delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(sa_); - } - sa_ = sa; - if (sa) { - - } else { - - } - // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Task.sa) +inline std::string* Task::mutable_task_type() { + std::string* _s = _internal_mutable_task_type(); + // @@protoc_insertion_point(field_mutable:flwr.proto.Task.task_type) + return _s; } -inline ::flwr::proto::SecureAggregation* Task::release_sa() { - - ::flwr::proto::SecureAggregation* temp = sa_; - sa_ = nullptr; -#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE - auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - if (GetArenaForAllocation() == nullptr) { delete old; } -#else // PROTOBUF_FORCE_COPY_IN_RELEASE - if (GetArenaForAllocation() != nullptr) { - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - } -#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE - return temp; +inline const std::string& Task::_internal_task_type() const { + return task_type_.Get(); } -inline ::flwr::proto::SecureAggregation* Task::unsafe_arena_release_sa() { - // @@protoc_insertion_point(field_release:flwr.proto.Task.sa) +inline void Task::_internal_set_task_type(const std::string& value) { - ::flwr::proto::SecureAggregation* temp = sa_; - sa_ = nullptr; - return temp; + task_type_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); } -inline ::flwr::proto::SecureAggregation* Task::_internal_mutable_sa() { +inline std::string* Task::_internal_mutable_task_type() { - if (sa_ == nullptr) { - auto* p 
= CreateMaybeMessage<::flwr::proto::SecureAggregation>(GetArenaForAllocation()); - sa_ = p; - } - return sa_; + return task_type_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); } -inline ::flwr::proto::SecureAggregation* Task::mutable_sa() { - ::flwr::proto::SecureAggregation* _msg = _internal_mutable_sa(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Task.sa) - return _msg; +inline std::string* Task::release_task_type() { + // @@protoc_insertion_point(field_release:flwr.proto.Task.task_type) + return task_type_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); } -inline void Task::set_allocated_sa(::flwr::proto::SecureAggregation* sa) { - ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); - if (message_arena == nullptr) { - delete sa_; - } - if (sa) { - ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = - ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::SecureAggregation>::GetOwningArena(sa); - if (message_arena != submessage_arena) { - sa = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, sa, submessage_arena); - } +inline void Task::set_allocated_task_type(std::string* task_type) { + if (task_type != nullptr) { } else { } - sa_ = sa; - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.sa) + task_type_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), task_type, + GetArenaForAllocation()); + // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.task_type) } -// .flwr.proto.ServerMessage legacy_server_message = 101 [deprecated = true]; -inline bool Task::_internal_has_legacy_server_message() const { - return this != internal_default_instance() && legacy_server_message_ != nullptr; +// .flwr.proto.RecordSet recordset = 9; +inline bool Task::_internal_has_recordset() const { + return this != internal_default_instance() && recordset_ != nullptr; } 
-inline bool Task::has_legacy_server_message() const { - return _internal_has_legacy_server_message(); +inline bool Task::has_recordset() const { + return _internal_has_recordset(); } -inline const ::flwr::proto::ServerMessage& Task::_internal_legacy_server_message() const { - const ::flwr::proto::ServerMessage* p = legacy_server_message_; - return p != nullptr ? *p : reinterpret_cast( - ::flwr::proto::_ServerMessage_default_instance_); +inline const ::flwr::proto::RecordSet& Task::_internal_recordset() const { + const ::flwr::proto::RecordSet* p = recordset_; + return p != nullptr ? *p : reinterpret_cast( + ::flwr::proto::_RecordSet_default_instance_); } -inline const ::flwr::proto::ServerMessage& Task::legacy_server_message() const { - // @@protoc_insertion_point(field_get:flwr.proto.Task.legacy_server_message) - return _internal_legacy_server_message(); +inline const ::flwr::proto::RecordSet& Task::recordset() const { + // @@protoc_insertion_point(field_get:flwr.proto.Task.recordset) + return _internal_recordset(); } -inline void Task::unsafe_arena_set_allocated_legacy_server_message( - ::flwr::proto::ServerMessage* legacy_server_message) { +inline void Task::unsafe_arena_set_allocated_recordset( + ::flwr::proto::RecordSet* recordset) { if (GetArenaForAllocation() == nullptr) { - delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(legacy_server_message_); + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(recordset_); } - legacy_server_message_ = legacy_server_message; - if (legacy_server_message) { + recordset_ = recordset; + if (recordset) { } else { } - // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Task.legacy_server_message) + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Task.recordset) } -inline ::flwr::proto::ServerMessage* Task::release_legacy_server_message() { +inline ::flwr::proto::RecordSet* Task::release_recordset() { - ::flwr::proto::ServerMessage* temp = 
legacy_server_message_; - legacy_server_message_ = nullptr; + ::flwr::proto::RecordSet* temp = recordset_; + recordset_ = nullptr; #ifdef PROTOBUF_FORCE_COPY_IN_RELEASE auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); @@ -2661,81 +1225,81 @@ inline ::flwr::proto::ServerMessage* Task::release_legacy_server_message() { #endif // !PROTOBUF_FORCE_COPY_IN_RELEASE return temp; } -inline ::flwr::proto::ServerMessage* Task::unsafe_arena_release_legacy_server_message() { - // @@protoc_insertion_point(field_release:flwr.proto.Task.legacy_server_message) +inline ::flwr::proto::RecordSet* Task::unsafe_arena_release_recordset() { + // @@protoc_insertion_point(field_release:flwr.proto.Task.recordset) - ::flwr::proto::ServerMessage* temp = legacy_server_message_; - legacy_server_message_ = nullptr; + ::flwr::proto::RecordSet* temp = recordset_; + recordset_ = nullptr; return temp; } -inline ::flwr::proto::ServerMessage* Task::_internal_mutable_legacy_server_message() { +inline ::flwr::proto::RecordSet* Task::_internal_mutable_recordset() { - if (legacy_server_message_ == nullptr) { - auto* p = CreateMaybeMessage<::flwr::proto::ServerMessage>(GetArenaForAllocation()); - legacy_server_message_ = p; + if (recordset_ == nullptr) { + auto* p = CreateMaybeMessage<::flwr::proto::RecordSet>(GetArenaForAllocation()); + recordset_ = p; } - return legacy_server_message_; + return recordset_; } -inline ::flwr::proto::ServerMessage* Task::mutable_legacy_server_message() { - ::flwr::proto::ServerMessage* _msg = _internal_mutable_legacy_server_message(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Task.legacy_server_message) +inline ::flwr::proto::RecordSet* Task::mutable_recordset() { + ::flwr::proto::RecordSet* _msg = _internal_mutable_recordset(); + // @@protoc_insertion_point(field_mutable:flwr.proto.Task.recordset) return _msg; } -inline void 
Task::set_allocated_legacy_server_message(::flwr::proto::ServerMessage* legacy_server_message) { +inline void Task::set_allocated_recordset(::flwr::proto::RecordSet* recordset) { ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); if (message_arena == nullptr) { - delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(legacy_server_message_); + delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(recordset_); } - if (legacy_server_message) { + if (recordset) { ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper< ::PROTOBUF_NAMESPACE_ID::MessageLite>::GetOwningArena( - reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(legacy_server_message)); + reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(recordset)); if (message_arena != submessage_arena) { - legacy_server_message = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, legacy_server_message, submessage_arena); + recordset = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, recordset, submessage_arena); } } else { } - legacy_server_message_ = legacy_server_message; - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.legacy_server_message) + recordset_ = recordset; + // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.recordset) } -// .flwr.proto.ClientMessage legacy_client_message = 102 [deprecated = true]; -inline bool Task::_internal_has_legacy_client_message() const { - return this != internal_default_instance() && legacy_client_message_ != nullptr; +// .flwr.proto.Error error = 10; +inline bool Task::_internal_has_error() const { + return this != internal_default_instance() && error_ != nullptr; } -inline bool Task::has_legacy_client_message() const { - return _internal_has_legacy_client_message(); +inline bool Task::has_error() const { + return _internal_has_error(); } -inline const ::flwr::proto::ClientMessage& 
Task::_internal_legacy_client_message() const { - const ::flwr::proto::ClientMessage* p = legacy_client_message_; - return p != nullptr ? *p : reinterpret_cast( - ::flwr::proto::_ClientMessage_default_instance_); +inline const ::flwr::proto::Error& Task::_internal_error() const { + const ::flwr::proto::Error* p = error_; + return p != nullptr ? *p : reinterpret_cast( + ::flwr::proto::_Error_default_instance_); } -inline const ::flwr::proto::ClientMessage& Task::legacy_client_message() const { - // @@protoc_insertion_point(field_get:flwr.proto.Task.legacy_client_message) - return _internal_legacy_client_message(); +inline const ::flwr::proto::Error& Task::error() const { + // @@protoc_insertion_point(field_get:flwr.proto.Task.error) + return _internal_error(); } -inline void Task::unsafe_arena_set_allocated_legacy_client_message( - ::flwr::proto::ClientMessage* legacy_client_message) { +inline void Task::unsafe_arena_set_allocated_error( + ::flwr::proto::Error* error) { if (GetArenaForAllocation() == nullptr) { - delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(legacy_client_message_); + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(error_); } - legacy_client_message_ = legacy_client_message; - if (legacy_client_message) { + error_ = error; + if (error) { } else { } - // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Task.legacy_client_message) + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Task.error) } -inline ::flwr::proto::ClientMessage* Task::release_legacy_client_message() { +inline ::flwr::proto::Error* Task::release_error() { - ::flwr::proto::ClientMessage* temp = legacy_client_message_; - legacy_client_message_ = nullptr; + ::flwr::proto::Error* temp = error_; + error_ = nullptr; #ifdef PROTOBUF_FORCE_COPY_IN_RELEASE auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); @@ -2747,46 +1311,46 
@@ inline ::flwr::proto::ClientMessage* Task::release_legacy_client_message() { #endif // !PROTOBUF_FORCE_COPY_IN_RELEASE return temp; } -inline ::flwr::proto::ClientMessage* Task::unsafe_arena_release_legacy_client_message() { - // @@protoc_insertion_point(field_release:flwr.proto.Task.legacy_client_message) +inline ::flwr::proto::Error* Task::unsafe_arena_release_error() { + // @@protoc_insertion_point(field_release:flwr.proto.Task.error) - ::flwr::proto::ClientMessage* temp = legacy_client_message_; - legacy_client_message_ = nullptr; + ::flwr::proto::Error* temp = error_; + error_ = nullptr; return temp; } -inline ::flwr::proto::ClientMessage* Task::_internal_mutable_legacy_client_message() { +inline ::flwr::proto::Error* Task::_internal_mutable_error() { - if (legacy_client_message_ == nullptr) { - auto* p = CreateMaybeMessage<::flwr::proto::ClientMessage>(GetArenaForAllocation()); - legacy_client_message_ = p; + if (error_ == nullptr) { + auto* p = CreateMaybeMessage<::flwr::proto::Error>(GetArenaForAllocation()); + error_ = p; } - return legacy_client_message_; + return error_; } -inline ::flwr::proto::ClientMessage* Task::mutable_legacy_client_message() { - ::flwr::proto::ClientMessage* _msg = _internal_mutable_legacy_client_message(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Task.legacy_client_message) +inline ::flwr::proto::Error* Task::mutable_error() { + ::flwr::proto::Error* _msg = _internal_mutable_error(); + // @@protoc_insertion_point(field_mutable:flwr.proto.Task.error) return _msg; } -inline void Task::set_allocated_legacy_client_message(::flwr::proto::ClientMessage* legacy_client_message) { +inline void Task::set_allocated_error(::flwr::proto::Error* error) { ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); if (message_arena == nullptr) { - delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(legacy_client_message_); + delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(error_); } - if 
(legacy_client_message) { + if (error) { ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper< ::PROTOBUF_NAMESPACE_ID::MessageLite>::GetOwningArena( - reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(legacy_client_message)); + reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(error)); if (message_arena != submessage_arena) { - legacy_client_message = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, legacy_client_message, submessage_arena); + error = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, error, submessage_arena); } } else { } - legacy_client_message_ = legacy_client_message; - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.legacy_client_message) + error_ = error; + // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.error) } // ------------------------------------------------------------------- @@ -2808,233 +1372,27 @@ void TaskIns::set_task_id(ArgT0&& arg0, ArgT... 
args) { task_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); // @@protoc_insertion_point(field_set:flwr.proto.TaskIns.task_id) } -inline std::string* TaskIns::mutable_task_id() { - std::string* _s = _internal_mutable_task_id(); - // @@protoc_insertion_point(field_mutable:flwr.proto.TaskIns.task_id) - return _s; -} -inline const std::string& TaskIns::_internal_task_id() const { - return task_id_.Get(); -} -inline void TaskIns::_internal_set_task_id(const std::string& value) { - - task_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); -} -inline std::string* TaskIns::_internal_mutable_task_id() { - - return task_id_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); -} -inline std::string* TaskIns::release_task_id() { - // @@protoc_insertion_point(field_release:flwr.proto.TaskIns.task_id) - return task_id_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); -} -inline void TaskIns::set_allocated_task_id(std::string* task_id) { - if (task_id != nullptr) { - - } else { - - } - task_id_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), task_id, - GetArenaForAllocation()); - // @@protoc_insertion_point(field_set_allocated:flwr.proto.TaskIns.task_id) -} - -// string group_id = 2; -inline void TaskIns::clear_group_id() { - group_id_.ClearToEmpty(); -} -inline const std::string& TaskIns::group_id() const { - // @@protoc_insertion_point(field_get:flwr.proto.TaskIns.group_id) - return _internal_group_id(); -} -template -inline PROTOBUF_ALWAYS_INLINE -void TaskIns::set_group_id(ArgT0&& arg0, ArgT... 
args) { - - group_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); - // @@protoc_insertion_point(field_set:flwr.proto.TaskIns.group_id) -} -inline std::string* TaskIns::mutable_group_id() { - std::string* _s = _internal_mutable_group_id(); - // @@protoc_insertion_point(field_mutable:flwr.proto.TaskIns.group_id) - return _s; -} -inline const std::string& TaskIns::_internal_group_id() const { - return group_id_.Get(); -} -inline void TaskIns::_internal_set_group_id(const std::string& value) { - - group_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); -} -inline std::string* TaskIns::_internal_mutable_group_id() { - - return group_id_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); -} -inline std::string* TaskIns::release_group_id() { - // @@protoc_insertion_point(field_release:flwr.proto.TaskIns.group_id) - return group_id_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); -} -inline void TaskIns::set_allocated_group_id(std::string* group_id) { - if (group_id != nullptr) { - - } else { - - } - group_id_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), group_id, - GetArenaForAllocation()); - // @@protoc_insertion_point(field_set_allocated:flwr.proto.TaskIns.group_id) -} - -// sint64 workload_id = 3; -inline void TaskIns::clear_workload_id() { - workload_id_ = int64_t{0}; -} -inline ::PROTOBUF_NAMESPACE_ID::int64 TaskIns::_internal_workload_id() const { - return workload_id_; -} -inline ::PROTOBUF_NAMESPACE_ID::int64 TaskIns::workload_id() const { - // @@protoc_insertion_point(field_get:flwr.proto.TaskIns.workload_id) - return _internal_workload_id(); -} -inline void TaskIns::_internal_set_workload_id(::PROTOBUF_NAMESPACE_ID::int64 value) { - - workload_id_ = value; -} -inline void 
TaskIns::set_workload_id(::PROTOBUF_NAMESPACE_ID::int64 value) { - _internal_set_workload_id(value); - // @@protoc_insertion_point(field_set:flwr.proto.TaskIns.workload_id) -} - -// .flwr.proto.Task task = 4; -inline bool TaskIns::_internal_has_task() const { - return this != internal_default_instance() && task_ != nullptr; -} -inline bool TaskIns::has_task() const { - return _internal_has_task(); -} -inline void TaskIns::clear_task() { - if (GetArenaForAllocation() == nullptr && task_ != nullptr) { - delete task_; - } - task_ = nullptr; -} -inline const ::flwr::proto::Task& TaskIns::_internal_task() const { - const ::flwr::proto::Task* p = task_; - return p != nullptr ? *p : reinterpret_cast( - ::flwr::proto::_Task_default_instance_); -} -inline const ::flwr::proto::Task& TaskIns::task() const { - // @@protoc_insertion_point(field_get:flwr.proto.TaskIns.task) - return _internal_task(); -} -inline void TaskIns::unsafe_arena_set_allocated_task( - ::flwr::proto::Task* task) { - if (GetArenaForAllocation() == nullptr) { - delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(task_); - } - task_ = task; - if (task) { - - } else { - - } - // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.TaskIns.task) -} -inline ::flwr::proto::Task* TaskIns::release_task() { - - ::flwr::proto::Task* temp = task_; - task_ = nullptr; -#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE - auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - if (GetArenaForAllocation() == nullptr) { delete old; } -#else // PROTOBUF_FORCE_COPY_IN_RELEASE - if (GetArenaForAllocation() != nullptr) { - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - } -#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE - return temp; -} -inline ::flwr::proto::Task* TaskIns::unsafe_arena_release_task() { - // @@protoc_insertion_point(field_release:flwr.proto.TaskIns.task) - - ::flwr::proto::Task* temp = 
task_; - task_ = nullptr; - return temp; -} -inline ::flwr::proto::Task* TaskIns::_internal_mutable_task() { - - if (task_ == nullptr) { - auto* p = CreateMaybeMessage<::flwr::proto::Task>(GetArenaForAllocation()); - task_ = p; - } - return task_; -} -inline ::flwr::proto::Task* TaskIns::mutable_task() { - ::flwr::proto::Task* _msg = _internal_mutable_task(); - // @@protoc_insertion_point(field_mutable:flwr.proto.TaskIns.task) - return _msg; -} -inline void TaskIns::set_allocated_task(::flwr::proto::Task* task) { - ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); - if (message_arena == nullptr) { - delete task_; - } - if (task) { - ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = - ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Task>::GetOwningArena(task); - if (message_arena != submessage_arena) { - task = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, task, submessage_arena); - } - - } else { - - } - task_ = task; - // @@protoc_insertion_point(field_set_allocated:flwr.proto.TaskIns.task) -} - -// ------------------------------------------------------------------- - -// TaskRes - -// string task_id = 1; -inline void TaskRes::clear_task_id() { - task_id_.ClearToEmpty(); -} -inline const std::string& TaskRes::task_id() const { - // @@protoc_insertion_point(field_get:flwr.proto.TaskRes.task_id) - return _internal_task_id(); -} -template -inline PROTOBUF_ALWAYS_INLINE -void TaskRes::set_task_id(ArgT0&& arg0, ArgT... 
args) { - - task_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); - // @@protoc_insertion_point(field_set:flwr.proto.TaskRes.task_id) -} -inline std::string* TaskRes::mutable_task_id() { +inline std::string* TaskIns::mutable_task_id() { std::string* _s = _internal_mutable_task_id(); - // @@protoc_insertion_point(field_mutable:flwr.proto.TaskRes.task_id) + // @@protoc_insertion_point(field_mutable:flwr.proto.TaskIns.task_id) return _s; } -inline const std::string& TaskRes::_internal_task_id() const { +inline const std::string& TaskIns::_internal_task_id() const { return task_id_.Get(); } -inline void TaskRes::_internal_set_task_id(const std::string& value) { +inline void TaskIns::_internal_set_task_id(const std::string& value) { task_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); } -inline std::string* TaskRes::_internal_mutable_task_id() { +inline std::string* TaskIns::_internal_mutable_task_id() { return task_id_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); } -inline std::string* TaskRes::release_task_id() { - // @@protoc_insertion_point(field_release:flwr.proto.TaskRes.task_id) +inline std::string* TaskIns::release_task_id() { + // @@protoc_insertion_point(field_release:flwr.proto.TaskIns.task_id) return task_id_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); } -inline void TaskRes::set_allocated_task_id(std::string* task_id) { +inline void TaskIns::set_allocated_task_id(std::string* task_id) { if (task_id != nullptr) { } else { @@ -3042,45 +1400,45 @@ inline void TaskRes::set_allocated_task_id(std::string* task_id) { } task_id_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), task_id, GetArenaForAllocation()); - // 
@@protoc_insertion_point(field_set_allocated:flwr.proto.TaskRes.task_id) + // @@protoc_insertion_point(field_set_allocated:flwr.proto.TaskIns.task_id) } // string group_id = 2; -inline void TaskRes::clear_group_id() { +inline void TaskIns::clear_group_id() { group_id_.ClearToEmpty(); } -inline const std::string& TaskRes::group_id() const { - // @@protoc_insertion_point(field_get:flwr.proto.TaskRes.group_id) +inline const std::string& TaskIns::group_id() const { + // @@protoc_insertion_point(field_get:flwr.proto.TaskIns.group_id) return _internal_group_id(); } template inline PROTOBUF_ALWAYS_INLINE -void TaskRes::set_group_id(ArgT0&& arg0, ArgT... args) { +void TaskIns::set_group_id(ArgT0&& arg0, ArgT... args) { group_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); - // @@protoc_insertion_point(field_set:flwr.proto.TaskRes.group_id) + // @@protoc_insertion_point(field_set:flwr.proto.TaskIns.group_id) } -inline std::string* TaskRes::mutable_group_id() { +inline std::string* TaskIns::mutable_group_id() { std::string* _s = _internal_mutable_group_id(); - // @@protoc_insertion_point(field_mutable:flwr.proto.TaskRes.group_id) + // @@protoc_insertion_point(field_mutable:flwr.proto.TaskIns.group_id) return _s; } -inline const std::string& TaskRes::_internal_group_id() const { +inline const std::string& TaskIns::_internal_group_id() const { return group_id_.Get(); } -inline void TaskRes::_internal_set_group_id(const std::string& value) { +inline void TaskIns::_internal_set_group_id(const std::string& value) { group_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); } -inline std::string* TaskRes::_internal_mutable_group_id() { +inline std::string* TaskIns::_internal_mutable_group_id() { return group_id_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); } -inline std::string* 
TaskRes::release_group_id() { - // @@protoc_insertion_point(field_release:flwr.proto.TaskRes.group_id) +inline std::string* TaskIns::release_group_id() { + // @@protoc_insertion_point(field_release:flwr.proto.TaskIns.group_id) return group_id_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); } -inline void TaskRes::set_allocated_group_id(std::string* group_id) { +inline void TaskIns::set_allocated_group_id(std::string* group_id) { if (group_id != nullptr) { } else { @@ -3088,52 +1446,52 @@ inline void TaskRes::set_allocated_group_id(std::string* group_id) { } group_id_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), group_id, GetArenaForAllocation()); - // @@protoc_insertion_point(field_set_allocated:flwr.proto.TaskRes.group_id) + // @@protoc_insertion_point(field_set_allocated:flwr.proto.TaskIns.group_id) } -// sint64 workload_id = 3; -inline void TaskRes::clear_workload_id() { - workload_id_ = int64_t{0}; +// sint64 run_id = 3; +inline void TaskIns::clear_run_id() { + run_id_ = int64_t{0}; } -inline ::PROTOBUF_NAMESPACE_ID::int64 TaskRes::_internal_workload_id() const { - return workload_id_; +inline ::PROTOBUF_NAMESPACE_ID::int64 TaskIns::_internal_run_id() const { + return run_id_; } -inline ::PROTOBUF_NAMESPACE_ID::int64 TaskRes::workload_id() const { - // @@protoc_insertion_point(field_get:flwr.proto.TaskRes.workload_id) - return _internal_workload_id(); +inline ::PROTOBUF_NAMESPACE_ID::int64 TaskIns::run_id() const { + // @@protoc_insertion_point(field_get:flwr.proto.TaskIns.run_id) + return _internal_run_id(); } -inline void TaskRes::_internal_set_workload_id(::PROTOBUF_NAMESPACE_ID::int64 value) { +inline void TaskIns::_internal_set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value) { - workload_id_ = value; + run_id_ = value; } -inline void TaskRes::set_workload_id(::PROTOBUF_NAMESPACE_ID::int64 value) { - _internal_set_workload_id(value); - // 
@@protoc_insertion_point(field_set:flwr.proto.TaskRes.workload_id) +inline void TaskIns::set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value) { + _internal_set_run_id(value); + // @@protoc_insertion_point(field_set:flwr.proto.TaskIns.run_id) } // .flwr.proto.Task task = 4; -inline bool TaskRes::_internal_has_task() const { +inline bool TaskIns::_internal_has_task() const { return this != internal_default_instance() && task_ != nullptr; } -inline bool TaskRes::has_task() const { +inline bool TaskIns::has_task() const { return _internal_has_task(); } -inline void TaskRes::clear_task() { +inline void TaskIns::clear_task() { if (GetArenaForAllocation() == nullptr && task_ != nullptr) { delete task_; } task_ = nullptr; } -inline const ::flwr::proto::Task& TaskRes::_internal_task() const { +inline const ::flwr::proto::Task& TaskIns::_internal_task() const { const ::flwr::proto::Task* p = task_; return p != nullptr ? *p : reinterpret_cast( ::flwr::proto::_Task_default_instance_); } -inline const ::flwr::proto::Task& TaskRes::task() const { - // @@protoc_insertion_point(field_get:flwr.proto.TaskRes.task) +inline const ::flwr::proto::Task& TaskIns::task() const { + // @@protoc_insertion_point(field_get:flwr.proto.TaskIns.task) return _internal_task(); } -inline void TaskRes::unsafe_arena_set_allocated_task( +inline void TaskIns::unsafe_arena_set_allocated_task( ::flwr::proto::Task* task) { if (GetArenaForAllocation() == nullptr) { delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(task_); @@ -3144,9 +1502,9 @@ inline void TaskRes::unsafe_arena_set_allocated_task( } else { } - // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.TaskRes.task) + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.TaskIns.task) } -inline ::flwr::proto::Task* TaskRes::release_task() { +inline ::flwr::proto::Task* TaskIns::release_task() { ::flwr::proto::Task* temp = task_; task_ = nullptr; @@ -3161,14 +1519,14 @@ inline ::flwr::proto::Task* 
TaskRes::release_task() { #endif // !PROTOBUF_FORCE_COPY_IN_RELEASE return temp; } -inline ::flwr::proto::Task* TaskRes::unsafe_arena_release_task() { - // @@protoc_insertion_point(field_release:flwr.proto.TaskRes.task) +inline ::flwr::proto::Task* TaskIns::unsafe_arena_release_task() { + // @@protoc_insertion_point(field_release:flwr.proto.TaskIns.task) ::flwr::proto::Task* temp = task_; task_ = nullptr; return temp; } -inline ::flwr::proto::Task* TaskRes::_internal_mutable_task() { +inline ::flwr::proto::Task* TaskIns::_internal_mutable_task() { if (task_ == nullptr) { auto* p = CreateMaybeMessage<::flwr::proto::Task>(GetArenaForAllocation()); @@ -3176,1036 +1534,235 @@ inline ::flwr::proto::Task* TaskRes::_internal_mutable_task() { } return task_; } -inline ::flwr::proto::Task* TaskRes::mutable_task() { +inline ::flwr::proto::Task* TaskIns::mutable_task() { ::flwr::proto::Task* _msg = _internal_mutable_task(); - // @@protoc_insertion_point(field_mutable:flwr.proto.TaskRes.task) + // @@protoc_insertion_point(field_mutable:flwr.proto.TaskIns.task) return _msg; } -inline void TaskRes::set_allocated_task(::flwr::proto::Task* task) { +inline void TaskIns::set_allocated_task(::flwr::proto::Task* task) { ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); - if (message_arena == nullptr) { - delete task_; - } - if (task) { - ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = - ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Task>::GetOwningArena(task); - if (message_arena != submessage_arena) { - task = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, task, submessage_arena); - } - - } else { - - } - task_ = task; - // @@protoc_insertion_point(field_set_allocated:flwr.proto.TaskRes.task) -} - -// ------------------------------------------------------------------- - -// Value_DoubleList - -// repeated double vals = 1; -inline int Value_DoubleList::_internal_vals_size() const { - return vals_.size(); -} -inline int 
Value_DoubleList::vals_size() const { - return _internal_vals_size(); -} -inline void Value_DoubleList::clear_vals() { - vals_.Clear(); -} -inline double Value_DoubleList::_internal_vals(int index) const { - return vals_.Get(index); -} -inline double Value_DoubleList::vals(int index) const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.DoubleList.vals) - return _internal_vals(index); -} -inline void Value_DoubleList::set_vals(int index, double value) { - vals_.Set(index, value); - // @@protoc_insertion_point(field_set:flwr.proto.Value.DoubleList.vals) -} -inline void Value_DoubleList::_internal_add_vals(double value) { - vals_.Add(value); -} -inline void Value_DoubleList::add_vals(double value) { - _internal_add_vals(value); - // @@protoc_insertion_point(field_add:flwr.proto.Value.DoubleList.vals) -} -inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& -Value_DoubleList::_internal_vals() const { - return vals_; -} -inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& -Value_DoubleList::vals() const { - // @@protoc_insertion_point(field_list:flwr.proto.Value.DoubleList.vals) - return _internal_vals(); -} -inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* -Value_DoubleList::_internal_mutable_vals() { - return &vals_; -} -inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* -Value_DoubleList::mutable_vals() { - // @@protoc_insertion_point(field_mutable_list:flwr.proto.Value.DoubleList.vals) - return _internal_mutable_vals(); -} - -// ------------------------------------------------------------------- - -// Value_Sint64List - -// repeated sint64 vals = 1; -inline int Value_Sint64List::_internal_vals_size() const { - return vals_.size(); -} -inline int Value_Sint64List::vals_size() const { - return _internal_vals_size(); -} -inline void Value_Sint64List::clear_vals() { - vals_.Clear(); -} -inline ::PROTOBUF_NAMESPACE_ID::int64 Value_Sint64List::_internal_vals(int index) const { - return vals_.Get(index); -} -inline 
::PROTOBUF_NAMESPACE_ID::int64 Value_Sint64List::vals(int index) const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.Sint64List.vals) - return _internal_vals(index); -} -inline void Value_Sint64List::set_vals(int index, ::PROTOBUF_NAMESPACE_ID::int64 value) { - vals_.Set(index, value); - // @@protoc_insertion_point(field_set:flwr.proto.Value.Sint64List.vals) -} -inline void Value_Sint64List::_internal_add_vals(::PROTOBUF_NAMESPACE_ID::int64 value) { - vals_.Add(value); -} -inline void Value_Sint64List::add_vals(::PROTOBUF_NAMESPACE_ID::int64 value) { - _internal_add_vals(value); - // @@protoc_insertion_point(field_add:flwr.proto.Value.Sint64List.vals) -} -inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >& -Value_Sint64List::_internal_vals() const { - return vals_; -} -inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >& -Value_Sint64List::vals() const { - // @@protoc_insertion_point(field_list:flwr.proto.Value.Sint64List.vals) - return _internal_vals(); -} -inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >* -Value_Sint64List::_internal_mutable_vals() { - return &vals_; -} -inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >* -Value_Sint64List::mutable_vals() { - // @@protoc_insertion_point(field_mutable_list:flwr.proto.Value.Sint64List.vals) - return _internal_mutable_vals(); -} - -// ------------------------------------------------------------------- - -// Value_BoolList - -// repeated bool vals = 1; -inline int Value_BoolList::_internal_vals_size() const { - return vals_.size(); -} -inline int Value_BoolList::vals_size() const { - return _internal_vals_size(); -} -inline void Value_BoolList::clear_vals() { - vals_.Clear(); -} -inline bool Value_BoolList::_internal_vals(int index) const { - return vals_.Get(index); -} -inline bool Value_BoolList::vals(int index) const { - // 
@@protoc_insertion_point(field_get:flwr.proto.Value.BoolList.vals) - return _internal_vals(index); -} -inline void Value_BoolList::set_vals(int index, bool value) { - vals_.Set(index, value); - // @@protoc_insertion_point(field_set:flwr.proto.Value.BoolList.vals) -} -inline void Value_BoolList::_internal_add_vals(bool value) { - vals_.Add(value); -} -inline void Value_BoolList::add_vals(bool value) { - _internal_add_vals(value); - // @@protoc_insertion_point(field_add:flwr.proto.Value.BoolList.vals) -} -inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >& -Value_BoolList::_internal_vals() const { - return vals_; -} -inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >& -Value_BoolList::vals() const { - // @@protoc_insertion_point(field_list:flwr.proto.Value.BoolList.vals) - return _internal_vals(); -} -inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >* -Value_BoolList::_internal_mutable_vals() { - return &vals_; -} -inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >* -Value_BoolList::mutable_vals() { - // @@protoc_insertion_point(field_mutable_list:flwr.proto.Value.BoolList.vals) - return _internal_mutable_vals(); -} - -// ------------------------------------------------------------------- - -// Value_StringList - -// repeated string vals = 1; -inline int Value_StringList::_internal_vals_size() const { - return vals_.size(); -} -inline int Value_StringList::vals_size() const { - return _internal_vals_size(); -} -inline void Value_StringList::clear_vals() { - vals_.Clear(); -} -inline std::string* Value_StringList::add_vals() { - std::string* _s = _internal_add_vals(); - // @@protoc_insertion_point(field_add_mutable:flwr.proto.Value.StringList.vals) - return _s; -} -inline const std::string& Value_StringList::_internal_vals(int index) const { - return vals_.Get(index); -} -inline const std::string& Value_StringList::vals(int index) const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.StringList.vals) - return _internal_vals(index); 
-} -inline std::string* Value_StringList::mutable_vals(int index) { - // @@protoc_insertion_point(field_mutable:flwr.proto.Value.StringList.vals) - return vals_.Mutable(index); -} -inline void Value_StringList::set_vals(int index, const std::string& value) { - vals_.Mutable(index)->assign(value); - // @@protoc_insertion_point(field_set:flwr.proto.Value.StringList.vals) -} -inline void Value_StringList::set_vals(int index, std::string&& value) { - vals_.Mutable(index)->assign(std::move(value)); - // @@protoc_insertion_point(field_set:flwr.proto.Value.StringList.vals) -} -inline void Value_StringList::set_vals(int index, const char* value) { - GOOGLE_DCHECK(value != nullptr); - vals_.Mutable(index)->assign(value); - // @@protoc_insertion_point(field_set_char:flwr.proto.Value.StringList.vals) -} -inline void Value_StringList::set_vals(int index, const char* value, size_t size) { - vals_.Mutable(index)->assign( - reinterpret_cast(value), size); - // @@protoc_insertion_point(field_set_pointer:flwr.proto.Value.StringList.vals) -} -inline std::string* Value_StringList::_internal_add_vals() { - return vals_.Add(); -} -inline void Value_StringList::add_vals(const std::string& value) { - vals_.Add()->assign(value); - // @@protoc_insertion_point(field_add:flwr.proto.Value.StringList.vals) -} -inline void Value_StringList::add_vals(std::string&& value) { - vals_.Add(std::move(value)); - // @@protoc_insertion_point(field_add:flwr.proto.Value.StringList.vals) -} -inline void Value_StringList::add_vals(const char* value) { - GOOGLE_DCHECK(value != nullptr); - vals_.Add()->assign(value); - // @@protoc_insertion_point(field_add_char:flwr.proto.Value.StringList.vals) -} -inline void Value_StringList::add_vals(const char* value, size_t size) { - vals_.Add()->assign(reinterpret_cast(value), size); - // @@protoc_insertion_point(field_add_pointer:flwr.proto.Value.StringList.vals) -} -inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& -Value_StringList::vals() const { - // 
@@protoc_insertion_point(field_list:flwr.proto.Value.StringList.vals) - return vals_; -} -inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* -Value_StringList::mutable_vals() { - // @@protoc_insertion_point(field_mutable_list:flwr.proto.Value.StringList.vals) - return &vals_; -} - -// ------------------------------------------------------------------- - -// Value_BytesList - -// repeated bytes vals = 1; -inline int Value_BytesList::_internal_vals_size() const { - return vals_.size(); -} -inline int Value_BytesList::vals_size() const { - return _internal_vals_size(); -} -inline void Value_BytesList::clear_vals() { - vals_.Clear(); -} -inline std::string* Value_BytesList::add_vals() { - std::string* _s = _internal_add_vals(); - // @@protoc_insertion_point(field_add_mutable:flwr.proto.Value.BytesList.vals) - return _s; -} -inline const std::string& Value_BytesList::_internal_vals(int index) const { - return vals_.Get(index); -} -inline const std::string& Value_BytesList::vals(int index) const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.BytesList.vals) - return _internal_vals(index); -} -inline std::string* Value_BytesList::mutable_vals(int index) { - // @@protoc_insertion_point(field_mutable:flwr.proto.Value.BytesList.vals) - return vals_.Mutable(index); -} -inline void Value_BytesList::set_vals(int index, const std::string& value) { - vals_.Mutable(index)->assign(value); - // @@protoc_insertion_point(field_set:flwr.proto.Value.BytesList.vals) -} -inline void Value_BytesList::set_vals(int index, std::string&& value) { - vals_.Mutable(index)->assign(std::move(value)); - // @@protoc_insertion_point(field_set:flwr.proto.Value.BytesList.vals) -} -inline void Value_BytesList::set_vals(int index, const char* value) { - GOOGLE_DCHECK(value != nullptr); - vals_.Mutable(index)->assign(value); - // @@protoc_insertion_point(field_set_char:flwr.proto.Value.BytesList.vals) -} -inline void Value_BytesList::set_vals(int index, const void* value, size_t size) { - 
vals_.Mutable(index)->assign( - reinterpret_cast(value), size); - // @@protoc_insertion_point(field_set_pointer:flwr.proto.Value.BytesList.vals) -} -inline std::string* Value_BytesList::_internal_add_vals() { - return vals_.Add(); -} -inline void Value_BytesList::add_vals(const std::string& value) { - vals_.Add()->assign(value); - // @@protoc_insertion_point(field_add:flwr.proto.Value.BytesList.vals) -} -inline void Value_BytesList::add_vals(std::string&& value) { - vals_.Add(std::move(value)); - // @@protoc_insertion_point(field_add:flwr.proto.Value.BytesList.vals) -} -inline void Value_BytesList::add_vals(const char* value) { - GOOGLE_DCHECK(value != nullptr); - vals_.Add()->assign(value); - // @@protoc_insertion_point(field_add_char:flwr.proto.Value.BytesList.vals) -} -inline void Value_BytesList::add_vals(const void* value, size_t size) { - vals_.Add()->assign(reinterpret_cast(value), size); - // @@protoc_insertion_point(field_add_pointer:flwr.proto.Value.BytesList.vals) -} -inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& -Value_BytesList::vals() const { - // @@protoc_insertion_point(field_list:flwr.proto.Value.BytesList.vals) - return vals_; -} -inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* -Value_BytesList::mutable_vals() { - // @@protoc_insertion_point(field_mutable_list:flwr.proto.Value.BytesList.vals) - return &vals_; -} - -// ------------------------------------------------------------------- - -// Value - -// double double = 1; -inline bool Value::_internal_has_double_() const { - return value_case() == kDouble; -} -inline bool Value::has_double_() const { - return _internal_has_double_(); -} -inline void Value::set_has_double_() { - _oneof_case_[0] = kDouble; -} -inline void Value::clear_double_() { - if (_internal_has_double_()) { - value_.double__ = 0; - clear_has_value(); - } -} -inline double Value::_internal_double_() const { - if (_internal_has_double_()) { - return value_.double__; - } - return 0; -} -inline void 
Value::_internal_set_double_(double value) { - if (!_internal_has_double_()) { - clear_value(); - set_has_double_(); - } - value_.double__ = value; -} -inline double Value::double_() const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.double) - return _internal_double_(); -} -inline void Value::set_double_(double value) { - _internal_set_double_(value); - // @@protoc_insertion_point(field_set:flwr.proto.Value.double) -} - -// sint64 sint64 = 2; -inline bool Value::_internal_has_sint64() const { - return value_case() == kSint64; -} -inline bool Value::has_sint64() const { - return _internal_has_sint64(); -} -inline void Value::set_has_sint64() { - _oneof_case_[0] = kSint64; -} -inline void Value::clear_sint64() { - if (_internal_has_sint64()) { - value_.sint64_ = int64_t{0}; - clear_has_value(); - } -} -inline ::PROTOBUF_NAMESPACE_ID::int64 Value::_internal_sint64() const { - if (_internal_has_sint64()) { - return value_.sint64_; - } - return int64_t{0}; -} -inline void Value::_internal_set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value) { - if (!_internal_has_sint64()) { - clear_value(); - set_has_sint64(); - } - value_.sint64_ = value; -} -inline ::PROTOBUF_NAMESPACE_ID::int64 Value::sint64() const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.sint64) - return _internal_sint64(); -} -inline void Value::set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value) { - _internal_set_sint64(value); - // @@protoc_insertion_point(field_set:flwr.proto.Value.sint64) -} - -// bool bool = 3; -inline bool Value::_internal_has_bool_() const { - return value_case() == kBool; -} -inline bool Value::has_bool_() const { - return _internal_has_bool_(); -} -inline void Value::set_has_bool_() { - _oneof_case_[0] = kBool; -} -inline void Value::clear_bool_() { - if (_internal_has_bool_()) { - value_.bool__ = false; - clear_has_value(); - } -} -inline bool Value::_internal_bool_() const { - if (_internal_has_bool_()) { - return value_.bool__; - } - return false; -} -inline 
void Value::_internal_set_bool_(bool value) { - if (!_internal_has_bool_()) { - clear_value(); - set_has_bool_(); - } - value_.bool__ = value; -} -inline bool Value::bool_() const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.bool) - return _internal_bool_(); -} -inline void Value::set_bool_(bool value) { - _internal_set_bool_(value); - // @@protoc_insertion_point(field_set:flwr.proto.Value.bool) -} - -// string string = 4; -inline bool Value::_internal_has_string() const { - return value_case() == kString; -} -inline bool Value::has_string() const { - return _internal_has_string(); -} -inline void Value::set_has_string() { - _oneof_case_[0] = kString; -} -inline void Value::clear_string() { - if (_internal_has_string()) { - value_.string_.Destroy(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); - clear_has_value(); - } -} -inline const std::string& Value::string() const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.string) - return _internal_string(); -} -template -inline void Value::set_string(ArgT0&& arg0, ArgT... 
args) { - if (!_internal_has_string()) { - clear_value(); - set_has_string(); - value_.string_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); - } - value_.string_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); - // @@protoc_insertion_point(field_set:flwr.proto.Value.string) -} -inline std::string* Value::mutable_string() { - std::string* _s = _internal_mutable_string(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Value.string) - return _s; -} -inline const std::string& Value::_internal_string() const { - if (_internal_has_string()) { - return value_.string_.Get(); - } - return ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(); -} -inline void Value::_internal_set_string(const std::string& value) { - if (!_internal_has_string()) { - clear_value(); - set_has_string(); - value_.string_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); - } - value_.string_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); -} -inline std::string* Value::_internal_mutable_string() { - if (!_internal_has_string()) { - clear_value(); - set_has_string(); - value_.string_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); - } - return value_.string_.Mutable( - ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); -} -inline std::string* Value::release_string() { - // @@protoc_insertion_point(field_release:flwr.proto.Value.string) - if (_internal_has_string()) { - clear_has_value(); - return value_.string_.ReleaseNonDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); - } else { - return nullptr; - } -} -inline void Value::set_allocated_string(std::string* string) { - if (has_value()) { - clear_value(); + if (message_arena == nullptr) { + delete 
task_; } - if (string != nullptr) { - set_has_string(); - value_.string_.UnsafeSetDefault(string); - ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaForAllocation(); - if (arena != nullptr) { - arena->Own(string); + if (task) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Task>::GetOwningArena(task); + if (message_arena != submessage_arena) { + task = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, task, submessage_arena); } + + } else { + } - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Value.string) + task_ = task; + // @@protoc_insertion_point(field_set_allocated:flwr.proto.TaskIns.task) } -// bytes bytes = 5; -inline bool Value::_internal_has_bytes() const { - return value_case() == kBytes; -} -inline bool Value::has_bytes() const { - return _internal_has_bytes(); -} -inline void Value::set_has_bytes() { - _oneof_case_[0] = kBytes; -} -inline void Value::clear_bytes() { - if (_internal_has_bytes()) { - value_.bytes_.Destroy(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); - clear_has_value(); - } +// ------------------------------------------------------------------- + +// TaskRes + +// string task_id = 1; +inline void TaskRes::clear_task_id() { + task_id_.ClearToEmpty(); } -inline const std::string& Value::bytes() const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.bytes) - return _internal_bytes(); +inline const std::string& TaskRes::task_id() const { + // @@protoc_insertion_point(field_get:flwr.proto.TaskRes.task_id) + return _internal_task_id(); } template -inline void Value::set_bytes(ArgT0&& arg0, ArgT... 
args) { - if (!_internal_has_bytes()) { - clear_value(); - set_has_bytes(); - value_.bytes_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); - } - value_.bytes_.SetBytes(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); - // @@protoc_insertion_point(field_set:flwr.proto.Value.bytes) +inline PROTOBUF_ALWAYS_INLINE +void TaskRes::set_task_id(ArgT0&& arg0, ArgT... args) { + + task_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:flwr.proto.TaskRes.task_id) } -inline std::string* Value::mutable_bytes() { - std::string* _s = _internal_mutable_bytes(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Value.bytes) +inline std::string* TaskRes::mutable_task_id() { + std::string* _s = _internal_mutable_task_id(); + // @@protoc_insertion_point(field_mutable:flwr.proto.TaskRes.task_id) return _s; } -inline const std::string& Value::_internal_bytes() const { - if (_internal_has_bytes()) { - return value_.bytes_.Get(); - } - return ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(); -} -inline void Value::_internal_set_bytes(const std::string& value) { - if (!_internal_has_bytes()) { - clear_value(); - set_has_bytes(); - value_.bytes_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); - } - value_.bytes_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); -} -inline std::string* Value::_internal_mutable_bytes() { - if (!_internal_has_bytes()) { - clear_value(); - set_has_bytes(); - value_.bytes_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); - } - return value_.bytes_.Mutable( - ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); -} -inline std::string* 
Value::release_bytes() { - // @@protoc_insertion_point(field_release:flwr.proto.Value.bytes) - if (_internal_has_bytes()) { - clear_has_value(); - return value_.bytes_.ReleaseNonDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); - } else { - return nullptr; - } -} -inline void Value::set_allocated_bytes(std::string* bytes) { - if (has_value()) { - clear_value(); - } - if (bytes != nullptr) { - set_has_bytes(); - value_.bytes_.UnsafeSetDefault(bytes); - ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaForAllocation(); - if (arena != nullptr) { - arena->Own(bytes); - } - } - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Value.bytes) -} - -// .flwr.proto.Value.DoubleList double_list = 21; -inline bool Value::_internal_has_double_list() const { - return value_case() == kDoubleList; -} -inline bool Value::has_double_list() const { - return _internal_has_double_list(); +inline const std::string& TaskRes::_internal_task_id() const { + return task_id_.Get(); } -inline void Value::set_has_double_list() { - _oneof_case_[0] = kDoubleList; +inline void TaskRes::_internal_set_task_id(const std::string& value) { + + task_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); } -inline void Value::clear_double_list() { - if (_internal_has_double_list()) { - if (GetArenaForAllocation() == nullptr) { - delete value_.double_list_; - } - clear_has_value(); - } +inline std::string* TaskRes::_internal_mutable_task_id() { + + return task_id_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); } -inline ::flwr::proto::Value_DoubleList* Value::release_double_list() { - // @@protoc_insertion_point(field_release:flwr.proto.Value.double_list) - if (_internal_has_double_list()) { - clear_has_value(); - ::flwr::proto::Value_DoubleList* temp = value_.double_list_; - if (GetArenaForAllocation() != nullptr) { - temp = 
::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - } - value_.double_list_ = nullptr; - return temp; - } else { - return nullptr; - } +inline std::string* TaskRes::release_task_id() { + // @@protoc_insertion_point(field_release:flwr.proto.TaskRes.task_id) + return task_id_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); } -inline const ::flwr::proto::Value_DoubleList& Value::_internal_double_list() const { - return _internal_has_double_list() - ? *value_.double_list_ - : reinterpret_cast< ::flwr::proto::Value_DoubleList&>(::flwr::proto::_Value_DoubleList_default_instance_); -} -inline const ::flwr::proto::Value_DoubleList& Value::double_list() const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.double_list) - return _internal_double_list(); -} -inline ::flwr::proto::Value_DoubleList* Value::unsafe_arena_release_double_list() { - // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.Value.double_list) - if (_internal_has_double_list()) { - clear_has_value(); - ::flwr::proto::Value_DoubleList* temp = value_.double_list_; - value_.double_list_ = nullptr; - return temp; +inline void TaskRes::set_allocated_task_id(std::string* task_id) { + if (task_id != nullptr) { + } else { - return nullptr; + } + task_id_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), task_id, + GetArenaForAllocation()); + // @@protoc_insertion_point(field_set_allocated:flwr.proto.TaskRes.task_id) } -inline void Value::unsafe_arena_set_allocated_double_list(::flwr::proto::Value_DoubleList* double_list) { - clear_value(); - if (double_list) { - set_has_double_list(); - value_.double_list_ = double_list; - } - // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Value.double_list) + +// string group_id = 2; +inline void TaskRes::clear_group_id() { + group_id_.ClearToEmpty(); } -inline ::flwr::proto::Value_DoubleList* Value::_internal_mutable_double_list() { - if 
(!_internal_has_double_list()) { - clear_value(); - set_has_double_list(); - value_.double_list_ = CreateMaybeMessage< ::flwr::proto::Value_DoubleList >(GetArenaForAllocation()); - } - return value_.double_list_; +inline const std::string& TaskRes::group_id() const { + // @@protoc_insertion_point(field_get:flwr.proto.TaskRes.group_id) + return _internal_group_id(); } -inline ::flwr::proto::Value_DoubleList* Value::mutable_double_list() { - ::flwr::proto::Value_DoubleList* _msg = _internal_mutable_double_list(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Value.double_list) - return _msg; +template +inline PROTOBUF_ALWAYS_INLINE +void TaskRes::set_group_id(ArgT0&& arg0, ArgT... args) { + + group_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:flwr.proto.TaskRes.group_id) } - -// .flwr.proto.Value.Sint64List sint64_list = 22; -inline bool Value::_internal_has_sint64_list() const { - return value_case() == kSint64List; +inline std::string* TaskRes::mutable_group_id() { + std::string* _s = _internal_mutable_group_id(); + // @@protoc_insertion_point(field_mutable:flwr.proto.TaskRes.group_id) + return _s; } -inline bool Value::has_sint64_list() const { - return _internal_has_sint64_list(); +inline const std::string& TaskRes::_internal_group_id() const { + return group_id_.Get(); } -inline void Value::set_has_sint64_list() { - _oneof_case_[0] = kSint64List; +inline void TaskRes::_internal_set_group_id(const std::string& value) { + + group_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); } -inline void Value::clear_sint64_list() { - if (_internal_has_sint64_list()) { - if (GetArenaForAllocation() == nullptr) { - delete value_.sint64_list_; - } - clear_has_value(); - } +inline std::string* TaskRes::_internal_mutable_group_id() { + + return 
group_id_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); } -inline ::flwr::proto::Value_Sint64List* Value::release_sint64_list() { - // @@protoc_insertion_point(field_release:flwr.proto.Value.sint64_list) - if (_internal_has_sint64_list()) { - clear_has_value(); - ::flwr::proto::Value_Sint64List* temp = value_.sint64_list_; - if (GetArenaForAllocation() != nullptr) { - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - } - value_.sint64_list_ = nullptr; - return temp; - } else { - return nullptr; - } +inline std::string* TaskRes::release_group_id() { + // @@protoc_insertion_point(field_release:flwr.proto.TaskRes.group_id) + return group_id_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); } -inline const ::flwr::proto::Value_Sint64List& Value::_internal_sint64_list() const { - return _internal_has_sint64_list() - ? *value_.sint64_list_ - : reinterpret_cast< ::flwr::proto::Value_Sint64List&>(::flwr::proto::_Value_Sint64List_default_instance_); -} -inline const ::flwr::proto::Value_Sint64List& Value::sint64_list() const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.sint64_list) - return _internal_sint64_list(); -} -inline ::flwr::proto::Value_Sint64List* Value::unsafe_arena_release_sint64_list() { - // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.Value.sint64_list) - if (_internal_has_sint64_list()) { - clear_has_value(); - ::flwr::proto::Value_Sint64List* temp = value_.sint64_list_; - value_.sint64_list_ = nullptr; - return temp; +inline void TaskRes::set_allocated_group_id(std::string* group_id) { + if (group_id != nullptr) { + } else { - return nullptr; - } -} -inline void Value::unsafe_arena_set_allocated_sint64_list(::flwr::proto::Value_Sint64List* sint64_list) { - clear_value(); - if (sint64_list) { - set_has_sint64_list(); - value_.sint64_list_ = sint64_list; - } - // 
@@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Value.sint64_list) -} -inline ::flwr::proto::Value_Sint64List* Value::_internal_mutable_sint64_list() { - if (!_internal_has_sint64_list()) { - clear_value(); - set_has_sint64_list(); - value_.sint64_list_ = CreateMaybeMessage< ::flwr::proto::Value_Sint64List >(GetArenaForAllocation()); + } - return value_.sint64_list_; -} -inline ::flwr::proto::Value_Sint64List* Value::mutable_sint64_list() { - ::flwr::proto::Value_Sint64List* _msg = _internal_mutable_sint64_list(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Value.sint64_list) - return _msg; + group_id_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), group_id, + GetArenaForAllocation()); + // @@protoc_insertion_point(field_set_allocated:flwr.proto.TaskRes.group_id) } -// .flwr.proto.Value.BoolList bool_list = 23; -inline bool Value::_internal_has_bool_list() const { - return value_case() == kBoolList; +// sint64 run_id = 3; +inline void TaskRes::clear_run_id() { + run_id_ = int64_t{0}; } -inline bool Value::has_bool_list() const { - return _internal_has_bool_list(); +inline ::PROTOBUF_NAMESPACE_ID::int64 TaskRes::_internal_run_id() const { + return run_id_; } -inline void Value::set_has_bool_list() { - _oneof_case_[0] = kBoolList; +inline ::PROTOBUF_NAMESPACE_ID::int64 TaskRes::run_id() const { + // @@protoc_insertion_point(field_get:flwr.proto.TaskRes.run_id) + return _internal_run_id(); } -inline void Value::clear_bool_list() { - if (_internal_has_bool_list()) { - if (GetArenaForAllocation() == nullptr) { - delete value_.bool_list_; - } - clear_has_value(); - } +inline void TaskRes::_internal_set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value) { + + run_id_ = value; } -inline ::flwr::proto::Value_BoolList* Value::release_bool_list() { - // @@protoc_insertion_point(field_release:flwr.proto.Value.bool_list) - if (_internal_has_bool_list()) { - clear_has_value(); - ::flwr::proto::Value_BoolList* temp = 
value_.bool_list_; - if (GetArenaForAllocation() != nullptr) { - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - } - value_.bool_list_ = nullptr; - return temp; - } else { - return nullptr; - } +inline void TaskRes::set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value) { + _internal_set_run_id(value); + // @@protoc_insertion_point(field_set:flwr.proto.TaskRes.run_id) } -inline const ::flwr::proto::Value_BoolList& Value::_internal_bool_list() const { - return _internal_has_bool_list() - ? *value_.bool_list_ - : reinterpret_cast< ::flwr::proto::Value_BoolList&>(::flwr::proto::_Value_BoolList_default_instance_); -} -inline const ::flwr::proto::Value_BoolList& Value::bool_list() const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.bool_list) - return _internal_bool_list(); -} -inline ::flwr::proto::Value_BoolList* Value::unsafe_arena_release_bool_list() { - // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.Value.bool_list) - if (_internal_has_bool_list()) { - clear_has_value(); - ::flwr::proto::Value_BoolList* temp = value_.bool_list_; - value_.bool_list_ = nullptr; - return temp; - } else { - return nullptr; - } + +// .flwr.proto.Task task = 4; +inline bool TaskRes::_internal_has_task() const { + return this != internal_default_instance() && task_ != nullptr; } -inline void Value::unsafe_arena_set_allocated_bool_list(::flwr::proto::Value_BoolList* bool_list) { - clear_value(); - if (bool_list) { - set_has_bool_list(); - value_.bool_list_ = bool_list; - } - // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Value.bool_list) +inline bool TaskRes::has_task() const { + return _internal_has_task(); } -inline ::flwr::proto::Value_BoolList* Value::_internal_mutable_bool_list() { - if (!_internal_has_bool_list()) { - clear_value(); - set_has_bool_list(); - value_.bool_list_ = CreateMaybeMessage< ::flwr::proto::Value_BoolList >(GetArenaForAllocation()); +inline void TaskRes::clear_task() { + if 
(GetArenaForAllocation() == nullptr && task_ != nullptr) { + delete task_; } - return value_.bool_list_; -} -inline ::flwr::proto::Value_BoolList* Value::mutable_bool_list() { - ::flwr::proto::Value_BoolList* _msg = _internal_mutable_bool_list(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Value.bool_list) - return _msg; -} - -// .flwr.proto.Value.StringList string_list = 24; -inline bool Value::_internal_has_string_list() const { - return value_case() == kStringList; + task_ = nullptr; } -inline bool Value::has_string_list() const { - return _internal_has_string_list(); +inline const ::flwr::proto::Task& TaskRes::_internal_task() const { + const ::flwr::proto::Task* p = task_; + return p != nullptr ? *p : reinterpret_cast( + ::flwr::proto::_Task_default_instance_); } -inline void Value::set_has_string_list() { - _oneof_case_[0] = kStringList; +inline const ::flwr::proto::Task& TaskRes::task() const { + // @@protoc_insertion_point(field_get:flwr.proto.TaskRes.task) + return _internal_task(); } -inline void Value::clear_string_list() { - if (_internal_has_string_list()) { - if (GetArenaForAllocation() == nullptr) { - delete value_.string_list_; - } - clear_has_value(); +inline void TaskRes::unsafe_arena_set_allocated_task( + ::flwr::proto::Task* task) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(task_); } -} -inline ::flwr::proto::Value_StringList* Value::release_string_list() { - // @@protoc_insertion_point(field_release:flwr.proto.Value.string_list) - if (_internal_has_string_list()) { - clear_has_value(); - ::flwr::proto::Value_StringList* temp = value_.string_list_; - if (GetArenaForAllocation() != nullptr) { - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - } - value_.string_list_ = nullptr; - return temp; + task_ = task; + if (task) { + } else { - return nullptr; + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.TaskRes.task) } -inline 
const ::flwr::proto::Value_StringList& Value::_internal_string_list() const { - return _internal_has_string_list() - ? *value_.string_list_ - : reinterpret_cast< ::flwr::proto::Value_StringList&>(::flwr::proto::_Value_StringList_default_instance_); -} -inline const ::flwr::proto::Value_StringList& Value::string_list() const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.string_list) - return _internal_string_list(); -} -inline ::flwr::proto::Value_StringList* Value::unsafe_arena_release_string_list() { - // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.Value.string_list) - if (_internal_has_string_list()) { - clear_has_value(); - ::flwr::proto::Value_StringList* temp = value_.string_list_; - value_.string_list_ = nullptr; - return temp; - } else { - return nullptr; +inline ::flwr::proto::Task* TaskRes::release_task() { + + ::flwr::proto::Task* temp = task_; + task_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; } -inline void Value::unsafe_arena_set_allocated_string_list(::flwr::proto::Value_StringList* string_list) { - clear_value(); - if (string_list) { - set_has_string_list(); - value_.string_list_ = string_list; - } - // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Value.string_list) +inline ::flwr::proto::Task* TaskRes::unsafe_arena_release_task() { + // @@protoc_insertion_point(field_release:flwr.proto.TaskRes.task) + + ::flwr::proto::Task* temp = task_; + task_ = nullptr; + return temp; } -inline ::flwr::proto::Value_StringList* Value::_internal_mutable_string_list() { - if 
(!_internal_has_string_list()) { - clear_value(); - set_has_string_list(); - value_.string_list_ = CreateMaybeMessage< ::flwr::proto::Value_StringList >(GetArenaForAllocation()); +inline ::flwr::proto::Task* TaskRes::_internal_mutable_task() { + + if (task_ == nullptr) { + auto* p = CreateMaybeMessage<::flwr::proto::Task>(GetArenaForAllocation()); + task_ = p; } - return value_.string_list_; + return task_; } -inline ::flwr::proto::Value_StringList* Value::mutable_string_list() { - ::flwr::proto::Value_StringList* _msg = _internal_mutable_string_list(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Value.string_list) +inline ::flwr::proto::Task* TaskRes::mutable_task() { + ::flwr::proto::Task* _msg = _internal_mutable_task(); + // @@protoc_insertion_point(field_mutable:flwr.proto.TaskRes.task) return _msg; } - -// .flwr.proto.Value.BytesList bytes_list = 25; -inline bool Value::_internal_has_bytes_list() const { - return value_case() == kBytesList; -} -inline bool Value::has_bytes_list() const { - return _internal_has_bytes_list(); -} -inline void Value::set_has_bytes_list() { - _oneof_case_[0] = kBytesList; -} -inline void Value::clear_bytes_list() { - if (_internal_has_bytes_list()) { - if (GetArenaForAllocation() == nullptr) { - delete value_.bytes_list_; - } - clear_has_value(); +inline void TaskRes::set_allocated_task(::flwr::proto::Task* task) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete task_; } -} -inline ::flwr::proto::Value_BytesList* Value::release_bytes_list() { - // @@protoc_insertion_point(field_release:flwr.proto.Value.bytes_list) - if (_internal_has_bytes_list()) { - clear_has_value(); - ::flwr::proto::Value_BytesList* temp = value_.bytes_list_; - if (GetArenaForAllocation() != nullptr) { - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (task) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + 
::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Task>::GetOwningArena(task); + if (message_arena != submessage_arena) { + task = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, task, submessage_arena); } - value_.bytes_list_ = nullptr; - return temp; - } else { - return nullptr; - } -} -inline const ::flwr::proto::Value_BytesList& Value::_internal_bytes_list() const { - return _internal_has_bytes_list() - ? *value_.bytes_list_ - : reinterpret_cast< ::flwr::proto::Value_BytesList&>(::flwr::proto::_Value_BytesList_default_instance_); -} -inline const ::flwr::proto::Value_BytesList& Value::bytes_list() const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.bytes_list) - return _internal_bytes_list(); -} -inline ::flwr::proto::Value_BytesList* Value::unsafe_arena_release_bytes_list() { - // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.Value.bytes_list) - if (_internal_has_bytes_list()) { - clear_has_value(); - ::flwr::proto::Value_BytesList* temp = value_.bytes_list_; - value_.bytes_list_ = nullptr; - return temp; + } else { - return nullptr; - } -} -inline void Value::unsafe_arena_set_allocated_bytes_list(::flwr::proto::Value_BytesList* bytes_list) { - clear_value(); - if (bytes_list) { - set_has_bytes_list(); - value_.bytes_list_ = bytes_list; - } - // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Value.bytes_list) -} -inline ::flwr::proto::Value_BytesList* Value::_internal_mutable_bytes_list() { - if (!_internal_has_bytes_list()) { - clear_value(); - set_has_bytes_list(); - value_.bytes_list_ = CreateMaybeMessage< ::flwr::proto::Value_BytesList >(GetArenaForAllocation()); + } - return value_.bytes_list_; -} -inline ::flwr::proto::Value_BytesList* Value::mutable_bytes_list() { - ::flwr::proto::Value_BytesList* _msg = _internal_mutable_bytes_list(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Value.bytes_list) - return _msg; -} - -inline bool Value::has_value() const { - 
return value_case() != VALUE_NOT_SET; -} -inline void Value::clear_has_value() { - _oneof_case_[0] = VALUE_NOT_SET; -} -inline Value::ValueCase Value::value_case() const { - return Value::ValueCase(_oneof_case_[0]); -} -// ------------------------------------------------------------------- - -// ------------------------------------------------------------------- - -// SecureAggregation - -// map named_values = 1; -inline int SecureAggregation::_internal_named_values_size() const { - return named_values_.size(); -} -inline int SecureAggregation::named_values_size() const { - return _internal_named_values_size(); -} -inline void SecureAggregation::clear_named_values() { - named_values_.Clear(); -} -inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >& -SecureAggregation::_internal_named_values() const { - return named_values_.GetMap(); -} -inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >& -SecureAggregation::named_values() const { - // @@protoc_insertion_point(field_map:flwr.proto.SecureAggregation.named_values) - return _internal_named_values(); -} -inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >* -SecureAggregation::_internal_mutable_named_values() { - return named_values_.MutableMap(); -} -inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >* -SecureAggregation::mutable_named_values() { - // @@protoc_insertion_point(field_mutable_map:flwr.proto.SecureAggregation.named_values) - return _internal_mutable_named_values(); + task_ = task; + // @@protoc_insertion_point(field_set_allocated:flwr.proto.TaskRes.task) } #ifdef __GNUC__ @@ -4215,22 +1772,6 @@ SecureAggregation::mutable_named_values() { // ------------------------------------------------------------------- -// ------------------------------------------------------------------- - -// ------------------------------------------------------------------- - -// 
------------------------------------------------------------------- - -// ------------------------------------------------------------------- - -// ------------------------------------------------------------------- - -// ------------------------------------------------------------------- - -// ------------------------------------------------------------------- - -// ------------------------------------------------------------------- - // @@protoc_insertion_point(namespace_scope) diff --git a/src/cc/flwr/include/grpc_rere.h b/src/cc/flwr/include/grpc_rere.h index 2a04b923f200..4f5a1895cbac 100644 --- a/src/cc/flwr/include/grpc_rere.h +++ b/src/cc/flwr/include/grpc_rere.h @@ -15,15 +15,29 @@ #ifndef GRPC_RERE_H #define GRPC_RERE_H #pragma once +#include "communicator.h" +#include "flwr/proto/fleet.grpc.pb.h" #include "message_handler.h" -#include "task_handler.h" #include -void create_node(const std::unique_ptr &stub); -void delete_node(const std::unique_ptr &stub); -void send(const std::unique_ptr &stub, - flwr::proto::TaskRes task_res); -std::optional -receive(const std::unique_ptr &stub); +class gRPCRereCommunicator : public Communicator { +public: + gRPCRereCommunicator(std::string server_address, int grpc_max_message_length); + + bool send_create_node(flwr::proto::CreateNodeRequest request, + flwr::proto::CreateNodeResponse *response); + + bool send_delete_node(flwr::proto::DeleteNodeRequest request, + flwr::proto::DeleteNodeResponse *response); + + bool send_pull_task_ins(flwr::proto::PullTaskInsRequest request, + flwr::proto::PullTaskInsResponse *response); + + bool send_push_task_res(flwr::proto::PushTaskResRequest request, + flwr::proto::PushTaskResResponse *response); + +private: + std::unique_ptr stub; +}; #endif diff --git a/src/cc/flwr/include/message_handler.h b/src/cc/flwr/include/message_handler.h index eb6df5cc1f9d..0c45ea485359 100644 --- a/src/cc/flwr/include/message_handler.h +++ b/src/cc/flwr/include/message_handler.h @@ -16,24 +16,8 @@ #include 
"client.h" #include "serde.h" -std::tuple -_reconnect(flwr::proto::ServerMessage_ReconnectIns reconnect_msg); - -flwr::proto::ClientMessage _get_parameters(flwr_local::Client *client); - -flwr::proto::ClientMessage _fit(flwr_local::Client *client, - flwr::proto::ServerMessage_FitIns fit_msg); - -flwr::proto::ClientMessage -_evaluate(flwr_local::Client *client, - flwr::proto::ServerMessage_EvaluateIns evaluate_msg); - std::tuple handle(flwr_local::Client *client, flwr::proto::ServerMessage server_msg); std::tuple handle_task(flwr_local::Client *client, const flwr::proto::TaskIns &task_ins); - -flwr::proto::TaskRes configure_task_res(const flwr::proto::TaskRes &task_res, - const flwr::proto::TaskIns &task_ins, - const flwr::proto::Node &node); diff --git a/src/cc/flwr/include/serde.h b/src/cc/flwr/include/serde.h index 8b9d809d7c8a..384f2b05c011 100644 --- a/src/cc/flwr/include/serde.h +++ b/src/cc/flwr/include/serde.h @@ -13,9 +13,7 @@ * ********************************************************************************************************/ #pragma once -#include "flwr/proto/fleet.grpc.pb.h" #include "flwr/proto/fleet.pb.h" -#include "flwr/proto/transport.grpc.pb.h" #include "flwr/proto/transport.pb.h" #include "typing.h" @@ -80,3 +78,24 @@ evaluate_ins_from_proto(flwr::proto::ServerMessage_EvaluateIns msg); */ flwr::proto::ClientMessage_EvaluateRes evaluate_res_to_proto(flwr_local::EvaluateRes res); + +flwr_local::RecordSet +recordset_from_proto(const flwr::proto::RecordSet &recordset); + +flwr_local::FitIns recordset_to_fit_ins(const flwr_local::RecordSet &recordset, + bool keep_input); + +flwr_local::EvaluateIns +recordset_to_evaluate_ins(const flwr_local::RecordSet &recordset, + bool keep_input); + +flwr_local::RecordSet +recordset_from_evaluate_res(const flwr_local::EvaluateRes &evaluate_res); + +flwr_local::RecordSet recordset_from_fit_res(const flwr_local::FitRes &fit_res); + +flwr_local::RecordSet recordset_from_get_parameters_res( + const 
flwr_local::ParametersRes ¶meters_res); + +flwr::proto::RecordSet +recordset_to_proto(const flwr_local::RecordSet &recordset); diff --git a/src/cc/flwr/include/start.h b/src/cc/flwr/include/start.h index 2c233be8249c..1a9033278df9 100644 --- a/src/cc/flwr/include/start.h +++ b/src/cc/flwr/include/start.h @@ -17,6 +17,8 @@ #define START_H #pragma once #include "client.h" +#include "communicator.h" +#include "flwr/proto/transport.grpc.pb.h" #include "grpc_rere.h" #include "message_handler.h" #include @@ -51,8 +53,5 @@ class start { static void start_client(std::string server_address, flwr_local::Client *client, int grpc_max_message_length = GRPC_MAX_MESSAGE_LENGTH); - static void - start_rere_client(std::string server_address, flwr_local::Client *client, - int grpc_max_message_length = GRPC_MAX_MESSAGE_LENGTH); }; #endif diff --git a/src/cc/flwr/include/task_handler.h b/src/cc/flwr/include/task_handler.h deleted file mode 100644 index 77fe5fef4d98..000000000000 --- a/src/cc/flwr/include/task_handler.h +++ /dev/null @@ -1,24 +0,0 @@ -/************************************************************************************************* - * - * @file task_handler.h - * - * @brief Handle incoming or outgoing tasks - * - * @author The Flower Authors - * - * @version 1.0 - * - * @date 06/11/2023 - * - *************************************************************************************************/ - -#pragma once -#include "client.h" -#include "serde.h" - -bool validate_task_ins(const flwr::proto::TaskIns &task_ins, - const bool discard_reconnect_ins); -bool validate_task_res(const flwr::proto::TaskRes &task_res); -flwr::proto::TaskRes configure_task_res(const flwr::proto::TaskRes &task_res, - const flwr::proto::TaskIns &task_ins, - const flwr::proto::Node &node); diff --git a/src/cc/flwr/include/typing.h b/src/cc/flwr/include/typing.h index 5aee90b6c215..39b78dc89ede 100644 --- a/src/cc/flwr/include/typing.h +++ b/src/cc/flwr/include/typing.h @@ -17,6 +17,8 @@ #include 
#include #include +#include +#include namespace flwr_local { /** @@ -66,8 +68,8 @@ class Parameters { : tensors(tensors), tensor_type(tensor_type) {} // Getters - std::list getTensors() { return tensors; } - std::string getTensor_type() { return tensor_type; } + const std::list getTensors() const { return tensors; } + const std::string getTensor_type() const { return tensor_type; } // Setters void setTensors(const std::list &tensors) { @@ -90,7 +92,7 @@ class ParametersRes { explicit ParametersRes(const Parameters ¶meters) : parameters(parameters) {} - Parameters getParameters() { return parameters; } + const Parameters getParameters() const { return parameters; } void setParameters(const Parameters &p) { parameters = p; } private: @@ -129,35 +131,26 @@ class FitRes { FitRes() {} FitRes(const Parameters ¶meters, int num_examples, int num_examples_ceil, float fit_duration, const Metrics &metrics) - : parameters(parameters), num_examples(num_examples), - fit_duration(fit_duration), metrics(metrics) {} + : _parameters(parameters), _num_examples(num_examples), + _fit_duration(fit_duration), _metrics(metrics) {} // Getters - Parameters getParameters() { return parameters; } - int getNum_example() { return num_examples; } - /*std::optional getNum_examples_ceil() - { - return num_examples_ceil; - }*/ - std::optional getFit_duration() { return fit_duration; } - std::optional getMetrics() { return metrics; } + const Parameters getParameters() const { return _parameters; } + const int getNum_example() const { return _num_examples; } + const std::optional getFit_duration() const { return _fit_duration; } + const std::optional getMetrics() const { return _metrics; } // Setters - void setParameters(const Parameters &p) { parameters = p; } - void setNum_example(int n) { num_examples = n; } - /*void setNum_examples_ceil(int n) - { - num_examples_ceil = n; - }*/ - void setFit_duration(float f) { fit_duration = f; } - void setMetrics(const flwr_local::Metrics &m) { metrics = m; } + 
void setParameters(const Parameters &p) { _parameters = p; } + void setNum_example(int n) { _num_examples = n; } + void setFit_duration(float f) { _fit_duration = f; } + void setMetrics(const flwr_local::Metrics &m) { _metrics = m; } private: - Parameters parameters; - int num_examples; - // std::optional num_examples_ceil = std::nullopt; - std::optional fit_duration = std::nullopt; - std::optional metrics = std::nullopt; + Parameters _parameters; + int _num_examples; + std::optional _fit_duration = std::nullopt; + std::optional _metrics = std::nullopt; }; /** @@ -195,9 +188,9 @@ class EvaluateRes { : loss(loss), num_examples(num_examples), metrics(metrics) {} // Getters - float getLoss() { return loss; } - int getNum_example() { return num_examples; } - std::optional getMetrics() { return metrics; } + const float getLoss() const { return loss; } + const int getNum_example() const { return num_examples; } + const std::optional getMetrics() const { return metrics; } // Setters void setLoss(float f) { loss = f; } @@ -239,4 +232,62 @@ class PropertiesRes { Properties properties; }; +struct Array { + std::string dtype; + std::vector shape; + std::string stype; + std::string data; // use string to represent bytes +}; + +using ParametersRecord = std::map; +using MetricsRecord = + std::map, std::vector>>; + +using ConfigsRecord = + std::map, + std::vector, std::vector, + std::vector>>; + +class RecordSet { +public: + RecordSet( + const std::map ¶metersRecords = {}, + const std::map &metricsRecords = {}, + const std::map &configsRecords = {}) + : _parametersRecords(parametersRecords), _metricsRecords(metricsRecords), + _configsRecords(configsRecords) {} + + const std::map &getParametersRecords() const { + return _parametersRecords; + } + const std::map &getMetricsRecords() const { + return _metricsRecords; + } + const std::map &getConfigsRecords() const { + return _configsRecords; + } + + void setParametersRecords( + const std::map ¶metersRecords) { + _parametersRecords = 
parametersRecords; + } + + void setMetricsRecords( + const std::map &metricsRecords) { + _metricsRecords = metricsRecords; + } + + void setConfigsRecords( + const std::map &configsRecords) { + _configsRecords = configsRecords; + } + +private: + std::map _parametersRecords; + std::map _metricsRecords; + std::map _configsRecords; +}; + } // namespace flwr_local diff --git a/src/cc/flwr/src/communicator.cc b/src/cc/flwr/src/communicator.cc new file mode 100644 index 000000000000..bcbea9de60ef --- /dev/null +++ b/src/cc/flwr/src/communicator.cc @@ -0,0 +1,187 @@ +#include "communicator.h" + +const std::string KEY_NODE = "node"; +const std::string KEY_TASK_INS = "current_task_ins"; + +std::map> node_store; +std::map> state; + +std::mutex node_store_mutex; +std::mutex state_mutex; + +std::optional get_node_from_store() { + std::lock_guard lock(node_store_mutex); + auto node = node_store.find(KEY_NODE); + if (node == node_store.end() || !node->second.has_value()) { + std::cerr << "Node instance missing" << std::endl; + return std::nullopt; + } + return node->second; +} + +bool validate_task_ins(const flwr::proto::TaskIns &task_ins, + const bool discard_reconnect_ins) { + return task_ins.has_task() && task_ins.task().has_recordset(); +} + +bool validate_task_res(const flwr::proto::TaskRes &task_res) { + // Retrieve initialized fields in TaskRes + return true; +} + +flwr::proto::TaskRes +configure_task_res(const flwr::proto::TaskRes &task_res, + const flwr::proto::TaskIns &ref_task_ins, + const flwr::proto::Node &producer) { + flwr::proto::TaskRes result_task_res; + + // Setting scalar fields + result_task_res.set_task_id(""); + result_task_res.set_group_id(ref_task_ins.group_id()); + result_task_res.set_run_id(ref_task_ins.run_id()); + + // Merge the task from the input task_res + *result_task_res.mutable_task() = task_res.task(); + + // Construct and set the producer and consumer for the task + std::unique_ptr new_producer = + std::make_unique(producer); + 
result_task_res.mutable_task()->set_allocated_producer( + new_producer.release()); + + std::unique_ptr new_consumer = + std::make_unique(ref_task_ins.task().producer()); + result_task_res.mutable_task()->set_allocated_consumer( + new_consumer.release()); + + // Set ancestry in the task + result_task_res.mutable_task()->add_ancestry(ref_task_ins.task_id()); + + return result_task_res; +} + +void delete_node_from_store() { + std::lock_guard lock(node_store_mutex); + auto node = node_store.find(KEY_NODE); + if (node == node_store.end() || !node->second.has_value()) { + node_store.erase(node); + } +} + +std::optional get_current_task_ins() { + std::lock_guard state_lock(state_mutex); + auto current_task_ins = state.find(KEY_TASK_INS); + if (current_task_ins == state.end() || + !current_task_ins->second.has_value()) { + std::cerr << "No current TaskIns" << std::endl; + return std::nullopt; + } + return current_task_ins->second; +} + +void create_node(Communicator *communicator) { + flwr::proto::CreateNodeRequest create_node_request; + flwr::proto::CreateNodeResponse create_node_response; + + create_node_request.set_ping_interval(300.0); + + communicator->send_create_node(create_node_request, &create_node_response); + + // Validate the response + if (!create_node_response.has_node()) { + std::cerr << "Received response does not contain a node." 
<< std::endl; + return; + } + + { + std::lock_guard lock(node_store_mutex); + node_store[KEY_NODE] = create_node_response.node(); + } +} + +void delete_node(Communicator *communicator) { + auto node = get_node_from_store(); + if (!node) { + return; + } + flwr::proto::DeleteNodeRequest delete_node_request; + flwr::proto::DeleteNodeResponse delete_node_response; + + auto heap_node = new flwr::proto::Node(*node); + delete_node_request.set_allocated_node(heap_node); + + if (!communicator->send_delete_node(delete_node_request, + &delete_node_response)) { + delete heap_node; // Make sure to delete if status is not ok + return; + } else { + delete_node_request.release_node(); // Release if status is ok + } + + delete_node_from_store(); +} + +std::optional receive(Communicator *communicator) { + auto node = get_node_from_store(); + if (!node) { + return std::nullopt; + } + flwr::proto::PullTaskInsResponse response; + flwr::proto::PullTaskInsRequest request; + + request.set_allocated_node(new flwr::proto::Node(*node)); + + bool success = communicator->send_pull_task_ins(request, &response); + + // Release ownership so that the heap_node won't be deleted when request + // goes out of scope. + request.release_node(); + + if (!success) { + return std::nullopt; + } + + if (response.task_ins_list_size() > 0) { + flwr::proto::TaskIns task_ins = response.task_ins_list().at(0); + if (validate_task_ins(task_ins, true)) { + std::lock_guard state_lock(state_mutex); + state[KEY_TASK_INS] = task_ins; + return task_ins; + } + } + std::cerr << "TaskIns list is empty." 
<< std::endl; + return std::nullopt; +} + +void send(Communicator *communicator, flwr::proto::TaskRes task_res) { + auto node = get_node_from_store(); + if (!node) { + return; + } + + auto task_ins = get_current_task_ins(); + if (!task_ins) { + return; + } + + if (!validate_task_res(task_res)) { + std::cerr << "TaskRes is invalid" << std::endl; + std::lock_guard state_lock(state_mutex); + state[KEY_TASK_INS].reset(); + return; + } + + flwr::proto::TaskRes new_task_res = + configure_task_res(task_res, *task_ins, *node); + + flwr::proto::PushTaskResRequest request; + *request.add_task_res_list() = new_task_res; + flwr::proto::PushTaskResResponse response; + + communicator->send_push_task_res(request, &response); + + { + std::lock_guard state_lock(state_mutex); + state[KEY_TASK_INS].reset(); + } +} diff --git a/src/cc/flwr/src/grpc_rere.cc b/src/cc/flwr/src/grpc_rere.cc index 267874a7a0e2..b8a04d9b9bf7 100644 --- a/src/cc/flwr/src/grpc_rere.cc +++ b/src/cc/flwr/src/grpc_rere.cc @@ -1,167 +1,75 @@ #include "grpc_rere.h" +#include "flwr/proto/fleet.grpc.pb.h" -const std::string KEY_NODE = "node"; -const std::string KEY_TASK_INS = "current_task_ins"; +gRPCRereCommunicator::gRPCRereCommunicator(std::string server_address, + int grpc_max_message_length) { + grpc::ChannelArguments args; + args.SetMaxReceiveMessageSize(grpc_max_message_length); + args.SetMaxSendMessageSize(grpc_max_message_length); -std::map> node_store; -std::map> state; + // Establish an insecure gRPC connection to a gRPC server + std::shared_ptr channel = grpc::CreateCustomChannel( + server_address, grpc::InsecureChannelCredentials(), args); -std::mutex node_store_mutex; -std::mutex state_mutex; - -std::optional get_node_from_store() { - std::lock_guard lock(node_store_mutex); - auto node = node_store.find(KEY_NODE); - if (node == node_store.end() || !node->second.has_value()) { - std::cerr << "Node instance missing" << std::endl; - return std::nullopt; - } - return node->second; -} - -void 
delete_node_from_store() { - std::lock_guard lock(node_store_mutex); - auto node = node_store.find(KEY_NODE); - if (node == node_store.end() || !node->second.has_value()) { - node_store.erase(node); - } -} - -std::optional get_current_task_ins() { - std::lock_guard state_lock(state_mutex); - auto current_task_ins = state.find(KEY_TASK_INS); - if (current_task_ins == state.end() || - !current_task_ins->second.has_value()) { - std::cerr << "No current TaskIns" << std::endl; - return std::nullopt; - } - return current_task_ins->second; + // Create stub + stub = flwr::proto::Fleet::NewStub(channel); } -void create_node(const std::unique_ptr &stub) { - flwr::proto::CreateNodeRequest create_node_request; - flwr::proto::CreateNodeResponse create_node_response; - +bool gRPCRereCommunicator::send_create_node( + flwr::proto::CreateNodeRequest request, + flwr::proto::CreateNodeResponse *response) { grpc::ClientContext context; - grpc::Status status = - stub->CreateNode(&context, create_node_request, &create_node_response); - + grpc::Status status = stub->CreateNode(&context, request, response); if (!status.ok()) { std::cerr << "CreateNode RPC failed: " << status.error_message() << std::endl; - return; - } - - // Validate the response - if (!create_node_response.has_node()) { - std::cerr << "Received response does not contain a node." 
<< std::endl; - return; + return false; } - { - std::lock_guard lock(node_store_mutex); - node_store[KEY_NODE] = create_node_response.node(); - } + return true; } -void delete_node(const std::unique_ptr &stub) { - auto node = get_node_from_store(); - if (!node) { - return; - } - flwr::proto::DeleteNodeRequest delete_node_request; - flwr::proto::DeleteNodeResponse delete_node_response; - - auto heap_node = new flwr::proto::Node(*node); - delete_node_request.set_allocated_node(heap_node); - +bool gRPCRereCommunicator::send_delete_node( + flwr::proto::DeleteNodeRequest request, + flwr::proto::DeleteNodeResponse *response) { grpc::ClientContext context; - grpc::Status status = - stub->DeleteNode(&context, delete_node_request, &delete_node_response); + grpc::Status status = stub->DeleteNode(&context, request, response); if (!status.ok()) { std::cerr << "DeleteNode RPC failed with status: " << status.error_message() << std::endl; - delete heap_node; // Make sure to delete if status is not ok - return; - } else { - delete_node_request.release_node(); // Release if status is ok + return false; } - delete_node_from_store(); + return true; } -std::optional -receive(const std::unique_ptr &stub) { - auto node = get_node_from_store(); - if (!node) { - return std::nullopt; - } - flwr::proto::PullTaskInsResponse response; - flwr::proto::PullTaskInsRequest request; - - request.set_allocated_node(new flwr::proto::Node(*node)); - +bool gRPCRereCommunicator::send_pull_task_ins( + flwr::proto::PullTaskInsRequest request, + flwr::proto::PullTaskInsResponse *response) { grpc::ClientContext context; - grpc::Status status = stub->PullTaskIns(&context, request, &response); - - // Release ownership so that the heap_node won't be deleted when request - // goes out of scope. 
- request.release_node(); + grpc::Status status = stub->PullTaskIns(&context, request, response); if (!status.ok()) { std::cerr << "PullTaskIns RPC failed with status: " << status.error_message() << std::endl; - return std::nullopt; + return false; } - if (response.task_ins_list_size() > 0) { - flwr::proto::TaskIns task_ins = response.task_ins_list().at(0); - if (validate_task_ins(task_ins, true)) { - std::lock_guard state_lock(state_mutex); - state[KEY_TASK_INS] = task_ins; - return task_ins; - } - } - std::cerr << "TaskIns list is empty." << std::endl; - return std::nullopt; + return true; } -void send(const std::unique_ptr &stub, - flwr::proto::TaskRes task_res) { - auto node = get_node_from_store(); - if (!node) { - return; - } - - auto task_ins = get_current_task_ins(); - if (!task_ins) { - return; - } - - if (!validate_task_res(task_res)) { - std::cerr << "TaskRes is invalid" << std::endl; - std::lock_guard state_lock(state_mutex); - state[KEY_TASK_INS].reset(); - return; - } - - flwr::proto::TaskRes new_task_res = - configure_task_res(task_res, *task_ins, *node); - - flwr::proto::PushTaskResRequest request; - *request.add_task_res_list() = new_task_res; - flwr::proto::PushTaskResResponse response; - +bool gRPCRereCommunicator::send_push_task_res( + flwr::proto::PushTaskResRequest request, + flwr::proto::PushTaskResResponse *response) { grpc::ClientContext context; - grpc::Status status = stub->PushTaskRes(&context, request, &response); + grpc::Status status = stub->PushTaskRes(&context, request, response); if (!status.ok()) { std::cerr << "PushTaskRes RPC failed with status: " << status.error_message() << std::endl; + return false; } - { - std::lock_guard state_lock(state_mutex); - state[KEY_TASK_INS].reset(); - } + + return true; } diff --git a/src/cc/flwr/src/message_handler.cc b/src/cc/flwr/src/message_handler.cc index 2c1e9ccbb49d..e1ce56f2cd96 100644 --- a/src/cc/flwr/src/message_handler.cc +++ b/src/cc/flwr/src/message_handler.cc @@ -1,109 +1,104 @@ 
#include "message_handler.h" +#include "flwr/proto/task.pb.h" +#include + +std::tuple +_reconnect(flwr::proto::RecordSet proto_recordset) { -std::tuple -_reconnect(flwr::proto::ServerMessage_ReconnectIns reconnect_msg) { // Determine the reason for sending Disconnect message flwr::proto::Reason reason = flwr::proto::Reason::ACK; int sleep_duration = 0; - if (reconnect_msg.seconds() != 0) { - reason = flwr::proto::Reason::RECONNECT; - sleep_duration = reconnect_msg.seconds(); - } // Build Disconnect message - flwr::proto::ClientMessage_DisconnectRes disconnect; - disconnect.set_reason(reason); - flwr::proto::ClientMessage cm; - *cm.mutable_disconnect_res() = disconnect; - - return std::make_tuple(cm, sleep_duration); + return std::make_tuple( + flwr_local::RecordSet({}, {}, {{"config", {{"reason", reason}}}}), + sleep_duration); } -flwr::proto::ClientMessage _get_parameters(flwr_local::Client *client) { - flwr::proto::ClientMessage cm; - *(cm.mutable_get_parameters_res()) = - parameters_res_to_proto(client->get_parameters()); - return cm; +flwr_local::RecordSet _get_parameters(flwr_local::Client *client) { + return recordset_from_get_parameters_res(client->get_parameters()); } -flwr::proto::ClientMessage _fit(flwr_local::Client *client, - flwr::proto::ServerMessage_FitIns fit_msg) { - // Deserialize fit instruction - flwr_local::FitIns fit_ins = fit_ins_from_proto(fit_msg); +flwr_local::RecordSet _fit(flwr_local::Client *client, + flwr::proto::RecordSet proto_recordset) { + flwr_local::RecordSet recordset = recordset_from_proto(proto_recordset); + flwr_local::FitIns fit_ins = recordset_to_fit_ins(recordset, true); // Perform fit flwr_local::FitRes fit_res = client->fit(fit_ins); - // Serialize fit result - flwr::proto::ClientMessage cm; - *cm.mutable_fit_res() = fit_res_to_proto(fit_res); - return cm; + + flwr_local::RecordSet out_recordset = recordset_from_fit_res(fit_res); + return out_recordset; } -flwr::proto::ClientMessage -_evaluate(flwr_local::Client *client, 
- flwr::proto::ServerMessage_EvaluateIns evaluate_msg) { - // Deserialize evaluate instruction - flwr_local::EvaluateIns evaluate_ins = evaluate_ins_from_proto(evaluate_msg); +flwr_local::RecordSet _evaluate(flwr_local::Client *client, + flwr::proto::RecordSet proto_recordset) { + flwr_local::RecordSet recordset = recordset_from_proto(proto_recordset); + flwr_local::EvaluateIns evaluate_ins = + recordset_to_evaluate_ins(recordset, true); // Perform evaluation flwr_local::EvaluateRes evaluate_res = client->evaluate(evaluate_ins); - // Serialize evaluate result - flwr::proto::ClientMessage cm; - *cm.mutable_evaluate_res() = evaluate_res_to_proto(evaluate_res); - return cm; + + flwr_local::RecordSet out_recordset = + recordset_from_evaluate_res(evaluate_res); + return out_recordset; } -std::tuple -handle(flwr_local::Client *client, flwr::proto::ServerMessage server_msg) { - if (server_msg.has_reconnect_ins()) { - std::tuple rec = - _reconnect(server_msg.reconnect_ins()); +std::tuple handle(flwr_local::Client *client, + flwr::proto::Task task) { + if (task.task_type() == "reconnect") { + std::tuple rec = _reconnect(task.recordset()); return std::make_tuple(std::get<0>(rec), std::get<1>(rec), false); } - if (server_msg.has_get_parameters_ins()) { + if (task.task_type() == "get_parameters") { return std::make_tuple(_get_parameters(client), 0, true); } - if (server_msg.has_fit_ins()) { - return std::make_tuple(_fit(client, server_msg.fit_ins()), 0, true); + if (task.task_type() == "train") { + return std::make_tuple(_fit(client, task.recordset()), 0, true); } - if (server_msg.has_evaluate_ins()) { - return std::make_tuple(_evaluate(client, server_msg.evaluate_ins()), 0, - true); + if (task.task_type() == "evaluate") { + return std::make_tuple(_evaluate(client, task.recordset()), 0, true); } throw "Unkown server message"; } std::tuple handle_task(flwr_local::Client *client, const flwr::proto::TaskIns &task_ins) { + flwr::proto::Task received_task = task_ins.task(); 
-#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" - if (!task_ins.task().has_legacy_server_message()) { - // TODO: Handle SecureAggregation - throw std::runtime_error("Task still needs legacy server message"); - } - flwr::proto::ServerMessage server_msg = - task_ins.task().legacy_server_message(); -#pragma GCC diagnostic pop - - std::tuple legacy_res = - handle(client, server_msg); - std::unique_ptr client_message = - std::make_unique(std::get<0>(legacy_res)); + std::tuple legacy_res = + handle(client, received_task); + auto conf_records = + recordset_from_proto(recordset_to_proto(std::get<0>(legacy_res))) + .getConfigsRecords(); flwr::proto::TaskRes task_res; + task_res.set_task_id(""); - task_res.set_group_id(""); - task_res.set_workload_id(0); + task_res.set_group_id(task_ins.group_id()); + task_res.set_run_id(task_ins.run_id()); std::unique_ptr task = std::make_unique(); -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" - task->set_allocated_legacy_client_message( - client_message.release()); // Ownership transferred to `task` -#pragma GCC diagnostic pop + std::unique_ptr proto_recordset_ptr = + std::make_unique( + recordset_to_proto(std::get<0>(legacy_res))); + + task->set_allocated_recordset(proto_recordset_ptr.release()); + task->set_task_type(received_task.task_type()); + task->set_ttl(3600); + task->set_created_at(std::chrono::duration_cast( + std::chrono::system_clock::now().time_since_epoch()) + .count()); + task->set_allocated_consumer( + std::make_unique(received_task.producer()).release()); + task->set_allocated_producer( + std::make_unique(received_task.consumer()).release()); task_res.set_allocated_task(task.release()); - return std::make_tuple(task_res, std::get<1>(legacy_res), - std::get<2>(legacy_res)); + + std::tuple tuple = std::make_tuple( + task_res, std::get<1>(legacy_res), std::get<2>(legacy_res)); + + return tuple; } diff --git a/src/cc/flwr/src/serde.cc 
b/src/cc/flwr/src/serde.cc index 2977915b57df..f3ad17a3386b 100644 --- a/src/cc/flwr/src/serde.cc +++ b/src/cc/flwr/src/serde.cc @@ -1,4 +1,6 @@ #include "serde.h" +#include "flwr/proto/recordset.pb.h" +#include "typing.h" /** * Serialize client parameters to protobuf parameters message @@ -185,3 +187,441 @@ evaluate_res_to_proto(flwr_local::EvaluateRes res) { return cres; } + +flwr::proto::Array array_to_proto(const flwr_local::Array &array) { + flwr::proto::Array protoArray; + protoArray.set_dtype(array.dtype); + for (int32_t dim : array.shape) { + protoArray.add_shape(dim); + } + protoArray.set_stype(array.stype); + protoArray.set_data({array.data.begin(), array.data.end()}); + return protoArray; +} + +flwr_local::Array array_from_proto(const flwr::proto::Array &protoArray) { + flwr_local::Array array; + array.dtype = protoArray.dtype(); + array.shape.assign(protoArray.shape().begin(), protoArray.shape().end()); + array.stype = protoArray.stype(); + + const std::string &protoData = protoArray.data(); + array.data.assign(protoData.begin(), protoData.end()); + + return array; +} + +flwr::proto::ParametersRecord +parameters_record_to_proto(const flwr_local::ParametersRecord &record) { + flwr::proto::ParametersRecord protoRecord; + for (const auto &[key, value] : record) { + *protoRecord.add_data_keys() = key; + *protoRecord.add_data_values() = array_to_proto(value); + } + return protoRecord; +} + +flwr_local::ParametersRecord +parameters_record_from_proto(const flwr::proto::ParametersRecord &protoRecord) { + flwr_local::ParametersRecord record; + + auto keys = protoRecord.data_keys(); + auto values = protoRecord.data_values(); + for (size_t i = 0; i < keys.size(); ++i) { + record[keys[i]] = array_from_proto(values[i]); + } + return record; +} + +flwr::proto::MetricsRecord +metrics_record_to_proto(const flwr_local::MetricsRecord &record) { + flwr::proto::MetricsRecord protoRecord; + + for (const auto &[key, value] : record) { + auto &data = 
(*protoRecord.mutable_data())[key]; + + if (std::holds_alternative(value)) { + data.set_sint64(std::get(value)); + } else if (std::holds_alternative(value)) { + data.set_double_(std::get(value)); + } else if (std::holds_alternative>(value)) { + auto &int_list = std::get>(value); + auto *list = data.mutable_sint64_list(); + for (int val : int_list) { + list->add_vals(val); + } + } else if (std::holds_alternative>(value)) { + auto &double_list = std::get>(value); + auto *list = data.mutable_double_list(); + for (double val : double_list) { + list->add_vals(val); + } + } + } + + return protoRecord; +} + +flwr_local::MetricsRecord +metrics_record_from_proto(const flwr::proto::MetricsRecord &protoRecord) { + flwr_local::MetricsRecord record; + + for (const auto &[key, value] : protoRecord.data()) { + if (value.has_sint64()) { + record[key] = (int)value.sint64(); + } else if (value.has_double_()) { + record[key] = (double)value.double_(); + } else if (value.has_sint64_list()) { + std::vector int_list; + for (const auto sint : value.sint64_list().vals()) { + int_list.push_back((int)sint); + } + record[key] = int_list; + } else if (value.has_double_list()) { + std::vector double_list; + for (const auto proto_double : value.double_list().vals()) { + double_list.push_back((double)proto_double); + } + record[key] = double_list; + } + } + return record; +} + +flwr::proto::ConfigsRecord +configs_record_to_proto(const flwr_local::ConfigsRecord &record) { + flwr::proto::ConfigsRecord protoRecord; + + for (const auto &[key, value] : record) { + auto &data = (*protoRecord.mutable_data())[key]; + + if (std::holds_alternative(value)) { + data.set_sint64(std::get(value)); + } else if (std::holds_alternative(value)) { + data.set_double_(std::get(value)); + } else if (std::holds_alternative(value)) { + data.set_bool_(std::get(value)); + } else if (std::holds_alternative(value)) { + data.set_string(std::get(value)); + } else if (std::holds_alternative>(value)) { + auto &list = 
*data.mutable_sint64_list(); + for (int val : std::get>(value)) { + list.add_vals(val); + } + } else if (std::holds_alternative>(value)) { + auto &list = *data.mutable_double_list(); + for (double val : std::get>(value)) { + list.add_vals(val); + } + } else if (std::holds_alternative>(value)) { + auto &list = *data.mutable_bool_list(); + for (bool val : std::get>(value)) { + list.add_vals(val); + } + } else if (std::holds_alternative>(value)) { + auto &list = *data.mutable_string_list(); + for (const auto &val : std::get>(value)) { + list.add_vals(val); + } + } + } + + return protoRecord; +} + +flwr_local::ConfigsRecord +configs_record_from_proto(const flwr::proto::ConfigsRecord &protoRecord) { + flwr_local::ConfigsRecord record; + + for (const auto &[key, value] : protoRecord.data()) { + if (value.has_sint64_list()) { + std::vector int_list; + for (const auto sint : value.sint64_list().vals()) { + int_list.push_back((int)sint); + } + record[key] = int_list; + } else if (value.has_double_list()) { + std::vector double_list; + for (const auto proto_double : value.double_list().vals()) { + double_list.push_back((double)proto_double); + } + record[key] = double_list; + } else if (value.has_bool_list()) { + std::vector tmp_list; + for (const auto proto_val : value.bool_list().vals()) { + tmp_list.push_back((bool)proto_val); + } + record[key] = tmp_list; + } else if (value.has_bytes_list()) { + std::vector tmp_list; + for (const auto proto_val : value.bytes_list().vals()) { + tmp_list.push_back(proto_val); + } + record[key] = tmp_list; + } else if (value.has_string_list()) { + std::vector tmp_list; + for (const auto proto_val : value.bytes_list().vals()) { + tmp_list.push_back(proto_val); + } + record[key] = tmp_list; + } else if (value.has_sint64()) { + record[key] = (int)value.sint64(); + } else if (value.has_double_()) { + record[key] = (double)value.double_(); + } else if (value.has_bool_()) { + record[key] = value.bool_(); + } else if (value.has_bytes()) { + 
record[key] = value.bytes(); + } else if (value.has_string()) { + record[key] = value.string(); + } + } + return record; +} + +flwr_local::Parameters +parametersrecord_to_parameters(const flwr_local::ParametersRecord &record, + bool keep_input) { + std::list tensors; + std::string tensor_type; + + for (const auto &[key, array] : record) { + tensors.push_back(array.data); + + if (tensor_type.empty()) { + tensor_type = array.stype; + } + } + + return flwr_local::Parameters(tensors, tensor_type); +} + +flwr_local::EvaluateIns +recordset_to_evaluate_ins(const flwr_local::RecordSet &recordset, + bool keep_input) { + auto parameters_record = + recordset.getParametersRecords().at("evaluateins.parameters"); + + flwr_local::Parameters params = + parametersrecord_to_parameters(parameters_record, keep_input); + + auto configs_record = recordset.getConfigsRecords().at("evaluateins.config"); + flwr_local::Config config_dict; + + for (const auto &[key, value] : configs_record) { + flwr_local::Scalar scalar; + + std::visit( + [&scalar](auto &&arg) { + using T = std::decay_t; + if constexpr (std::is_same_v) { + scalar.setInt(arg); + } else if constexpr (std::is_same_v) { + scalar.setDouble(arg); + } else if constexpr (std::is_same_v) { + scalar.setString(arg); + } else if constexpr (std::is_same_v) { + scalar.setBool(arg); + } else if constexpr (std::is_same_v>) { + } else if constexpr (std::is_same_v>) { + } else if constexpr (std::is_same_v>) { + } else if constexpr (std::is_same_v>) { + } + }, + value); + + config_dict[key] = scalar; + } + + return flwr_local::EvaluateIns(params, config_dict); +} + +flwr_local::ConfigsRecord +metrics_to_config_record(const flwr_local::Metrics metrics) { + flwr_local::ConfigsRecord config_record; + for (const auto &[key, value] : metrics) { + flwr_local::Scalar scalar_value = value; + if (scalar_value.getBool().has_value()) { + config_record[key] = scalar_value.getBool().value(); + } else if (scalar_value.getBytes().has_value()) { + 
config_record[key] = scalar_value.getBytes().value(); + } else if (scalar_value.getDouble().has_value()) { + config_record[key] = scalar_value.getDouble().value(); + } else if (scalar_value.getInt().has_value()) { + config_record[key] = scalar_value.getInt().value(); + } else if (scalar_value.getString().has_value()) { + config_record[key] = scalar_value.getString().value(); + } else { + config_record[key] = ""; + } + } + return config_record; +} + +flwr_local::FitIns recordset_to_fit_ins(const flwr_local::RecordSet &recordset, + bool keep_input) { + auto parameters_record = + recordset.getParametersRecords().at("fitins.parameters"); + + flwr_local::Parameters params = + parametersrecord_to_parameters(parameters_record, keep_input); + + auto configs_record = recordset.getConfigsRecords().at("fitins.config"); + flwr_local::Config config_dict; + + for (const auto &[key, value] : configs_record) { + flwr_local::Scalar scalar; + + std::visit( + [&scalar](auto &&arg) { + using T = std::decay_t; + if constexpr (std::is_same_v) { + scalar.setInt(arg); + } else if constexpr (std::is_same_v) { + scalar.setDouble(arg); + } else if constexpr (std::is_same_v) { + scalar.setString(arg); + } else if constexpr (std::is_same_v) { + scalar.setBool(arg); + } else if constexpr (std::is_same_v>) { + } else if constexpr (std::is_same_v>) { + } else if constexpr (std::is_same_v>) { + } else if constexpr (std::is_same_v>) { + } + }, + value); + + config_dict[key] = scalar; + } + + return flwr_local::FitIns(params, config_dict); +} + +flwr_local::ParametersRecord +parameters_to_parametersrecord(const flwr_local::Parameters ¶meters) { + flwr_local::ParametersRecord record; + const std::list tensors = parameters.getTensors(); + const std::string tensor_type = parameters.getTensor_type(); + + int idx = 0; + for (const auto &tensor : tensors) { + flwr_local::Array array{tensor_type, std::vector(), tensor_type, + tensor}; + record[std::to_string(idx++)] = array; + } + + return record; +} + 
+flwr_local::RecordSet recordset_from_get_parameters_res( + const flwr_local::ParametersRes &get_parameters_res) { + std::map parameters_record = { + {"getparametersres.parameters", + parameters_to_parametersrecord(get_parameters_res.getParameters())}}; + + std::map configs_record = { + {"getparametersres.status", {{"code", 0}, {"message", "Success"}}}}; + + flwr_local::RecordSet recordset = flwr_local::RecordSet(); + + recordset.setParametersRecords(parameters_record); + recordset.setConfigsRecords(configs_record); + + return recordset; +} + +flwr_local::RecordSet recordset_from_fit_res(const flwr_local::FitRes &fitres) { + std::map parameters_record = { + {"fitres.parameters", + parameters_to_parametersrecord(fitres.getParameters())}}; + + std::map metrics_record = { + {"fitres.num_examples", {{"num_examples", fitres.getNum_example()}}}}; + + std::map configs_record = { + {"fitres.status", {{"code", 0}, {"message", "Success"}}}}; + + if (fitres.getMetrics() != std::nullopt) { + configs_record["fitres.metrics"] = + metrics_to_config_record(fitres.getMetrics().value()); + } else { + configs_record["fitres.metrics"] = {}; + } + flwr_local::RecordSet recordset = flwr_local::RecordSet(); + + recordset.setParametersRecords(parameters_record); + recordset.setMetricsRecords(metrics_record); + recordset.setConfigsRecords(configs_record); + + return recordset; +} + +flwr_local::RecordSet +recordset_from_evaluate_res(const flwr_local::EvaluateRes &evaluate_res) { + std::map metrics_record = { + {"evaluateres.loss", {{"loss", evaluate_res.getLoss()}}}, + {"evaluateres.num_examples", + {{"num_examples", evaluate_res.getNum_example()}}}}; + + std::map configs_record = { + {"evaluateres.status", {{"code", 0}, {"message", "Success"}}}}; + + if (evaluate_res.getMetrics() != std::nullopt) { + configs_record["evaluateres.metrics"] = + metrics_to_config_record(evaluate_res.getMetrics().value()); + } else { + configs_record["evaluateres.metrics"] = {}; + } + + flwr_local::RecordSet 
recordset = flwr_local::RecordSet(); + + recordset.setMetricsRecords(metrics_record); + recordset.setConfigsRecords(configs_record); + + return recordset; +} + +flwr_local::RecordSet +recordset_from_proto(const flwr::proto::RecordSet &recordset) { + + std::map parametersRecords; + std::map metricsRecords; + std::map configsRecords; + + for (const auto &[key, param_record] : recordset.parameters()) { + parametersRecords[key] = parameters_record_from_proto(param_record); + } + + for (const auto &[key, metrics_record] : recordset.metrics()) { + metricsRecords[key] = metrics_record_from_proto(metrics_record); + } + + for (const auto &[key, configs_record] : recordset.configs()) { + configsRecords[key] = configs_record_from_proto(configs_record); + } + + return flwr_local::RecordSet(parametersRecords, metricsRecords, + configsRecords); +} + +flwr::proto::RecordSet +recordset_to_proto(const flwr_local::RecordSet &recordset) { + flwr::proto::RecordSet proto_recordset; + + for (const auto &[key, param_record] : recordset.getParametersRecords()) { + (*(proto_recordset.mutable_parameters()))[key] = + parameters_record_to_proto(param_record); + } + + for (const auto &[key, metrics_record] : recordset.getMetricsRecords()) { + (*(proto_recordset.mutable_metrics()))[key] = + metrics_record_to_proto(metrics_record); + } + + for (const auto &[key, configs_record] : recordset.getConfigsRecords()) { + (*(proto_recordset.mutable_configs()))[key] = + configs_record_to_proto(configs_record); + } + + return proto_recordset; +} diff --git a/src/cc/flwr/src/start.cc b/src/cc/flwr/src/start.cc index 52f193a09af4..06b520ba8a06 100644 --- a/src/cc/flwr/src/start.cc +++ b/src/cc/flwr/src/start.cc @@ -3,92 +3,31 @@ // cppcheck-suppress unusedFunction void start::start_client(std::string server_address, flwr_local::Client *client, int grpc_max_message_length) { - while (true) { - int sleep_duration = 0; - - // Set channel parameters - grpc::ChannelArguments args; - 
args.SetMaxReceiveMessageSize(grpc_max_message_length); - args.SetMaxSendMessageSize(grpc_max_message_length); - - // Establish an insecure gRPC connection to a gRPC server - std::shared_ptr channel = grpc::CreateCustomChannel( - server_address, grpc::InsecureChannelCredentials(), args); - - // Create stub - std::unique_ptr stub_ = - flwr::proto::FlowerService::NewStub(channel); - - // Read and write messages - grpc::ClientContext context; - std::shared_ptr> - reader_writer(stub_->Join(&context)); - flwr::proto::ServerMessage sm; - while (reader_writer->Read(&sm)) { - std::tuple receive = - handle(client, sm); - sleep_duration = std::get<1>(receive); - reader_writer->Write(std::get<0>(receive)); - if (std::get<2>(receive) == false) { - break; - } - } - reader_writer->WritesDone(); - - // Check connection status - grpc::Status status = reader_writer->Finish(); - - if (sleep_duration == 0) { - std::cout << "Disconnect and shut down." << std::endl; - break; - } - // Sleep and reconnect afterwards - // std::cout << "Disconnect, then re-establish connection after" << - // sleep_duration << "second(s)" << std::endl; Sleep(sleep_duration * 1000); - } -} + gRPCRereCommunicator communicator(server_address, grpc_max_message_length); -// cppcheck-suppress unusedFunction -void start::start_rere_client(std::string server_address, - flwr_local::Client *client, - int grpc_max_message_length) { while (true) { int sleep_duration = 0; - // Set channel parameters - grpc::ChannelArguments args; - args.SetMaxReceiveMessageSize(grpc_max_message_length); - args.SetMaxSendMessageSize(grpc_max_message_length); - - // Establish an insecure gRPC connection to a gRPC server - std::shared_ptr channel = grpc::CreateCustomChannel( - server_address, grpc::InsecureChannelCredentials(), args); - - // Create stub - std::unique_ptr stub_ = - flwr::proto::Fleet::NewStub(channel); - - // Read and write messages - - create_node(stub_); + create_node(&communicator); while (true) { - auto task_ins = 
receive(stub_); + auto task_ins = receive(&communicator); if (!task_ins) { std::this_thread::sleep_for(std::chrono::seconds(3)); continue; } + auto [task_res, sleep_duration, keep_going] = handle_task(client, task_ins.value()); - send(stub_, task_res); + + send(&communicator, task_res); if (!keep_going) { break; } } - delete_node(stub_); + delete_node(&communicator); if (sleep_duration == 0) { std::cout << "Disconnect and shut down." << std::endl; break; diff --git a/src/cc/flwr/src/task_handler.cc b/src/cc/flwr/src/task_handler.cc deleted file mode 100644 index b356ea12dd66..000000000000 --- a/src/cc/flwr/src/task_handler.cc +++ /dev/null @@ -1,52 +0,0 @@ -#include "task_handler.h" - -bool validate_task_ins(const flwr::proto::TaskIns &task_ins, - const bool discard_reconnect_ins) { -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" - return !(!task_ins.has_task() || - (!task_ins.task().has_legacy_server_message() && - !task_ins.task().has_sa()) || - (discard_reconnect_ins && - task_ins.task().legacy_server_message().has_reconnect_ins())); -#pragma GCC diagnostic pop -} - -bool validate_task_res(const flwr::proto::TaskRes &task_res) { - // Retrieve initialized fields in TaskRes - return (task_res.task_id().empty() && task_res.group_id().empty() && - task_res.workload_id() == 0 && !task_res.task().has_producer() && - !task_res.task().has_producer() && !task_res.task().has_consumer() && - task_res.task().ancestry_size() == 0); -} - -flwr::proto::TaskRes -configure_task_res(const flwr::proto::TaskRes &task_res, - const flwr::proto::TaskIns &ref_task_ins, - const flwr::proto::Node &producer) { - flwr::proto::TaskRes result_task_res; - - // Setting scalar fields - result_task_res.set_task_id(""); // This will be generated by the server - result_task_res.set_group_id(ref_task_ins.group_id()); - result_task_res.set_workload_id(ref_task_ins.workload_id()); - - // Merge the task from the input task_res - *result_task_res.mutable_task() 
= task_res.task(); - - // Construct and set the producer and consumer for the task - std::unique_ptr new_producer = - std::make_unique(producer); - result_task_res.mutable_task()->set_allocated_producer( - new_producer.release()); - - std::unique_ptr new_consumer = - std::make_unique(ref_task_ins.task().producer()); - result_task_res.mutable_task()->set_allocated_consumer( - new_consumer.release()); - - // Set ancestry in the task - result_task_res.mutable_task()->add_ancestry(ref_task_ins.task_id()); - - return result_task_res; -} diff --git a/src/docker/base/alpine/Dockerfile b/src/docker/base/alpine/Dockerfile new file mode 100644 index 000000000000..d1c24d7d5480 --- /dev/null +++ b/src/docker/base/alpine/Dockerfile @@ -0,0 +1,85 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Multi-stage base image build +# +# Building the base image for ARM64 requires installing some additional system dependencies to +# compile the grpcio package, as they do not provide a pre-built package. However, we don't want +# the dependencies in the final base image as they are only needed to compile the package. +# That's why we're creating a multi-stage build. 
When installing the flwr, we are using a +# virtual environment to keep all files in a single isolated directory as described here: +# https://pythonspeed.com/articles/multi-stage-docker-python/ + +# hadolint global ignore=DL3018 +ARG PYTHON_VERSION=3.11 +ARG DISTRO=alpine +ARG DISTRO_VERSION=3.19 +FROM python:${PYTHON_VERSION}-${DISTRO}${DISTRO_VERSION} as compile + +# Install system dependencies +RUN apk add --no-cache \ + # require for compiling grpcio on ARM64 + g++ \ + libffi-dev \ + # create virtual env + && python -m venv /opt/venv + +# Make sure we use the virtualenv +ENV PATH="/opt/venv/bin:$PATH" + +# Install specific version of pip, setuptools and flwr +ARG PIP_VERSION +ARG SETUPTOOLS_VERSION +ARG FLWR_VERSION +ARG FLWR_PACKAGE=flwr +RUN pip install -U --no-cache-dir \ + pip==${PIP_VERSION} \ + setuptools==${SETUPTOOLS_VERSION} \ + ${FLWR_PACKAGE}==${FLWR_VERSION} + +FROM python:${PYTHON_VERSION}-${DISTRO}${DISTRO_VERSION} as base + +# required by the grpc package +RUN apk add --no-cache \ + libstdc++ + +COPY --from=compile /opt/venv /opt/venv + +# Make sure we use the virtualenv +ENV PATH="/opt/venv/bin:$PATH" \ + # Send stdout and stderr stream directly to the terminal. Ensures that no + # output is retained in a buffer if the application crashes. + PYTHONUNBUFFERED=1 \ + # Typically, bytecode is created on the first invocation to speed up following invocation. + # However, in Docker we only make a single invocation (when we start the container). + # Therefore, we can disable bytecode writing. + PYTHONDONTWRITEBYTECODE=1 \ + # Ensure that python encoding is always UTF-8. 
+ PYTHONIOENCODING=UTF-8 \ + LANG=C.UTF-8 \ + LC_ALL=C.UTF-8 + +# add non-root user +RUN adduser \ + --no-create-home \ + --disabled-password \ + --gecos "" \ + --uid 49999 app \ + && mkdir -p /app \ + && chown -R app:app /app + +WORKDIR /app +USER app +ENV HOME=/app diff --git a/src/docker/base/Dockerfile b/src/docker/base/ubuntu/Dockerfile similarity index 67% rename from src/docker/base/Dockerfile rename to src/docker/base/ubuntu/Dockerfile index 9cd410ba3fb5..5a4adf09df2d 100644 --- a/src/docker/base/Dockerfile +++ b/src/docker/base/ubuntu/Dockerfile @@ -1,7 +1,21 @@ -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== -ARG UBUNTU_VERSION=22.04 -FROM ubuntu:$UBUNTU_VERSION as base +ARG DISTRO=ubuntu +ARG DISTRO_VERSION=22.04 +FROM $DISTRO:$DISTRO_VERSION as base ENV DEBIAN_FRONTEND noninteractive # Send stdout and stderr stream directly to the terminal. Ensures that no diff --git a/src/docker/client/Dockerfile b/src/docker/client/Dockerfile deleted file mode 100644 index 0755a7989281..000000000000 --- a/src/docker/client/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. 
- -ARG BASE_REPOSITORY=flwr/base -ARG BASE_IMAGE_TAG -FROM $BASE_REPOSITORY:$BASE_IMAGE_TAG - -ARG FLWR_VERSION -RUN python -m pip install -U --no-cache-dir flwr[rest]==${FLWR_VERSION} diff --git a/src/docker/server/Dockerfile b/src/docker/server/Dockerfile deleted file mode 100644 index faa9cf2e56fe..000000000000 --- a/src/docker/server/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. - -ARG BASE_REPOSITORY=flwr/base -ARG BASE_IMAGE_TAG=py3.11-ubuntu22.04 -FROM $BASE_REPOSITORY:$BASE_IMAGE_TAG as server - -WORKDIR /app -ARG FLWR_VERSION -RUN python -m pip install -U --no-cache-dir flwr[rest]==${FLWR_VERSION} -ENTRYPOINT ["python", "-c", "from flwr.server import run_superlink; run_superlink()"] - -# Test if Flower can be successfully installed and imported -FROM server as test -RUN python -c "from flwr.server import run_superlink" diff --git a/src/py/flwr_experimental/baseline/tf_cifar/__init__.py b/src/docker/serverapp/Dockerfile similarity index 51% rename from src/py/flwr_experimental/baseline/tf_cifar/__init__.py rename to src/docker/serverapp/Dockerfile index ad2e33481ba8..22b464a3838c 100644 --- a/src/py/flwr_experimental/baseline/tf_cifar/__init__.py +++ b/src/docker/serverapp/Dockerfile @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,11 +12,21 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""Flower baseline using TensorFlow for CIFAR-10/100 image classification.""" +ARG BASE_REPOSITORY=flwr/base +ARG PYTHON_VERSION=3.11 +ARG UBUNTU_VERSION=ubuntu22.04 +FROM $BASE_REPOSITORY:py${PYTHON_VERSION}-${UBUNTU_VERSION} -DEFAULT_SERVER_ADDRESS = "[::]:8080" +ARG FLWR_PACKAGE=flwr +ARG FLWR_VERSION +RUN python -m pip install -U --no-cache-dir \ + ${FLWR_PACKAGE}==${FLWR_VERSION} && \ + # Without pyenv rehash the executable cannot be found. + # pyenv rehash is usually called via the shell by adding + # `pyenv init -` in the shell profile, but that doesn't work + # well in docker + pyenv rehash -SEED = 2020 - -NUM_CLASSES = 10 +WORKDIR /app +ENTRYPOINT ["flower-server-app"] diff --git a/src/py/flwr_experimental/baseline/run.sh b/src/docker/superlink/Dockerfile old mode 100755 new mode 100644 similarity index 51% rename from src/py/flwr_experimental/baseline/run.sh rename to src/docker/superlink/Dockerfile index 9cb5b76626d3..acf06f66f2fb --- a/src/py/flwr_experimental/baseline/run.sh +++ b/src/docker/superlink/Dockerfile @@ -1,6 +1,4 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,14 +13,20 @@ # limitations under the License. 
# ============================================================================== -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../../../../ +ARG BASE_REPOSITORY=flwr/base +ARG PYTHON_VERSION=3.11 +ARG UBUNTU_VERSION=ubuntu22.04 +FROM $BASE_REPOSITORY:py${PYTHON_VERSION}-${UBUNTU_VERSION} -# Build `.whl` from current state -./dev/build.sh +ARG FLWR_PACKAGE=flwr +ARG FLWR_VERSION +RUN python -m pip install -U --no-cache-dir \ + ${FLWR_PACKAGE}==${FLWR_VERSION} && \ + # Without pyenv rehash the executable cannot be found. + # pyenv rehash is usually called via the shell by adding + # `pyenv init -` in the shell profile, but that doesn't work + # well in docker + pyenv rehash -# Execute `run.py` -python -m flwr_experimental.baseline.run \ - --adapter="docker" \ - --baseline="tf_fashion_mnist" \ - --setting="minimal" +WORKDIR /app +ENTRYPOINT ["flower-superlink"] diff --git a/src/py/flwr_experimental/baseline/ip.py b/src/docker/supernode/Dockerfile similarity index 61% rename from src/py/flwr_experimental/baseline/ip.py rename to src/docker/supernode/Dockerfile index 25efefa40d0a..8117dcc295df 100644 --- a/src/py/flwr_experimental/baseline/ip.py +++ b/src/docker/supernode/Dockerfile @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,14 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""Provide method to get the ip address of a network interface.""" +ARG BASE_REPOSITORY=flwr/base +ARG PYTHON_VERSION=3.11 +ARG UBUNTU_VERSION=ubuntu22.04 +FROM $BASE_REPOSITORY:py${PYTHON_VERSION}-${UBUNTU_VERSION} -from subprocess import check_output +ARG FLWR_PACKAGE=flwr +ARG FLWR_VERSION +RUN python -m pip install -U --no-cache-dir \ + ${FLWR_PACKAGE}==${FLWR_VERSION} && \ + pyenv rehash - -def get_ip_address() -> str: - """Return IP address.""" - ips = check_output(["hostname", "--all-ip-addresses"]) - ips_decoded = ips.decode("utf-8").split(" ") - return ips_decoded[0] +WORKDIR /app +ENTRYPOINT ["flower-client-app"] diff --git a/src/proto/flwr/proto/driver.proto b/src/proto/flwr/proto/driver.proto index bc0062c4a51f..54e6b6b41b68 100644 --- a/src/proto/flwr/proto/driver.proto +++ b/src/proto/flwr/proto/driver.proto @@ -35,7 +35,10 @@ service Driver { } // CreateRun -message CreateRunRequest {} +message CreateRunRequest { + string fab_id = 1; + string fab_version = 2; +} message CreateRunResponse { sint64 run_id = 1; } // GetNodes messages diff --git a/src/proto/flwr/proto/fleet.proto b/src/proto/flwr/proto/fleet.proto index fcb301181f5a..df6b5843023d 100644 --- a/src/proto/flwr/proto/fleet.proto +++ b/src/proto/flwr/proto/fleet.proto @@ -34,10 +34,12 @@ service Fleet { // // HTTP API path: /api/v1/fleet/push-task-res rpc PushTaskRes(PushTaskResRequest) returns (PushTaskResResponse) {} + + rpc GetRun(GetRunRequest) returns (GetRunResponse) {} } // CreateNode messages -message CreateNodeRequest {} +message CreateNodeRequest { double ping_interval = 1; } message CreateNodeResponse { Node node = 1; } // DeleteNode messages @@ -45,7 +47,10 @@ message DeleteNodeRequest { Node node = 1; } message DeleteNodeResponse {} // Ping messages -message PingRequest { Node node = 1; } +message PingRequest { + Node node = 1; + double ping_interval = 2; +} message PingResponse { bool success = 1; 
} // PullTaskIns messages @@ -65,4 +70,13 @@ message PushTaskResResponse { map results = 2; } +// GetRun messages +message Run { + sint64 run_id = 1; + string fab_id = 2; + string fab_version = 3; +} +message GetRunRequest { sint64 run_id = 1; } +message GetRunResponse { Run run = 1; } + message Reconnect { uint64 reconnect = 1; } diff --git a/src/proto/flwr/proto/task.proto b/src/proto/flwr/proto/task.proto index 4c86ebae9562..cf77d110acab 100644 --- a/src/proto/flwr/proto/task.proto +++ b/src/proto/flwr/proto/task.proto @@ -25,13 +25,14 @@ import "flwr/proto/error.proto"; message Task { Node producer = 1; Node consumer = 2; - string created_at = 3; + double created_at = 3; string delivered_at = 4; - double ttl = 5; - repeated string ancestry = 6; - string task_type = 7; - RecordSet recordset = 8; - Error error = 9; + double pushed_at = 5; + double ttl = 6; + repeated string ancestry = 7; + string task_type = 8; + RecordSet recordset = 9; + Error error = 10; } message TaskIns { diff --git a/src/py/flwr/cli/app.py b/src/py/flwr/cli/app.py index 1c6ee6c97841..e1417f1267ac 100644 --- a/src/py/flwr/cli/app.py +++ b/src/py/flwr/cli/app.py @@ -16,6 +16,7 @@ import typer +from .build import build from .example import example from .new import new from .run import run @@ -32,6 +33,7 @@ app.command()(new) app.command()(example) app.command()(run) +app.command()(build) if __name__ == "__main__": app() diff --git a/src/py/flwr/cli/build.py b/src/py/flwr/cli/build.py new file mode 100644 index 000000000000..37753e5b57b1 --- /dev/null +++ b/src/py/flwr/cli/build.py @@ -0,0 +1,151 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flower command line interface `build` command.""" + +import hashlib +import os +import zipfile +from pathlib import Path +from typing import Optional + +import pathspec +import typer +from typing_extensions import Annotated + +from .config_utils import load_and_validate_with_defaults +from .utils import is_valid_project_name + + +# pylint: disable=too-many-locals +def build( + directory: Annotated[ + Optional[Path], + typer.Option(help="The Flower project directory to bundle into a FAB"), + ] = None, +) -> None: + """Build a Flower project into a Flower App Bundle (FAB). 
+ + You can run `flwr build` without any argument to bundle the current directory: + + `flwr build` + + You can also build a specific directory: + + `flwr build --directory ./projects/flower-hello-world` + """ + if directory is None: + directory = Path.cwd() + + directory = directory.resolve() + if not directory.is_dir(): + typer.secho( + f"❌ The path {directory} is not a valid directory.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + if not is_valid_project_name(directory.name): + typer.secho( + f"❌ The project name {directory.name} is invalid, " + "a valid project name must start with a letter or an underscore, " + "and can only contain letters, digits, and underscores.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + conf, errors, warnings = load_and_validate_with_defaults( + directory / "pyproject.toml" + ) + if conf is None: + typer.secho( + "Project configuration could not be loaded.\npyproject.toml is invalid:\n" + + "\n".join([f"- {line}" for line in errors]), + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + if warnings: + typer.secho( + "Project configuration is missing the following " + "recommended properties:\n" + "\n".join([f"- {line}" for line in warnings]), + fg=typer.colors.RED, + bold=True, + ) + + # Load .gitignore rules if present + ignore_spec = _load_gitignore(directory) + + # Set the name of the zip file + fab_filename = ( + f"{conf['flower']['publisher']}" + f".{directory.name}" + f".{conf['project']['version'].replace('.', '-')}.fab" + ) + list_file_content = "" + + allowed_extensions = {".py", ".toml", ".md"} + + with zipfile.ZipFile(fab_filename, "w", zipfile.ZIP_DEFLATED) as fab_file: + for root, _, files in os.walk(directory, topdown=True): + # Filter directories and files based on .gitignore + files = [ + f + for f in files + if not ignore_spec.match_file(Path(root) / f) + and f != fab_filename + and Path(f).suffix in allowed_extensions + ] + + for file in files: + 
file_path = Path(root) / file + archive_path = file_path.relative_to(directory) + fab_file.write(file_path, archive_path) + + # Calculate file info + sha256_hash = _get_sha256_hash(file_path) + file_size_bits = os.path.getsize(file_path) * 8 # size in bits + list_file_content += f"{archive_path},{sha256_hash},{file_size_bits}\n" + + # Add CONTENT and CONTENT.jwt to the zip file + fab_file.writestr(".info/CONTENT", list_file_content) + + typer.secho( + f"🎊 Successfully built {fab_filename}.", fg=typer.colors.GREEN, bold=True + ) + + +def _get_sha256_hash(file_path: Path) -> str: + """Calculate the SHA-256 hash of a file.""" + sha256 = hashlib.sha256() + with open(file_path, "rb") as f: + while True: + data = f.read(65536) # Read in 64kB blocks + if not data: + break + sha256.update(data) + return sha256.hexdigest() + + +def _load_gitignore(directory: Path) -> pathspec.PathSpec: + """Load and parse .gitignore file, returning a pathspec.""" + gitignore_path = directory / ".gitignore" + patterns = ["__pycache__/"] # Default pattern + if gitignore_path.exists(): + with open(gitignore_path, encoding="UTF-8") as file: + patterns.extend(file.readlines()) + return pathspec.PathSpec.from_lines("gitwildmatch", patterns) diff --git a/src/py/flwr/cli/flower_toml.py b/src/py/flwr/cli/config_utils.py similarity index 75% rename from src/py/flwr/cli/flower_toml.py rename to src/py/flwr/cli/config_utils.py index 103f83532054..bca35a51dde5 100644 --- a/src/py/flwr/cli/flower_toml.py +++ b/src/py/flwr/cli/config_utils.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""Utility to validate the `flower.toml` file.""" +"""Utility to validate the `pyproject.toml` file.""" -import os +from pathlib import Path from typing import Any, Dict, List, Optional, Tuple import tomli @@ -23,9 +23,9 @@ def load_and_validate_with_defaults( - path: Optional[str] = None, + path: Optional[Path] = None, ) -> Tuple[Optional[Dict[str, Any]], List[str], List[str]]: - """Load and validate flower.toml as dict. + """Load and validate pyproject.toml as dict. Returns ------- @@ -37,7 +37,8 @@ def load_and_validate_with_defaults( if config is None: errors = [ - "Project configuration could not be loaded. flower.toml does not exist." + "Project configuration could not be loaded. " + "`pyproject.toml` does not exist." ] return (None, errors, []) @@ -57,24 +58,25 @@ def load_and_validate_with_defaults( return (config, errors, warnings) -def load(path: Optional[str] = None) -> Optional[Dict[str, Any]]: - """Load flower.toml and return as dict.""" +def load(path: Optional[Path] = None) -> Optional[Dict[str, Any]]: + """Load pyproject.toml and return as dict.""" if path is None: - cur_dir = os.getcwd() - toml_path = os.path.join(cur_dir, "flower.toml") + cur_dir = Path.cwd() + toml_path = cur_dir / "pyproject.toml" else: toml_path = path - if not os.path.isfile(toml_path): + if not toml_path.is_file(): return None - with open(toml_path, encoding="utf-8") as toml_file: + with toml_path.open(encoding="utf-8") as toml_file: data = tomli.loads(toml_file.read()) return data +# pylint: disable=too-many-branches def validate_fields(config: Dict[str, Any]) -> Tuple[bool, List[str], List[str]]: - """Validate flower.toml fields.""" + """Validate pyproject.toml fields.""" errors = [] warnings = [] @@ -94,19 +96,22 @@ def validate_fields(config: Dict[str, Any]) -> Tuple[bool, List[str], List[str]] if "flower" not in config: errors.append("Missing [flower] section") - elif "components" not in 
config["flower"]: - errors.append("Missing [flower.components] section") else: - if "serverapp" not in config["flower"]["components"]: - errors.append('Property "serverapp" missing in [flower.components]') - if "clientapp" not in config["flower"]["components"]: - errors.append('Property "clientapp" missing in [flower.components]') + if "publisher" not in config["flower"]: + errors.append('Property "publisher" missing in [flower]') + if "components" not in config["flower"]: + errors.append("Missing [flower.components] section") + else: + if "serverapp" not in config["flower"]["components"]: + errors.append('Property "serverapp" missing in [flower.components]') + if "clientapp" not in config["flower"]["components"]: + errors.append('Property "clientapp" missing in [flower.components]') return len(errors) == 0, errors, warnings def validate(config: Dict[str, Any]) -> Tuple[bool, List[str], List[str]]: - """Validate flower.toml.""" + """Validate pyproject.toml.""" is_valid, errors, warnings = validate_fields(config) if not is_valid: diff --git a/src/py/flwr/cli/flower_toml_test.py b/src/py/flwr/cli/config_utils_test.py similarity index 61% rename from src/py/flwr/cli/flower_toml_test.py rename to src/py/flwr/cli/config_utils_test.py index 72a52e4e8b9b..b47206249dfc 100644 --- a/src/py/flwr/cli/flower_toml_test.py +++ b/src/py/flwr/cli/config_utils_test.py @@ -16,17 +16,35 @@ import os import textwrap +from pathlib import Path from typing import Any, Dict -from .flower_toml import load, validate, validate_fields +from .config_utils import load, validate, validate_fields -def test_load_flower_toml_load_from_cwd(tmp_path: str) -> None: +def test_load_pyproject_toml_load_from_cwd(tmp_path: Path) -> None: """Test if load_template returns a string.""" # Prepare - flower_toml_content = """ + pyproject_toml_content = """ + [build-system] + requires = ["hatchling"] + build-backend = "hatchling.build" + [project] name = "fedgpt" + version = "1.0.0" + description = "" + authors = 
[ + { name = "The Flower Authors", email = "hello@flower.ai" }, + ] + license = {text = "Apache License (2.0)"} + dependencies = [ + "flwr[simulation]>=1.8.0,<2.0", + "numpy>=1.21.0", + ] + + [flower] + publisher = "flwrlabs" [flower.components] serverapp = "fedgpt.server:app" @@ -39,10 +57,17 @@ def test_load_flower_toml_load_from_cwd(tmp_path: str) -> None: count = 10 # optional """ expected_config = { + "build-system": {"build-backend": "hatchling.build", "requires": ["hatchling"]}, "project": { "name": "fedgpt", + "version": "1.0.0", + "description": "", + "authors": [{"email": "hello@flower.ai", "name": "The Flower Authors"}], + "license": {"text": "Apache License (2.0)"}, + "dependencies": ["flwr[simulation]>=1.8.0,<2.0", "numpy>=1.21.0"], }, "flower": { + "publisher": "flwrlabs", "components": { "serverapp": "fedgpt.server:app", "clientapp": "fedgpt.client:app", @@ -55,13 +80,13 @@ def test_load_flower_toml_load_from_cwd(tmp_path: str) -> None: } # Current directory - origin = os.getcwd() + origin = Path.cwd() try: # Change into the temporary directory os.chdir(tmp_path) - with open("flower.toml", "w", encoding="utf-8") as f: - f.write(textwrap.dedent(flower_toml_content)) + with open("pyproject.toml", "w", encoding="utf-8") as f: + f.write(textwrap.dedent(pyproject_toml_content)) # Execute config = load() @@ -72,12 +97,29 @@ def test_load_flower_toml_load_from_cwd(tmp_path: str) -> None: os.chdir(origin) -def test_load_flower_toml_from_path(tmp_path: str) -> None: +def test_load_pyproject_toml_from_path(tmp_path: Path) -> None: """Test if load_template returns a string.""" # Prepare - flower_toml_content = """ + pyproject_toml_content = """ + [build-system] + requires = ["hatchling"] + build-backend = "hatchling.build" + [project] name = "fedgpt" + version = "1.0.0" + description = "" + authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, + ] + license = {text = "Apache License (2.0)"} + dependencies = [ + 
"flwr[simulation]>=1.8.0,<2.0", + "numpy>=1.21.0", + ] + + [flower] + publisher = "flwrlabs" [flower.components] serverapp = "fedgpt.server:app" @@ -90,10 +132,17 @@ def test_load_flower_toml_from_path(tmp_path: str) -> None: count = 10 # optional """ expected_config = { + "build-system": {"build-backend": "hatchling.build", "requires": ["hatchling"]}, "project": { "name": "fedgpt", + "version": "1.0.0", + "description": "", + "authors": [{"email": "hello@flower.ai", "name": "The Flower Authors"}], + "license": {"text": "Apache License (2.0)"}, + "dependencies": ["flwr[simulation]>=1.8.0,<2.0", "numpy>=1.21.0"], }, "flower": { + "publisher": "flwrlabs", "components": { "serverapp": "fedgpt.server:app", "clientapp": "fedgpt.client:app", @@ -111,11 +160,11 @@ def test_load_flower_toml_from_path(tmp_path: str) -> None: try: # Change into the temporary directory os.chdir(tmp_path) - with open("flower.toml", "w", encoding="utf-8") as f: - f.write(textwrap.dedent(flower_toml_content)) + with open("pyproject.toml", "w", encoding="utf-8") as f: + f.write(textwrap.dedent(pyproject_toml_content)) # Execute - config = load(path=os.path.join(tmp_path, "flower.toml")) + config = load(path=tmp_path / "pyproject.toml") # Assert assert config == expected_config @@ -123,8 +172,8 @@ def test_load_flower_toml_from_path(tmp_path: str) -> None: os.chdir(origin) -def test_validate_flower_toml_fields_empty() -> None: - """Test that validate_flower_toml_fields fails correctly.""" +def test_validate_pyproject_toml_fields_empty() -> None: + """Test that validate_pyproject_toml_fields fails correctly.""" # Prepare config: Dict[str, Any] = {} @@ -137,8 +186,8 @@ def test_validate_flower_toml_fields_empty() -> None: assert len(warnings) == 0 -def test_validate_flower_toml_fields_no_flower() -> None: - """Test that validate_flower_toml_fields fails correctly.""" +def test_validate_pyproject_toml_fields_no_flower() -> None: + """Test that validate_pyproject_toml_fields fails correctly.""" # 
Prepare config = { "project": { @@ -159,8 +208,8 @@ def test_validate_flower_toml_fields_no_flower() -> None: assert len(warnings) == 0 -def test_validate_flower_toml_fields_no_flower_components() -> None: - """Test that validate_flower_toml_fields fails correctly.""" +def test_validate_pyproject_toml_fields_no_flower_components() -> None: + """Test that validate_pyproject_toml_fields fails correctly.""" # Prepare config = { "project": { @@ -178,12 +227,12 @@ def test_validate_flower_toml_fields_no_flower_components() -> None: # Assert assert not is_valid - assert len(errors) == 1 + assert len(errors) == 2 assert len(warnings) == 0 -def test_validate_flower_toml_fields_no_server_and_client_app() -> None: - """Test that validate_flower_toml_fields fails correctly.""" +def test_validate_pyproject_toml_fields_no_server_and_client_app() -> None: + """Test that validate_pyproject_toml_fields fails correctly.""" # Prepare config = { "project": { @@ -201,12 +250,12 @@ def test_validate_flower_toml_fields_no_server_and_client_app() -> None: # Assert assert not is_valid - assert len(errors) == 2 + assert len(errors) == 3 assert len(warnings) == 0 -def test_validate_flower_toml_fields() -> None: - """Test that validate_flower_toml_fields succeeds correctly.""" +def test_validate_pyproject_toml_fields() -> None: + """Test that validate_pyproject_toml_fields succeeds correctly.""" # Prepare config = { "project": { @@ -216,7 +265,10 @@ def test_validate_flower_toml_fields() -> None: "license": "", "authors": [], }, - "flower": {"components": {"serverapp": "", "clientapp": ""}}, + "flower": { + "publisher": "flwrlabs", + "components": {"serverapp": "", "clientapp": ""}, + }, } # Execute @@ -228,8 +280,8 @@ def test_validate_flower_toml_fields() -> None: assert len(warnings) == 0 -def test_validate_flower_toml() -> None: - """Test that validate_flower_toml succeeds correctly.""" +def test_validate_pyproject_toml() -> None: + """Test that validate_pyproject_toml succeeds 
correctly.""" # Prepare config = { "project": { @@ -240,10 +292,11 @@ def test_validate_flower_toml() -> None: "authors": [], }, "flower": { + "publisher": "flwrlabs", "components": { "serverapp": "flwr.cli.run:run", "clientapp": "flwr.cli.run:run", - } + }, }, } @@ -256,8 +309,8 @@ def test_validate_flower_toml() -> None: assert not warnings -def test_validate_flower_toml_fail() -> None: - """Test that validate_flower_toml fails correctly.""" +def test_validate_pyproject_toml_fail() -> None: + """Test that validate_pyproject_toml fails correctly.""" # Prepare config = { "project": { @@ -268,10 +321,11 @@ def test_validate_flower_toml_fail() -> None: "authors": [], }, "flower": { + "publisher": "flwrlabs", "components": { "serverapp": "flwr.cli.run:run", "clientapp": "flwr.cli.run:runa", - } + }, }, } diff --git a/src/py/flwr/cli/example.py b/src/py/flwr/cli/example.py index 625ca8729640..4790e72d48bf 100644 --- a/src/py/flwr/cli/example.py +++ b/src/py/flwr/cli/example.py @@ -39,7 +39,9 @@ def example() -> None: with urllib.request.urlopen(examples_directory_url) as res: data = json.load(res) example_names = [ - item["path"] for item in data["tree"] if item["path"] not in [".gitignore"] + item["path"] + for item in data["tree"] + if item["path"] not in [".gitignore", "doc"] ] example_name = prompt_options( diff --git a/src/py/flwr/cli/new/new.py b/src/py/flwr/cli/new/new.py index 7eb47e3e3548..9bbc016de1a8 100644 --- a/src/py/flwr/cli/new/new.py +++ b/src/py/flwr/cli/new/new.py @@ -15,6 +15,7 @@ """Flower command line interface `new` command.""" import os +import re from enum import Enum from string import Template from typing import Dict, Optional @@ -22,7 +23,12 @@ import typer from typing_extensions import Annotated -from ..utils import prompt_options, prompt_text +from ..utils import ( + is_valid_project_name, + prompt_options, + prompt_text, + sanitize_project_name, +) class MlFramework(str, Enum): @@ -31,6 +37,10 @@ class MlFramework(str, Enum): NUMPY = 
"NumPy" PYTORCH = "PyTorch" TENSORFLOW = "TensorFlow" + JAX = "JAX" + HUGGINGFACE = "HF" + MLX = "MLX" + SKLEARN = "sklearn" class TemplateNotFound(Exception): @@ -53,8 +63,9 @@ def render_template(template: str, data: Dict[str, str]) -> str: """Render template.""" tpl_file = load_template(template) tpl = Template(tpl_file) - result = tpl.substitute(data) - return result + if ".gitignore" not in template: + return tpl.substitute(data) + return tpl.template def create_file(file_path: str, content: str) -> None: @@ -79,25 +90,31 @@ def new( Optional[MlFramework], typer.Option(case_sensitive=False, help="The ML framework to use"), ] = None, + username: Annotated[ + Optional[str], + typer.Option(case_sensitive=False, help="The Flower username of the author"), + ] = None, ) -> None: """Create new Flower project.""" - print( - typer.style( - f"🔨 Creating Flower project {project_name}...", - fg=typer.colors.GREEN, - bold=True, + if project_name is None: + project_name = prompt_text("Please provide the project name") + if not is_valid_project_name(project_name): + project_name = prompt_text( + "Please provide a name that only contains " + "characters in {'-', a-zA-Z', '0-9'}", + predicate=is_valid_project_name, + default=sanitize_project_name(project_name), ) - ) - if project_name is None: - project_name = prompt_text("Please provide project name") + if username is None: + username = prompt_text("Please provide your Flower username") if framework is not None: framework_str = str(framework.value) else: framework_value = prompt_options( "Please select ML framework by typing in the number", - [mlf.value for mlf in MlFramework], + sorted([mlf.value for mlf in MlFramework]), ) selected_value = [ name @@ -108,27 +125,53 @@ def new( framework_str = framework_str.lower() + print( + typer.style( + f"\n🔨 Creating Flower project {project_name}...", + fg=typer.colors.GREEN, + bold=True, + ) + ) + # Set project directory path cwd = os.getcwd() - pnl = project_name.lower() - project_dir 
= os.path.join(cwd, pnl) + package_name = re.sub(r"[-_.]+", "-", project_name).lower() + import_name = package_name.replace("-", "_") + project_dir = os.path.join(cwd, package_name) # List of files to render files = { + ".gitignore": {"template": "app/.gitignore.tpl"}, "README.md": {"template": "app/README.md.tpl"}, - "requirements.txt": {"template": f"app/requirements.{framework_str}.txt.tpl"}, - "flower.toml": {"template": "app/flower.toml.tpl"}, "pyproject.toml": {"template": f"app/pyproject.{framework_str}.toml.tpl"}, - f"{pnl}/__init__.py": {"template": "app/code/__init__.py.tpl"}, - f"{pnl}/server.py": {"template": f"app/code/server.{framework_str}.py.tpl"}, - f"{pnl}/client.py": {"template": f"app/code/client.{framework_str}.py.tpl"}, + f"{import_name}/__init__.py": {"template": "app/code/__init__.py.tpl"}, + f"{import_name}/server.py": { + "template": f"app/code/server.{framework_str}.py.tpl" + }, + f"{import_name}/client.py": { + "template": f"app/code/client.{framework_str}.py.tpl" + }, } - # In case framework is MlFramework.PYTORCH generate additionally the task.py file - if framework_str == MlFramework.PYTORCH.value.lower(): - files[f"{pnl}/task.py"] = {"template": f"app/code/task.{framework_str}.py.tpl"} - - context = {"project_name": project_name} + # Depending on the framework, generate task.py file + frameworks_with_tasks = [ + MlFramework.PYTORCH.value.lower(), + MlFramework.JAX.value.lower(), + MlFramework.HUGGINGFACE.value.lower(), + MlFramework.MLX.value.lower(), + MlFramework.TENSORFLOW.value.lower(), + ] + if framework_str in frameworks_with_tasks: + files[f"{import_name}/task.py"] = { + "template": f"app/code/task.{framework_str}.py.tpl" + } + + context = { + "project_name": project_name, + "package_name": package_name, + "import_name": import_name.replace("-", "_"), + "username": username, + } for file_path, value in files.items(): render_and_create( diff --git a/src/py/flwr/cli/new/new_test.py b/src/py/flwr/cli/new/new_test.py index 
cedcb09b7755..33ad745efa93 100644 --- a/src/py/flwr/cli/new/new_test.py +++ b/src/py/flwr/cli/new/new_test.py @@ -16,6 +16,8 @@ import os +import pytest + from .new import MlFramework, create_file, load_template, new, render_template @@ -35,7 +37,12 @@ def test_render_template() -> None: """Test if a string is correctly substituted.""" # Prepare filename = "app/README.md.tpl" - data = {"project_name": "FedGPT"} + data = { + "project_name": "FedGPT", + "package_name": "fedgpt", + "import_name": "fedgpt", + "username": "flwrlabs", + } # Execute result = render_template(filename, data) @@ -60,42 +67,74 @@ def test_create_file(tmp_path: str) -> None: assert text == "Foobar" -def test_new(tmp_path: str) -> None: - """Test if project is created for framework.""" +def test_new_correct_name(tmp_path: str) -> None: + """Test if project with correct name is created for framework.""" # Prepare - project_name = "FedGPT" framework = MlFramework.PYTORCH - expected_files_top_level = { - "requirements.txt", - "fedgpt", - "README.md", - "flower.toml", - "pyproject.toml", - } - expected_files_module = { - "__init__.py", - "server.py", - "client.py", - "task.py", - } + username = "flwrlabs" + expected_names = [ + ("FedGPT", "fedgpt", "fedgpt"), + ("My-Flower-App", "my-flower-app", "my_flower_app"), + ] + + for project_name, expected_top_level_dir, expected_module_dir in expected_names: + expected_files_top_level = { + expected_module_dir, + "README.md", + "pyproject.toml", + ".gitignore", + } + expected_files_module = { + "__init__.py", + "server.py", + "client.py", + "task.py", + } + + # Current directory + origin = os.getcwd() + + try: + # Change into the temprorary directory + os.chdir(tmp_path) + # Execute + new(project_name=project_name, framework=framework, username=username) + + # Assert + file_list = os.listdir(os.path.join(tmp_path, expected_top_level_dir)) + assert set(file_list) == expected_files_top_level + + file_list = os.listdir( + os.path.join(tmp_path, 
expected_top_level_dir, expected_module_dir) + ) + assert set(file_list) == expected_files_module + finally: + os.chdir(origin) + + +def test_new_incorrect_name(tmp_path: str) -> None: + """Test if project with incorrect name is created for framework.""" + framework = MlFramework.PYTORCH + username = "flwrlabs" + + for project_name in ["My_Flower_App", "My.Flower App"]: + # Current directory + origin = os.getcwd() - # Current directory - origin = os.getcwd() + try: + # Change into the temprorary directory + os.chdir(tmp_path) - try: - # Change into the temprorary directory - os.chdir(tmp_path) + with pytest.raises(OSError) as exc_info: - # Execute - new(project_name=project_name, framework=framework) + # Execute + new( + project_name=project_name, + framework=framework, + username=username, + ) - # Assert - file_list = os.listdir(os.path.join(tmp_path, project_name.lower())) - assert set(file_list) == expected_files_top_level + assert "Failed to read from stdin" in str(exc_info.value) - file_list = os.listdir( - os.path.join(tmp_path, project_name.lower(), project_name.lower()) - ) - assert set(file_list) == expected_files_module - finally: - os.chdir(origin) + finally: + os.chdir(origin) diff --git a/src/py/flwr/cli/new/templates/app/.gitignore.tpl b/src/py/flwr/cli/new/templates/app/.gitignore.tpl new file mode 100644 index 000000000000..68bc17f9ff21 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/.gitignore.tpl @@ -0,0 +1,160 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ diff --git a/src/py/flwr/cli/new/templates/app/README.md.tpl b/src/py/flwr/cli/new/templates/app/README.md.tpl index 516bed0f40c2..ddc42cafabc3 100644 --- a/src/py/flwr/cli/new/templates/app/README.md.tpl +++ b/src/py/flwr/cli/new/templates/app/README.md.tpl @@ -3,11 +3,7 @@ ## Install dependencies ```bash -# Using pip pip install . 
- -# Or using Poetry -poetry install ``` ## Run (Simulation Engine) diff --git a/src/py/flwr/cli/new/templates/app/code/client.hf.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.hf.py.tpl new file mode 100644 index 000000000000..314da2120c53 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/client.hf.py.tpl @@ -0,0 +1,55 @@ +"""$project_name: A Flower / HuggingFace Transformers app.""" + +from flwr.client import ClientApp, NumPyClient +from transformers import AutoModelForSequenceClassification + +from $import_name.task import ( + get_weights, + load_data, + set_weights, + train, + test, + CHECKPOINT, + DEVICE, +) + + +# Flower client +class FlowerClient(NumPyClient): + def __init__(self, net, trainloader, testloader): + self.net = net + self.trainloader = trainloader + self.testloader = testloader + + def get_parameters(self, config): + return get_weights(self.net) + + def set_parameters(self, parameters): + set_weights(self.net, parameters) + + def fit(self, parameters, config): + self.set_parameters(parameters) + train(self.net, self.trainloader, epochs=1) + return self.get_parameters(config={}), len(self.trainloader), {} + + def evaluate(self, parameters, config): + self.set_parameters(parameters) + loss, accuracy = test(self.net, self.testloader) + return float(loss), len(self.testloader), {"accuracy": accuracy} + + +def client_fn(cid): + # Load model and data + net = AutoModelForSequenceClassification.from_pretrained( + CHECKPOINT, num_labels=2 + ).to(DEVICE) + trainloader, valloader = load_data(int(cid), 2) + + # Return Client instance + return FlowerClient(net, trainloader, valloader).to_client() + + +# Flower ClientApp +app = ClientApp( + client_fn, +) diff --git a/src/py/flwr/cli/new/templates/app/code/client.jax.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.jax.py.tpl new file mode 100644 index 000000000000..3c6d2f03637a --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/client.jax.py.tpl @@ -0,0 +1,55 @@ 
+"""$project_name: A Flower / JAX app.""" + +import jax +from flwr.client import NumPyClient, ClientApp + +from $import_name.task import ( + evaluation, + get_params, + load_data, + load_model, + loss_fn, + set_params, + train, +) + + +# Define Flower Client and client_fn +class FlowerClient(NumPyClient): + def __init__(self): + self.train_x, self.train_y, self.test_x, self.test_y = load_data() + self.grad_fn = jax.grad(loss_fn) + model_shape = self.train_x.shape[1:] + + self.params = load_model(model_shape) + + def get_parameters(self, config): + return get_params(self.params) + + def set_parameters(self, parameters): + set_params(self.params, parameters) + + def fit(self, parameters, config): + self.set_parameters(parameters) + self.params, loss, num_examples = train( + self.params, self.grad_fn, self.train_x, self.train_y + ) + parameters = self.get_parameters(config={}) + return parameters, num_examples, {"loss": float(loss)} + + def evaluate(self, parameters, config): + self.set_parameters(parameters) + loss, num_examples = evaluation( + self.params, self.grad_fn, self.test_x, self.test_y + ) + return float(loss), num_examples, {"loss": float(loss)} + +def client_fn(cid): + # Return Client instance + return FlowerClient().to_client() + + +# Flower ClientApp +app = ClientApp( + client_fn, +) diff --git a/src/py/flwr/cli/new/templates/app/code/client.mlx.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.mlx.py.tpl new file mode 100644 index 000000000000..1722561370a8 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/client.mlx.py.tpl @@ -0,0 +1,70 @@ +"""$project_name: A Flower / MLX app.""" + +import mlx.core as mx +import mlx.nn as nn +import mlx.optimizers as optim +from flwr.client import NumPyClient, ClientApp + +from $import_name.task import ( + batch_iterate, + eval_fn, + get_params, + load_data, + loss_fn, + set_params, + MLP, +) + + +# Define Flower Client and client_fn +class FlowerClient(NumPyClient): + def __init__(self, data): + 
num_layers = 2 + hidden_dim = 32 + num_classes = 10 + batch_size = 256 + num_epochs = 1 + learning_rate = 1e-1 + + self.train_images, self.train_labels, self.test_images, self.test_labels = data + self.model = MLP(num_layers, self.train_images.shape[-1], hidden_dim, num_classes) + self.optimizer = optim.SGD(learning_rate=learning_rate) + self.loss_and_grad_fn = nn.value_and_grad(self.model, loss_fn) + self.num_epochs = num_epochs + self.batch_size = batch_size + + def get_parameters(self, config): + return get_params(self.model) + + def set_parameters(self, parameters): + set_params(self.model, parameters) + + def fit(self, parameters, config): + self.set_parameters(parameters) + for _ in range(self.num_epochs): + for X, y in batch_iterate( + self.batch_size, self.train_images, self.train_labels + ): + _, grads = self.loss_and_grad_fn(self.model, X, y) + self.optimizer.update(self.model, grads) + mx.eval(self.model.parameters(), self.optimizer.state) + return self.get_parameters(config={}), len(self.train_images), {} + + def evaluate(self, parameters, config): + self.set_parameters(parameters) + accuracy = eval_fn(self.model, self.test_images, self.test_labels) + loss = loss_fn(self.model, self.test_images, self.test_labels) + return loss.item(), len(self.test_images), {"accuracy": accuracy.item()} + + +def client_fn(cid): + data = load_data(int(cid), 2) + + # Return Client instance + return FlowerClient(data).to_client() + + +# Flower ClientApp +app = ClientApp( + client_fn, +) diff --git a/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl index 7137a7791683..c68974efaadf 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl @@ -2,7 +2,7 @@ from flwr.client import NumPyClient, ClientApp -from $project_name.task import ( +from $import_name.task import ( Net, DEVICE, load_data, diff --git 
a/src/py/flwr/cli/new/templates/app/code/client.sklearn.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.sklearn.py.tpl new file mode 100644 index 000000000000..9181389cad1c --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/client.sklearn.py.tpl @@ -0,0 +1,94 @@ +"""$project_name: A Flower / Scikit-Learn app.""" + +import warnings + +import numpy as np +from flwr.client import NumPyClient, ClientApp +from flwr_datasets import FederatedDataset +from sklearn.linear_model import LogisticRegression +from sklearn.metrics import log_loss + + +def get_model_parameters(model): + if model.fit_intercept: + params = [ + model.coef_, + model.intercept_, + ] + else: + params = [model.coef_] + return params + + +def set_model_params(model, params): + model.coef_ = params[0] + if model.fit_intercept: + model.intercept_ = params[1] + return model + + +def set_initial_params(model): + n_classes = 10 # MNIST has 10 classes + n_features = 784 # Number of features in dataset + model.classes_ = np.array([i for i in range(10)]) + + model.coef_ = np.zeros((n_classes, n_features)) + if model.fit_intercept: + model.intercept_ = np.zeros((n_classes,)) + + +class FlowerClient(NumPyClient): + def __init__(self, model, X_train, X_test, y_train, y_test): + self.model = model + self.X_train = X_train + self.X_test = X_test + self.y_train = y_train + self.y_test = y_test + + def get_parameters(self, config): + return get_model_parameters(self.model) + + def fit(self, parameters, config): + set_model_params(self.model, parameters) + + # Ignore convergence failure due to low local epochs + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + self.model.fit(self.X_train, self.y_train) + + return get_model_parameters(self.model), len(self.X_train), {} + + def evaluate(self, parameters, config): + set_model_params(self.model, parameters) + + loss = log_loss(self.y_test, self.model.predict_proba(self.X_test)) + accuracy = self.model.score(self.X_test, self.y_test) + + 
return loss, len(self.X_test), {"accuracy": accuracy} + +fds = FederatedDataset(dataset="mnist", partitioners={"train": 2}) + +def client_fn(cid: str): + dataset = fds.load_partition(int(cid), "train").with_format("numpy") + + X, y = dataset["image"].reshape((len(dataset), -1)), dataset["label"] + + # Split the on edge data: 80% train, 20% test + X_train, X_test = X[: int(0.8 * len(X))], X[int(0.8 * len(X)) :] + y_train, y_test = y[: int(0.8 * len(y))], y[int(0.8 * len(y)) :] + + # Create LogisticRegression Model + model = LogisticRegression( + penalty="l2", + max_iter=1, # local epoch + warm_start=True, # prevent refreshing weights when fitting + ) + + # Setting initial parameters, akin to model.compile for keras models + set_initial_params(model) + + return FlowerClient(model, X_train, X_test, y_train, y_test).to_client() + + +# Flower ClientApp +app = ClientApp(client_fn=client_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/client.tensorflow.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.tensorflow.py.tpl index cc00f8ff0b8c..dc55d4ca6569 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.tensorflow.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.tensorflow.py.tpl @@ -1 +1,43 @@ """$project_name: A Flower / TensorFlow app.""" + +from flwr.client import NumPyClient, ClientApp + +from $import_name.task import load_data, load_model + + +# Define Flower Client and client_fn +class FlowerClient(NumPyClient): + def __init__(self, model, x_train, y_train, x_test, y_test): + self.model = model + self.x_train = x_train + self.y_train = y_train + self.x_test = x_test + self.y_test = y_test + + def get_parameters(self, config): + return self.model.get_weights() + + def fit(self, parameters, config): + self.model.set_weights(parameters) + self.model.fit(self.x_train, self.y_train, epochs=1, batch_size=32, verbose=0) + return self.model.get_weights(), len(self.x_train), {} + + def evaluate(self, parameters, config): + 
self.model.set_weights(parameters) + loss, accuracy = self.model.evaluate(self.x_test, self.y_test, verbose=0) + return loss, len(self.x_test), {"accuracy": accuracy} + + +def client_fn(cid): + # Load model and data + net = load_model() + x_train, y_train, x_test, y_test = load_data(int(cid), 2) + + # Return Client instance + return FlowerClient(net, x_train, y_train, x_test, y_test).to_client() + + +# Flower ClientApp +app = ClientApp( + client_fn=client_fn, +) diff --git a/src/py/flwr/cli/new/templates/app/code/server.hf.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.hf.py.tpl new file mode 100644 index 000000000000..d7d86931335b --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/server.hf.py.tpl @@ -0,0 +1,17 @@ +"""$project_name: A Flower / HuggingFace Transformers app.""" + +from flwr.server.strategy import FedAvg +from flwr.server import ServerApp, ServerConfig + + +# Define strategy +strategy = FedAvg( + fraction_fit=1.0, + fraction_evaluate=1.0, +) + +# Start server +app = ServerApp( + config=ServerConfig(num_rounds=3), + strategy=strategy, +) diff --git a/src/py/flwr/cli/new/templates/app/code/server.jax.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.jax.py.tpl new file mode 100644 index 000000000000..53cff7b905f4 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/server.jax.py.tpl @@ -0,0 +1,12 @@ +"""$project_name: A Flower / JAX app.""" + +import flwr as fl + +# Configure the strategy +strategy = fl.server.strategy.FedAvg() + +# Flower ServerApp +app = fl.server.ServerApp( + config=fl.server.ServerConfig(num_rounds=3), + strategy=strategy, +) diff --git a/src/py/flwr/cli/new/templates/app/code/server.mlx.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.mlx.py.tpl new file mode 100644 index 000000000000..b475e0e7dc36 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/server.mlx.py.tpl @@ -0,0 +1,15 @@ +"""$project_name: A Flower / MLX app.""" + +from flwr.server import ServerApp, ServerConfig +from 
flwr.server.strategy import FedAvg + + +# Define strategy +strategy = FedAvg() + + +# Create ServerApp +app = ServerApp( + config=ServerConfig(num_rounds=3), + strategy=strategy, +) diff --git a/src/py/flwr/cli/new/templates/app/code/server.pytorch.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.pytorch.py.tpl index cb04c052b429..dc635f79a664 100644 --- a/src/py/flwr/cli/new/templates/app/code/server.pytorch.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/server.pytorch.py.tpl @@ -4,7 +4,7 @@ from flwr.common import ndarrays_to_parameters from flwr.server import ServerApp, ServerConfig from flwr.server.strategy import FedAvg -from $project_name.task import Net, get_weights +from $import_name.task import Net, get_weights # Initialize model parameters diff --git a/src/py/flwr/cli/new/templates/app/code/server.sklearn.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.sklearn.py.tpl new file mode 100644 index 000000000000..266a53ac5794 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/server.sklearn.py.tpl @@ -0,0 +1,17 @@ +"""$project_name: A Flower / Scikit-Learn app.""" + +from flwr.server import ServerApp, ServerConfig +from flwr.server.strategy import FedAvg + + +strategy = FedAvg( + fraction_fit=1.0, + fraction_evaluate=1.0, + min_available_clients=2, +) + +# Create ServerApp +app = ServerApp( + config=ServerConfig(num_rounds=3), + strategy=strategy, +) diff --git a/src/py/flwr/cli/new/templates/app/code/server.tensorflow.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.tensorflow.py.tpl index cc00f8ff0b8c..8d092164a468 100644 --- a/src/py/flwr/cli/new/templates/app/code/server.tensorflow.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/server.tensorflow.py.tpl @@ -1 +1,27 @@ """$project_name: A Flower / TensorFlow app.""" + +from flwr.common import ndarrays_to_parameters +from flwr.server import ServerApp, ServerConfig +from flwr.server.strategy import FedAvg + +from $import_name.task import load_model + +# Define config +config 
= ServerConfig(num_rounds=3) + +parameters = ndarrays_to_parameters(load_model().get_weights()) + +# Define strategy +strategy = FedAvg( + fraction_fit=1.0, + fraction_evaluate=1.0, + min_available_clients=2, + initial_parameters=parameters, +) + + +# Create ServerApp +app = ServerApp( + config=config, + strategy=strategy, +) diff --git a/src/py/flwr/cli/new/templates/app/code/task.hf.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.hf.py.tpl new file mode 100644 index 000000000000..8e89add66835 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/task.hf.py.tpl @@ -0,0 +1,87 @@ +"""$project_name: A Flower / HuggingFace Transformers app.""" + +import warnings +from collections import OrderedDict + +import torch +from evaluate import load as load_metric +from torch.optim import AdamW +from torch.utils.data import DataLoader +from transformers import AutoTokenizer, DataCollatorWithPadding + +from flwr_datasets import FederatedDataset + +warnings.filterwarnings("ignore", category=UserWarning) +DEVICE = torch.device("cpu") +CHECKPOINT = "distilbert-base-uncased" # transformer model checkpoint + + +def load_data(partition_id, num_clients): + """Load IMDB data (training and eval)""" + fds = FederatedDataset(dataset="imdb", partitioners={"train": num_clients}) + partition = fds.load_partition(partition_id) + # Divide data: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + + tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT) + + def tokenize_function(examples): + return tokenizer(examples["text"], truncation=True) + + partition_train_test = partition_train_test.map(tokenize_function, batched=True) + partition_train_test = partition_train_test.remove_columns("text") + partition_train_test = partition_train_test.rename_column("label", "labels") + + data_collator = DataCollatorWithPadding(tokenizer=tokenizer) + trainloader = DataLoader( + partition_train_test["train"], + shuffle=True, + batch_size=32, + 
collate_fn=data_collator, + ) + + testloader = DataLoader( + partition_train_test["test"], batch_size=32, collate_fn=data_collator + ) + + return trainloader, testloader + + +def train(net, trainloader, epochs): + optimizer = AdamW(net.parameters(), lr=5e-5) + net.train() + for _ in range(epochs): + for batch in trainloader: + batch = {k: v.to(DEVICE) for k, v in batch.items()} + outputs = net(**batch) + loss = outputs.loss + loss.backward() + optimizer.step() + optimizer.zero_grad() + + +def test(net, testloader): + metric = load_metric("accuracy") + loss = 0 + net.eval() + for batch in testloader: + batch = {k: v.to(DEVICE) for k, v in batch.items()} + with torch.no_grad(): + outputs = net(**batch) + logits = outputs.logits + loss += outputs.loss.item() + predictions = torch.argmax(logits, dim=-1) + metric.add_batch(predictions=predictions, references=batch["labels"]) + loss /= len(testloader.dataset) + accuracy = metric.compute()["accuracy"] + return loss, accuracy + + +def get_weights(net): + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + +def set_weights(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) diff --git a/src/py/flwr/cli/new/templates/app/code/task.jax.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.jax.py.tpl new file mode 100644 index 000000000000..82f080ebcdcb --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/task.jax.py.tpl @@ -0,0 +1,57 @@ +"""$project_name: A Flower / JAX app.""" + +import jax +import jax.numpy as jnp +from sklearn.datasets import make_regression +from sklearn.model_selection import train_test_split +import numpy as np + +key = jax.random.PRNGKey(0) + + +def load_data(): + # Load dataset + X, y = make_regression(n_features=3, random_state=0) + X, X_test, y, y_test = train_test_split(X, y) + return X, y, X_test, y_test + + +def 
load_model(model_shape): + # Extract model parameters + params = {"b": jax.random.uniform(key), "w": jax.random.uniform(key, model_shape)} + return params + + +def loss_fn(params, X, y): + # Return MSE as loss + err = jnp.dot(X, params["w"]) + params["b"] - y + return jnp.mean(jnp.square(err)) + + +def train(params, grad_fn, X, y): + loss = 1_000_000 + num_examples = X.shape[0] + for epochs in range(50): + grads = grad_fn(params, X, y) + params = jax.tree.map(lambda p, g: p - 0.05 * g, params, grads) + loss = loss_fn(params, X, y) + return params, loss, num_examples + + +def evaluation(params, grad_fn, X_test, y_test): + num_examples = X_test.shape[0] + err_test = loss_fn(params, X_test, y_test) + loss_test = jnp.mean(jnp.square(err_test)) + return loss_test, num_examples + + +def get_params(params): + parameters = [] + for _, val in params.items(): + parameters.append(np.array(val)) + return parameters + + +def set_params(local_params, global_params): + for key, value in list(zip(local_params.keys(), global_params)): + local_params[key] = value diff --git a/src/py/flwr/cli/new/templates/app/code/task.mlx.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.mlx.py.tpl new file mode 100644 index 000000000000..bcd4dde93310 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/task.mlx.py.tpl @@ -0,0 +1,89 @@ +"""$project_name: A Flower / MLX app.""" + +import mlx.core as mx +import mlx.nn as nn +import numpy as np +from datasets.utils.logging import disable_progress_bar +from flwr_datasets import FederatedDataset + + +disable_progress_bar() + +class MLP(nn.Module): + """A simple MLP.""" + + def __init__( + self, num_layers: int, input_dim: int, hidden_dim: int, output_dim: int + ): + super().__init__() + layer_sizes = [input_dim] + [hidden_dim] * num_layers + [output_dim] + self.layers = [ + nn.Linear(idim, odim) + for idim, odim in zip(layer_sizes[:-1], layer_sizes[1:]) + ] + + def __call__(self, x): + for l in self.layers[:-1]: + x = mx.maximum(l(x), 0.0) + 
return self.layers[-1](x) + + +def loss_fn(model, X, y): + return mx.mean(nn.losses.cross_entropy(model(X), y)) + + +def eval_fn(model, X, y): + return mx.mean(mx.argmax(model(X), axis=1) == y) + + +def batch_iterate(batch_size, X, y): + perm = mx.array(np.random.permutation(y.size)) + for s in range(0, y.size, batch_size): + ids = perm[s : s + batch_size] + yield X[ids], y[ids] + + +def load_data(partition_id, num_clients): + fds = FederatedDataset(dataset="mnist", partitioners={"train": num_clients}) + partition = fds.load_partition(partition_id) + partition_splits = partition.train_test_split(test_size=0.2, seed=42) + + partition_splits["train"].set_format("numpy") + partition_splits["test"].set_format("numpy") + + train_partition = partition_splits["train"].map( + lambda img: { + "img": img.reshape(-1, 28 * 28).squeeze().astype(np.float32) / 255.0 + }, + input_columns="image", + ) + test_partition = partition_splits["test"].map( + lambda img: { + "img": img.reshape(-1, 28 * 28).squeeze().astype(np.float32) / 255.0 + }, + input_columns="image", + ) + + data = ( + train_partition["img"], + train_partition["label"].astype(np.uint32), + test_partition["img"], + test_partition["label"].astype(np.uint32), + ) + + train_images, train_labels, test_images, test_labels = map(mx.array, data) + return train_images, train_labels, test_images, test_labels + + +def get_params(model): + layers = model.parameters()["layers"] + return [np.array(val) for layer in layers for _, val in layer.items()] + + +def set_params(model, parameters): + new_params = {} + new_params["layers"] = [ + {"weight": mx.array(parameters[i]), "bias": mx.array(parameters[i + 1])} + for i in range(0, len(parameters), 2) + ] + model.update(new_params) diff --git a/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl index 85460564b6ef..b30c65a285b5 100644 --- a/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl +++ 
b/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl @@ -39,7 +39,7 @@ def load_data(partition_id, num_partitions): fds = FederatedDataset(dataset="cifar10", partitioners={"train": num_partitions}) partition = fds.load_partition(partition_id) # Divide data on each node: 80% train, 20% test - partition_train_test = partition.train_test_split(test_size=0.2) + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) pytorch_transforms = Compose( [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] ) diff --git a/src/py/flwr/cli/new/templates/app/code/task.tensorflow.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.tensorflow.py.tpl new file mode 100644 index 000000000000..fa07f93713ed --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/task.tensorflow.py.tpl @@ -0,0 +1,29 @@ +"""$project_name: A Flower / TensorFlow app.""" + +import os + +import tensorflow as tf +from flwr_datasets import FederatedDataset + + +# Make TensorFlow log less verbose +os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" + +def load_model(): + # Load model and data (MobileNetV2, CIFAR-10) + model = tf.keras.applications.MobileNetV2((32, 32, 3), classes=10, weights=None) + model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"]) + return model + + +def load_data(partition_id, num_partitions): + # Download and partition dataset + fds = FederatedDataset(dataset="cifar10", partitioners={"train": num_partitions}) + partition = fds.load_partition(partition_id, "train") + partition.set_format("numpy") + + # Divide data on each node: 80% train, 20% test + partition = partition.train_test_split(test_size=0.2) + x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"] + x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"] + return x_train, y_train, x_test, y_test diff --git a/src/py/flwr/cli/new/templates/app/flower.toml.tpl b/src/py/flwr/cli/new/templates/app/flower.toml.tpl deleted file mode 100644 
index 07a6ffaf9e49..000000000000 --- a/src/py/flwr/cli/new/templates/app/flower.toml.tpl +++ /dev/null @@ -1,13 +0,0 @@ -[project] -name = "$project_name" -version = "1.0.0" -description = "" -license = "Apache-2.0" -authors = [ - "The Flower Authors ", -] -readme = "README.md" - -[flower.components] -serverapp = "$project_name.server:app" -clientapp = "$project_name.client:app" diff --git a/src/py/flwr/cli/new/templates/app/pyproject.hf.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.hf.toml.tpl new file mode 100644 index 000000000000..3bd980b2340e --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/pyproject.hf.toml.tpl @@ -0,0 +1,31 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "$package_name" +version = "1.0.0" +description = "" +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +license = { text = "Apache License (2.0)" } +dependencies = [ + "flwr[simulation]>=1.8.0,<2.0", + "flwr-datasets>=0.0.2,<1.0.0", + "torch==2.2.1", + "transformers>=4.30.0,<5.0", + "evaluate>=0.4.0,<1.0", + "datasets>=2.0.0, <3.0", + "scikit-learn>=1.3.1, <2.0", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[flower] +publisher = "$username" + +[flower.components] +serverapp = "$import_name.server:app" +clientapp = "$import_name.client:app" diff --git a/src/py/flwr/cli/new/templates/app/pyproject.jax.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.jax.toml.tpl new file mode 100644 index 000000000000..1d32cfd77481 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/pyproject.jax.toml.tpl @@ -0,0 +1,28 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "$package_name" +version = "1.0.0" +description = "" +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +license = {text = "Apache License (2.0)"} +dependencies = [ + "flwr[simulation]>=1.8.0,<2.0", + "jax==0.4.26", + "jaxlib==0.4.26", + 
"scikit-learn==1.4.2", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[flower] +publisher = "$username" + +[flower.components] +serverapp = "$import_name.server:app" +clientapp = "$import_name.client:app" diff --git a/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl new file mode 100644 index 000000000000..0f56dc7eacc5 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl @@ -0,0 +1,28 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "$package_name" +version = "1.0.0" +description = "" +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +license = { text = "Apache License (2.0)" } +dependencies = [ + "flwr[simulation]>=1.8.0,<2.0", + "flwr-datasets[vision]>=0.0.2,<1.0.0", + "mlx==0.10.0", + "numpy==1.24.4", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[flower] +publisher = "$username" + +[flower.components] +serverapp = "$import_name.server:app" +clientapp = "$import_name.client:app" diff --git a/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl index 15d8211a1a25..bbf8463054f4 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl @@ -1,19 +1,26 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "$project_name" +[project] +name = "$package_name" version = "1.0.0" description = "" -license = "Apache-2.0" authors = [ - "The Flower Authors ", + { name = "The Flower Authors", email = "hello@flower.ai" }, ] -readme = "README.md" +license = { text = "Apache License (2.0)" } +dependencies = [ + "flwr[simulation]>=1.8.0,<2.0", + "numpy>=1.21.0", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + 
+[flower] +publisher = "$username" -[tool.poetry.dependencies] -python = "^3.9" -# Mandatory dependencies -numpy = "^1.21.0" -flwr = { version = "^1.8.0", extras = ["simulation"] } +[flower.components] +serverapp = "$import_name.server:app" +clientapp = "$import_name.client:app" diff --git a/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl index da0e15b903f8..a41ce1a6a4c6 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl @@ -1,21 +1,28 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "$project_name" +[project] +name = "$package_name" version = "1.0.0" description = "" -license = "Apache-2.0" authors = [ - "The Flower Authors ", + { name = "The Flower Authors", email = "hello@flower.ai" }, ] -readme = "README.md" +license = { text = "Apache License (2.0)" } +dependencies = [ + "flwr[simulation]>=1.8.0,<2.0", + "flwr-datasets[vision]>=0.0.2,<1.0.0", + "torch==2.2.1", + "torchvision==0.17.1", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[flower] +publisher = "$username" -[tool.poetry.dependencies] -python = "^3.9" -# Mandatory dependencies -flwr-nightly = { version = "1.8.0.dev20240313", extras = ["simulation"] } -flwr-datasets = { version = "0.0.2", extras = ["vision"] } -torch = "2.2.1" -torchvision = "0.17.1" +[flower.components] +serverapp = "$import_name.server:app" +clientapp = "$import_name.client:app" diff --git a/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl new file mode 100644 index 000000000000..25645f0cde1a --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl @@ -0,0 +1,27 @@ +[build-system] +requires = ["hatchling"] +build-backend = 
"hatchling.build" + +[project] +name = "$package_name" +version = "1.0.0" +description = "" +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +license = { text = "Apache License (2.0)" } +dependencies = [ + "flwr[simulation]>=1.8.0,<2.0", + "flwr-datasets[vision]>=0.0.2,<1.0.0", + "scikit-learn>=1.1.1", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[flower] +publisher = "$username" + +[flower.components] +serverapp = "$import_name.server:app" +clientapp = "$import_name.client:app" diff --git a/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl index f7383a78b7d5..3968e3aa327b 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl @@ -1,21 +1,27 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "$project_name" +[project] +name = "$package_name" version = "1.0.0" description = "" -license = "Apache-2.0" authors = [ - "The Flower Authors ", + { name = "The Flower Authors", email = "hello@flower.ai" }, ] -readme = "README.md" +license = { text = "Apache License (2.0)" } +dependencies = [ + "flwr[simulation]>=1.8.0,<2.0", + "flwr-datasets[vision]>=0.0.2,<1.0.0", + "tensorflow>=2.11.1", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[flower] +publisher = "$username" -[tool.poetry.dependencies] -python = ">=3.9,<3.11" -# Mandatory dependencies -flwr = { version = "^1.8.0", extras = ["simulation"] } -flwr-datasets = { version = "^0.0.2", extras = ["vision"] } -tensorflow-cpu = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == \"x86_64\"" } -tensorflow-macos = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "sys_platform == \"darwin\" and platform_machine == \"arm64\"" } +[flower.components] 
+serverapp = "$import_name.server:app" +clientapp = "$import_name.client:app" diff --git a/src/py/flwr/cli/new/templates/app/requirements.numpy.txt.tpl b/src/py/flwr/cli/new/templates/app/requirements.numpy.txt.tpl deleted file mode 100644 index 4b460798e96f..000000000000 --- a/src/py/flwr/cli/new/templates/app/requirements.numpy.txt.tpl +++ /dev/null @@ -1,2 +0,0 @@ -flwr>=1.8, <2.0 -numpy>=1.21.0 diff --git a/src/py/flwr/cli/new/templates/app/requirements.pytorch.txt.tpl b/src/py/flwr/cli/new/templates/app/requirements.pytorch.txt.tpl deleted file mode 100644 index ddb8a814447b..000000000000 --- a/src/py/flwr/cli/new/templates/app/requirements.pytorch.txt.tpl +++ /dev/null @@ -1,4 +0,0 @@ -flwr-nightly[simulation]==1.8.0.dev20240313 -flwr-datasets[vision]==0.0.2 -torch==2.2.1 -torchvision==0.17.1 diff --git a/src/py/flwr/cli/new/templates/app/requirements.tensorflow.txt.tpl b/src/py/flwr/cli/new/templates/app/requirements.tensorflow.txt.tpl deleted file mode 100644 index b6fb49a4bbcb..000000000000 --- a/src/py/flwr/cli/new/templates/app/requirements.tensorflow.txt.tpl +++ /dev/null @@ -1,4 +0,0 @@ -flwr>=1.8, <2.0 -flwr-datasets[vision]>=0.0.2, <1.0.0 -tensorflow-macos>=2.9.1, !=2.11.1 ; sys_platform == "darwin" and platform_machine == "arm64" -tensorflow-cpu>=2.9.1, !=2.11.1 ; platform_machine == "x86_64" diff --git a/src/py/flwr/cli/run/run.py b/src/py/flwr/cli/run/run.py index 98b5da1843a6..9c50c8cb1980 100644 --- a/src/py/flwr/cli/run/run.py +++ b/src/py/flwr/cli/run/run.py @@ -18,7 +18,7 @@ import typer -from flwr.cli import flower_toml +from flwr.cli import config_utils from flwr.simulation.run_simulation import _run_simulation @@ -26,11 +26,11 @@ def run() -> None: """Run Flower project.""" typer.secho("Loading project configuration... 
", fg=typer.colors.BLUE) - config, errors, warnings = flower_toml.load_and_validate_with_defaults() + config, errors, warnings = config_utils.load_and_validate_with_defaults() if config is None: typer.secho( - "Project configuration could not be loaded.\nflower.toml is invalid:\n" + "Project configuration could not be loaded.\npyproject.toml is invalid:\n" + "\n".join([f"- {line}" for line in errors]), fg=typer.colors.RED, bold=True, diff --git a/src/py/flwr/cli/utils.py b/src/py/flwr/cli/utils.py index 4e86f0c3b8c8..6460b770b184 100644 --- a/src/py/flwr/cli/utils.py +++ b/src/py/flwr/cli/utils.py @@ -14,18 +14,24 @@ # ============================================================================== """Flower command line interface utils.""" -from typing import List, cast +import re +from typing import Callable, List, Optional, cast import typer -def prompt_text(text: str) -> str: +def prompt_text( + text: str, + predicate: Callable[[str], bool] = lambda _: True, + default: Optional[str] = None, +) -> str: """Ask user to enter text input.""" while True: result = typer.prompt( - typer.style(f"\n💬 {text}", fg=typer.colors.MAGENTA, bold=True) + typer.style(f"\n💬 {text}", fg=typer.colors.MAGENTA, bold=True), + default=default, ) - if len(result) > 0: + if predicate(result) and len(result) > 0: break print(typer.style("❌ Invalid entry", fg=typer.colors.RED, bold=True)) @@ -65,3 +71,54 @@ def prompt_options(text: str, options: List[str]) -> str: result = options[int(index)] return result + + +def is_valid_project_name(name: str) -> bool: + """Check if the given string is a valid Python project name. + + A valid project name must start with a letter and can only contain letters, digits, + and hyphens. 
+ """ + if not name: + return False + + # Check if the first character is a letter + if not name[0].isalpha(): + return False + + # Check if the rest of the characters are valid (letter, digit, or dash) + for char in name[1:]: + if not (char.isalnum() or char in "-"): + return False + + return True + + +def sanitize_project_name(name: str) -> str: + """Sanitize the given string to make it a valid Python project name. + + This version replaces spaces, dots, slashes, and underscores with dashes, removes + any characters not allowed in Python project names, makes the string lowercase, and + ensures it starts with a valid character. + """ + # Replace spaces, dots, slashes, and underscores with '-' + name_with_hyphens = re.sub(r"[ ./_]", "-", name) + + # Allowed characters in a project name: letters, digits, dash + allowed_chars = set( + "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-" + ) + + # Make the string lowercase + sanitized_name = name_with_hyphens.lower() + + # Remove any characters not allowed in Python project names + sanitized_name = "".join(c for c in sanitized_name if c in allowed_chars) + + # Strip leading digits so the name starts with a valid character + while sanitized_name and ( + sanitized_name[0].isdigit() or sanitized_name[0] not in allowed_chars + ): + sanitized_name = sanitized_name[1:] + + return sanitized_name diff --git a/src/py/flwr/client/__init__.py b/src/py/flwr/client/__init__.py index a721fb584164..fd8647dbaf2e 100644 --- a/src/py/flwr/client/__init__.py +++ b/src/py/flwr/client/__init__.py @@ -15,12 +15,13 @@ """Flower client.""" -from .app import run_client_app as run_client_app from .app import start_client as start_client from .app import start_numpy_client as start_numpy_client from .client import Client as Client from .client_app import ClientApp as ClientApp from .numpy_client import NumPyClient as NumPyClient +from .supernode import run_client_app as run_client_app +from .supernode import run_supernode as run_supernode from .typing import 
ClientFn as ClientFn __all__ = [ @@ -29,6 +30,7 @@ "ClientFn", "NumPyClient", "run_client_app", + "run_supernode", "start_client", "start_numpy_client", ] diff --git a/src/py/flwr/client/app.py b/src/py/flwr/client/app.py index c8287afc0fd0..d7c05d8afbb2 100644 --- a/src/py/flwr/client/app.py +++ b/src/py/flwr/client/app.py @@ -14,14 +14,12 @@ # ============================================================================== """Flower client app.""" - -import argparse import sys import time -from logging import DEBUG, INFO, WARN -from pathlib import Path +from logging import DEBUG, ERROR, INFO, WARN from typing import Callable, ContextManager, Optional, Tuple, Type, Union +from cryptography.hazmat.primitives.asymmetric import ec from grpc import RpcError from flwr.client.client import Client @@ -35,10 +33,10 @@ TRANSPORT_TYPE_GRPC_RERE, TRANSPORT_TYPE_REST, TRANSPORT_TYPES, + ErrorCode, ) -from flwr.common.exit_handlers import register_exit_handlers -from flwr.common.logger import log, warn_deprecated_feature, warn_experimental_feature -from flwr.common.object_ref import load_app, validate +from flwr.common.logger import log, warn_deprecated_feature +from flwr.common.message import Error from flwr.common.retry_invoker import RetryInvoker, exponential from .grpc_client.connection import grpc_connection @@ -48,142 +46,6 @@ from .numpy_client import NumPyClient -def run_client_app() -> None: - """Run Flower client app.""" - event(EventType.RUN_CLIENT_APP_ENTER) - - log(INFO, "Long-running Flower client starting") - - args = _parse_args_run_client_app().parse_args() - - # Obtain certificates - if args.insecure: - if args.root_certificates is not None: - sys.exit( - "Conflicting options: The '--insecure' flag disables HTTPS, " - "but '--root-certificates' was also specified. Please remove " - "the '--root-certificates' option when running in insecure mode, " - "or omit '--insecure' to use HTTPS." - ) - log( - WARN, - "Option `--insecure` was set. 
" - "Starting insecure HTTP client connected to %s.", - args.server, - ) - root_certificates = None - else: - # Load the certificates if provided, or load the system certificates - cert_path = args.root_certificates - if cert_path is None: - root_certificates = None - else: - root_certificates = Path(cert_path).read_bytes() - log( - DEBUG, - "Starting secure HTTPS client connected to %s " - "with the following certificates: %s.", - args.server, - cert_path, - ) - - log( - DEBUG, - "Flower will load ClientApp `%s`", - getattr(args, "client-app"), - ) - - client_app_dir = args.dir - if client_app_dir is not None: - sys.path.insert(0, client_app_dir) - - app_ref: str = getattr(args, "client-app") - valid, error_msg = validate(app_ref) - if not valid and error_msg: - raise LoadClientAppError(error_msg) from None - - def _load() -> ClientApp: - client_app = load_app(app_ref, LoadClientAppError) - - if not isinstance(client_app, ClientApp): - raise LoadClientAppError( - f"Attribute {app_ref} is not of type {ClientApp}", - ) from None - - return client_app - - _start_client_internal( - server_address=args.server, - load_client_app_fn=_load, - transport="rest" if args.rest else "grpc-rere", - root_certificates=root_certificates, - insecure=args.insecure, - max_retries=args.max_retries, - max_wait_time=args.max_wait_time, - ) - register_exit_handlers(event_type=EventType.RUN_CLIENT_APP_LEAVE) - - -def _parse_args_run_client_app() -> argparse.ArgumentParser: - """Parse flower-client-app command line arguments.""" - parser = argparse.ArgumentParser( - description="Start a Flower client app", - ) - - parser.add_argument( - "client-app", - help="For example: `client:app` or `project.package.module:wrapper.app`", - ) - parser.add_argument( - "--insecure", - action="store_true", - help="Run the client without HTTPS. By default, the client runs with " - "HTTPS enabled. 
Use this flag only if you understand the risks.", - ) - parser.add_argument( - "--rest", - action="store_true", - help="Use REST as a transport layer for the client.", - ) - parser.add_argument( - "--root-certificates", - metavar="ROOT_CERT", - type=str, - help="Specifies the path to the PEM-encoded root certificate file for " - "establishing secure HTTPS connections.", - ) - parser.add_argument( - "--server", - default="0.0.0.0:9092", - help="Server address", - ) - parser.add_argument( - "--max-retries", - type=int, - default=None, - help="The maximum number of times the client will try to connect to the" - "server before giving up in case of a connection error. By default," - "it is set to None, meaning there is no limit to the number of tries.", - ) - parser.add_argument( - "--max-wait-time", - type=float, - default=None, - help="The maximum duration before the client stops trying to" - "connect to the server in case of connection error. By default, it" - "is set to None, meaning there is no limit to the total time.", - ) - parser.add_argument( - "--dir", - default="", - help="Add specified directory to the PYTHONPATH and load Flower " - "app from there." 
- " Default: current working directory.", - ) - - return parser - - def _check_actionable_client( client: Optional[Client], client_fn: Optional[ClientFn] ) -> None: @@ -212,6 +74,9 @@ def start_client( root_certificates: Optional[Union[bytes, str]] = None, insecure: Optional[bool] = None, transport: Optional[str] = None, + authentication_keys: Optional[ + Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + ] = None, max_retries: Optional[int] = None, max_wait_time: Optional[float] = None, ) -> None: @@ -296,6 +161,7 @@ class `flwr.client.Client` (default: None) root_certificates=root_certificates, insecure=insecure, transport=transport, + authentication_keys=authentication_keys, max_retries=max_retries, max_wait_time=max_wait_time, ) @@ -316,6 +182,9 @@ def _start_client_internal( root_certificates: Optional[Union[bytes, str]] = None, insecure: Optional[bool] = None, transport: Optional[str] = None, + authentication_keys: Optional[ + Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + ] = None, max_retries: Optional[int] = None, max_wait_time: Optional[float] = None, ) -> None: @@ -385,8 +254,6 @@ def _load_client_app() -> ClientApp: return ClientApp(client_fn=client_fn) load_client_app_fn = _load_client_app - else: - warn_experimental_feature("`load_client_app_fn`") # At this point, only `load_client_app_fn` should be used # Both `client` and `client_fn` must not be used directly @@ -397,7 +264,7 @@ def _load_client_app() -> ClientApp: ) retry_invoker = RetryInvoker( - wait_factory=exponential, + wait_gen_factory=exponential, recoverable_exceptions=connection_error_type, max_tries=max_retries, max_time=max_wait_time, @@ -442,8 +309,10 @@ def _load_client_app() -> ClientApp: retry_invoker, grpc_max_message_length, root_certificates, + authentication_keys, ) as conn: - receive, send, create_node, delete_node = conn + # pylint: disable-next=W0612 + receive, send, create_node, delete_node, get_run = conn # Register node if create_node is not 
None: @@ -457,12 +326,13 @@ def _load_client_app() -> ClientApp: continue log(INFO, "") - log( - INFO, - "[RUN %s, ROUND %s]", - message.metadata.run_id, - message.metadata.group_id, - ) + if len(message.metadata.group_id) > 0: + log( + INFO, + "[RUN %s, ROUND %s]", + message.metadata.run_id, + message.metadata.group_id, + ) log( INFO, "Received: %s message %s", @@ -482,32 +352,57 @@ def _load_client_app() -> ClientApp: # Retrieve context for this run context = node_state.retrieve_context(run_id=message.metadata.run_id) - # Load ClientApp instance - client_app: ClientApp = load_client_app_fn() - - # Handle task message - out_message = client_app(message=message, context=context) - - # Update node state - node_state.update_context( - run_id=message.metadata.run_id, - context=context, + # Create an error reply message that will never be used to prevent + # the used-before-assignment linting error + reply_message = message.create_error_reply( + error=Error(code=ErrorCode.UNKNOWN, reason="Unknown") ) + # Handle app loading and task message + try: + # Load ClientApp instance + client_app: ClientApp = load_client_app_fn() + + # Execute ClientApp + reply_message = client_app(message=message, context=context) + except Exception as ex: # pylint: disable=broad-exception-caught + + # Legacy grpc-bidi + if transport in ["grpc-bidi", None]: + log(ERROR, "Client raised an exception.", exc_info=ex) + # Raise exception, crash process + raise ex + + # Don't update/change NodeState + + e_code = ErrorCode.CLIENT_APP_RAISED_EXCEPTION + # Reason example: ":<'division by zero'>" + reason = str(type(ex)) + ":<'" + str(ex) + "'>" + exc_entity = "ClientApp" + if isinstance(ex, LoadClientAppError): + reason = ( + "An exception was raised when attempting to load " + "`ClientApp`" + ) + e_code = ErrorCode.LOAD_CLIENT_APP_EXCEPTION + exc_entity = "SuperNode" + + log(ERROR, "%s raised an exception", exc_entity, exc_info=ex) + + # Create error message + reply_message = 
message.create_error_reply( + error=Error(code=e_code, reason=reason) + ) + else: + # No exception, update node state + node_state.update_context( + run_id=message.metadata.run_id, + context=context, + ) + # Send - send(out_message) - log( - INFO, - "[RUN %s, ROUND %s]", - out_message.metadata.run_id, - out_message.metadata.group_id, - ) - log( - INFO, - "Sent: %s reply to message %s", - out_message.metadata.message_type, - message.metadata.message_id, - ) + send(reply_message) + log(INFO, "Sent reply") # Unregister node if delete_node is not None: @@ -628,13 +523,21 @@ def start_numpy_client( def _init_connection(transport: Optional[str], server_address: str) -> Tuple[ Callable[ - [str, bool, RetryInvoker, int, Union[bytes, str, None]], + [ + str, + bool, + RetryInvoker, + int, + Union[bytes, str, None], + Optional[Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]], + ], ContextManager[ Tuple[ Callable[[], Optional[Message]], Callable[[Message], None], Optional[Callable[[], None]], Optional[Callable[[], None]], + Optional[Callable[[int], Tuple[str, str]]], ] ], ], diff --git a/src/py/flwr/client/client_app.py b/src/py/flwr/client/client_app.py index 0b56219807c6..c9d337700147 100644 --- a/src/py/flwr/client/client_app.py +++ b/src/py/flwr/client/client_app.py @@ -23,10 +23,20 @@ from flwr.client.mod.utils import make_ffn from flwr.client.typing import ClientFn, Mod from flwr.common import Context, Message, MessageType +from flwr.common.logger import warn_preview_feature from .typing import ClientAppCallable +class ClientAppException(Exception): + """Exception raised when an exception is raised while executing a ClientApp.""" + + def __init__(self, message: str): + ex_name = self.__class__.__name__ + self.message = f"\nException {ex_name} occurred. Message: " + message + super().__init__(self.message) + + class ClientApp: """Flower ClientApp. 
@@ -123,6 +133,8 @@ def train_decorator(train_fn: ClientAppCallable) -> ClientAppCallable: if self._call: raise _registration_error(MessageType.TRAIN) + warn_preview_feature("ClientApp-register-train-function") + # Register provided function with the ClientApp object # Wrap mods around the wrapped step function self._train = make_ffn(train_fn, self._mods) @@ -151,6 +163,8 @@ def evaluate_decorator(evaluate_fn: ClientAppCallable) -> ClientAppCallable: if self._call: raise _registration_error(MessageType.EVALUATE) + warn_preview_feature("ClientApp-register-evaluate-function") + # Register provided function with the ClientApp object # Wrap mods around the wrapped step function self._evaluate = make_ffn(evaluate_fn, self._mods) @@ -179,6 +193,8 @@ def query_decorator(query_fn: ClientAppCallable) -> ClientAppCallable: if self._call: raise _registration_error(MessageType.QUERY) + warn_preview_feature("ClientApp-register-query-function") + # Register provided function with the ClientApp object # Wrap mods around the wrapped step function self._query = make_ffn(query_fn, self._mods) diff --git a/src/py/flwr/client/grpc_client/connection.py b/src/py/flwr/client/grpc_client/connection.py index 4431b53d2592..6e5227cf5e5f 100644 --- a/src/py/flwr/client/grpc_client/connection.py +++ b/src/py/flwr/client/grpc_client/connection.py @@ -22,6 +22,8 @@ from queue import Queue from typing import Callable, Iterator, Optional, Tuple, Union, cast +from cryptography.hazmat.primitives.asymmetric import ec + from flwr.common import ( DEFAULT_TTL, GRPC_MAX_MESSAGE_LENGTH, @@ -56,18 +58,22 @@ def on_channel_state_change(channel_connectivity: str) -> None: @contextmanager -def grpc_connection( # pylint: disable=R0915 +def grpc_connection( # pylint: disable=R0913, R0915 server_address: str, insecure: bool, retry_invoker: RetryInvoker, # pylint: disable=unused-argument max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, root_certificates: Optional[Union[bytes, str]] = None, + 
authentication_keys: Optional[ # pylint: disable=unused-argument + Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + ] = None, ) -> Iterator[ Tuple[ Callable[[], Optional[Message]], Callable[[Message], None], Optional[Callable[[], None]], Optional[Callable[[], None]], + Optional[Callable[[int], Tuple[str, str]]], ] ]: """Establish a gRPC connection to a gRPC server. @@ -224,7 +230,7 @@ def send(message: Message) -> None: try: # Yield methods - yield (receive, send, None, None) + yield (receive, send, None, None, None) finally: # Make sure to have a final channel.close() diff --git a/src/py/flwr/client/grpc_client/connection_test.py b/src/py/flwr/client/grpc_client/connection_test.py index 061e7d4377a0..da7800b26639 100644 --- a/src/py/flwr/client/grpc_client/connection_test.py +++ b/src/py/flwr/client/grpc_client/connection_test.py @@ -132,13 +132,13 @@ def run_client() -> int: server_address=f"[::]:{port}", insecure=True, retry_invoker=RetryInvoker( - wait_factory=exponential, + wait_gen_factory=exponential, recoverable_exceptions=grpc.RpcError, max_tries=1, max_time=None, ), ) as conn: - receive, send, _, _ = conn + receive, send, _, _, _ = conn # Setup processing loop while True: diff --git a/src/py/flwr/client/grpc_rere_client/client_interceptor.py b/src/py/flwr/client/grpc_rere_client/client_interceptor.py new file mode 100644 index 000000000000..8bc55878971d --- /dev/null +++ b/src/py/flwr/client/grpc_rere_client/client_interceptor.py @@ -0,0 +1,158 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flower client interceptor.""" + + +import base64 +import collections +from typing import Any, Callable, Optional, Sequence, Tuple, Union + +import grpc +from cryptography.hazmat.primitives.asymmetric import ec + +from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( + bytes_to_public_key, + compute_hmac, + generate_shared_key, + public_key_to_bytes, +) +from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 + CreateNodeRequest, + DeleteNodeRequest, + GetRunRequest, + PingRequest, + PullTaskInsRequest, + PushTaskResRequest, +) + +_PUBLIC_KEY_HEADER = "public-key" +_AUTH_TOKEN_HEADER = "auth-token" + +Request = Union[ + CreateNodeRequest, + DeleteNodeRequest, + PullTaskInsRequest, + PushTaskResRequest, + GetRunRequest, + PingRequest, +] + + +def _get_value_from_tuples( + key_string: str, tuples: Sequence[Tuple[str, Union[str, bytes]]] +) -> bytes: + value = next((value for key, value in tuples if key == key_string), "") + if isinstance(value, str): + return value.encode() + + return value + + +class _ClientCallDetails( + collections.namedtuple( + "_ClientCallDetails", ("method", "timeout", "metadata", "credentials") + ), + grpc.ClientCallDetails, # type: ignore +): + """Details for each client call. + + The class will be passed on as the first argument in continuation function. + In our case, `AuthenticateClientInterceptor` adds new metadata to the construct. 
+ """ + + +class AuthenticateClientInterceptor(grpc.UnaryUnaryClientInterceptor): # type: ignore + """Client interceptor for client authentication.""" + + def __init__( + self, + private_key: ec.EllipticCurvePrivateKey, + public_key: ec.EllipticCurvePublicKey, + ): + self.private_key = private_key + self.public_key = public_key + self.shared_secret: Optional[bytes] = None + self.server_public_key: Optional[ec.EllipticCurvePublicKey] = None + self.encoded_public_key = base64.urlsafe_b64encode( + public_key_to_bytes(self.public_key) + ) + + def intercept_unary_unary( + self, + continuation: Callable[[Any, Any], Any], + client_call_details: grpc.ClientCallDetails, + request: Request, + ) -> grpc.Call: + """Flower client interceptor. + + Intercept unary call from client and add necessary authentication header in the + RPC metadata. + """ + metadata = [] + postprocess = False + if client_call_details.metadata is not None: + metadata = list(client_call_details.metadata) + + # Always add the public key header + metadata.append( + ( + _PUBLIC_KEY_HEADER, + self.encoded_public_key, + ) + ) + + if isinstance(request, CreateNodeRequest): + postprocess = True + elif isinstance( + request, + ( + DeleteNodeRequest, + PullTaskInsRequest, + PushTaskResRequest, + GetRunRequest, + PingRequest, + ), + ): + if self.shared_secret is None: + raise RuntimeError("Failure to compute hmac") + + metadata.append( + ( + _AUTH_TOKEN_HEADER, + base64.urlsafe_b64encode( + compute_hmac( + self.shared_secret, request.SerializeToString(True) + ) + ), + ) + ) + + client_call_details = _ClientCallDetails( + client_call_details.method, + client_call_details.timeout, + metadata, + client_call_details.credentials, + ) + + response = continuation(client_call_details, request) + if postprocess: + server_public_key_bytes = base64.urlsafe_b64decode( + _get_value_from_tuples(_PUBLIC_KEY_HEADER, response.initial_metadata()) + ) + self.server_public_key = bytes_to_public_key(server_public_key_bytes) + 
self.shared_secret = generate_shared_key( + self.private_key, self.server_public_key + ) + return response diff --git a/src/py/flwr/client/grpc_rere_client/client_interceptor_test.py b/src/py/flwr/client/grpc_rere_client/client_interceptor_test.py new file mode 100644 index 000000000000..487361a06026 --- /dev/null +++ b/src/py/flwr/client/grpc_rere_client/client_interceptor_test.py @@ -0,0 +1,376 @@ +# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Flower client interceptor tests.""" + + +import base64 +import threading +import unittest +from concurrent import futures +from logging import DEBUG, INFO, WARN +from typing import Optional, Sequence, Tuple, Union + +import grpc + +from flwr.client.grpc_rere_client.connection import grpc_request_response +from flwr.common import GRPC_MAX_MESSAGE_LENGTH +from flwr.common.logger import log +from flwr.common.message import Message, Metadata +from flwr.common.record import RecordSet +from flwr.common.retry_invoker import RetryInvoker, exponential +from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( + compute_hmac, + generate_key_pairs, + generate_shared_key, + public_key_to_bytes, +) +from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 + CreateNodeRequest, + CreateNodeResponse, + DeleteNodeRequest, + DeleteNodeResponse, + GetRunRequest, + GetRunResponse, + PullTaskInsRequest, + PullTaskInsResponse, + PushTaskResRequest, + PushTaskResResponse, +) + +from .client_interceptor import _AUTH_TOKEN_HEADER, _PUBLIC_KEY_HEADER, Request + + +class _MockServicer: + """Mock Servicer for Flower clients.""" + + def __init__(self) -> None: + """Initialize mock servicer.""" + self._lock = threading.Lock() + self._received_client_metadata: Optional[ + Sequence[Tuple[str, Union[str, bytes]]] + ] = None + self.server_private_key, self.server_public_key = generate_key_pairs() + self._received_message_bytes: bytes = b"" + + def unary_unary( + self, request: Request, context: grpc.ServicerContext + ) -> Union[ + CreateNodeResponse, DeleteNodeResponse, PushTaskResResponse, PullTaskInsResponse + ]: + """Handle unary call.""" + with self._lock: + self._received_client_metadata = context.invocation_metadata() + self._received_message_bytes = request.SerializeToString(True) + + if isinstance(request, CreateNodeRequest): + context.send_initial_metadata( + ((_PUBLIC_KEY_HEADER, 
self.server_public_key),) + ) + return CreateNodeResponse() + if isinstance(request, DeleteNodeRequest): + return DeleteNodeResponse() + if isinstance(request, PushTaskResRequest): + return PushTaskResResponse() + + return PullTaskInsResponse() + + def received_client_metadata( + self, + ) -> Optional[Sequence[Tuple[str, Union[str, bytes]]]]: + """Return received client metadata.""" + with self._lock: + return self._received_client_metadata + + def received_message_bytes(self) -> bytes: + """Return received message bytes.""" + with self._lock: + return self._received_message_bytes + + +def _add_generic_handler(servicer: _MockServicer, server: grpc.Server) -> None: + rpc_method_handlers = { + "CreateNode": grpc.unary_unary_rpc_method_handler( + servicer.unary_unary, + request_deserializer=CreateNodeRequest.FromString, + response_serializer=CreateNodeResponse.SerializeToString, + ), + "DeleteNode": grpc.unary_unary_rpc_method_handler( + servicer.unary_unary, + request_deserializer=DeleteNodeRequest.FromString, + response_serializer=DeleteNodeResponse.SerializeToString, + ), + "PullTaskIns": grpc.unary_unary_rpc_method_handler( + servicer.unary_unary, + request_deserializer=PullTaskInsRequest.FromString, + response_serializer=PullTaskInsResponse.SerializeToString, + ), + "PushTaskRes": grpc.unary_unary_rpc_method_handler( + servicer.unary_unary, + request_deserializer=PushTaskResRequest.FromString, + response_serializer=PushTaskResResponse.SerializeToString, + ), + "GetRun": grpc.unary_unary_rpc_method_handler( + servicer.unary_unary, + request_deserializer=GetRunRequest.FromString, + response_serializer=GetRunResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + "flwr.proto.Fleet", rpc_method_handlers + ) + server.add_generic_rpc_handlers((generic_handler,)) + + +def _init_retry_invoker() -> RetryInvoker: + return RetryInvoker( + wait_gen_factory=exponential, + recoverable_exceptions=grpc.RpcError, + max_tries=None, + 
max_time=None, + on_giveup=lambda retry_state: ( + log( + WARN, + "Giving up reconnection after %.2f seconds and %s tries.", + retry_state.elapsed_time, + retry_state.tries, + ) + if retry_state.tries > 1 + else None + ), + on_success=lambda retry_state: ( + log( + INFO, + "Connection successful after %.2f seconds and %s tries.", + retry_state.elapsed_time, + retry_state.tries, + ) + if retry_state.tries > 1 + else None + ), + on_backoff=lambda retry_state: ( + log(WARN, "Connection attempt failed, retrying...") + if retry_state.tries == 1 + else log( + DEBUG, + "Connection attempt failed, retrying in %.2f seconds", + retry_state.actual_wait, + ) + ), + ) + + +class TestAuthenticateClientInterceptor(unittest.TestCase): + """Test for client interceptor client authentication.""" + + def setUp(self) -> None: + """Initialize mock server and client.""" + self._server = grpc.server( + futures.ThreadPoolExecutor(max_workers=10), + options=(("grpc.so_reuseport", int(False)),), + ) + self._servicer = _MockServicer() + _add_generic_handler(self._servicer, self._server) + port = self._server.add_insecure_port("[::]:0") + self._server.start() + self._client_private_key, self._client_public_key = generate_key_pairs() + + self._connection = grpc_request_response + self._address = f"localhost:{port}" + + def test_client_auth_create_node(self) -> None: + """Test client authentication during create node.""" + # Prepare + retry_invoker = _init_retry_invoker() + + # Execute + with self._connection( + self._address, + True, + retry_invoker, + GRPC_MAX_MESSAGE_LENGTH, + None, + (self._client_private_key, self._client_public_key), + ) as conn: + _, _, create_node, _, _ = conn + assert create_node is not None + create_node() + expected_client_metadata = ( + _PUBLIC_KEY_HEADER, + base64.urlsafe_b64encode(public_key_to_bytes(self._client_public_key)), + ) + + # Assert + assert self._servicer.received_client_metadata() == expected_client_metadata + + def test_client_auth_delete_node(self) 
-> None: + """Test client authentication during delete node.""" + # Prepare + retry_invoker = _init_retry_invoker() + + # Execute + with self._connection( + self._address, + True, + retry_invoker, + GRPC_MAX_MESSAGE_LENGTH, + None, + (self._client_private_key, self._client_public_key), + ) as conn: + _, _, _, delete_node, _ = conn + assert delete_node is not None + delete_node() + shared_secret = generate_shared_key( + self._servicer.server_private_key, self._client_public_key + ) + expected_hmac = compute_hmac( + shared_secret, self._servicer.received_message_bytes() + ) + expected_client_metadata = ( + ( + _PUBLIC_KEY_HEADER, + base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ), + ), + ( + _AUTH_TOKEN_HEADER, + base64.urlsafe_b64encode(expected_hmac), + ), + ) + + # Assert + assert self._servicer.received_client_metadata() == expected_client_metadata + + def test_client_auth_receive(self) -> None: + """Test client authentication during receive node.""" + # Prepare + retry_invoker = _init_retry_invoker() + + # Execute + with self._connection( + self._address, + True, + retry_invoker, + GRPC_MAX_MESSAGE_LENGTH, + None, + (self._client_private_key, self._client_public_key), + ) as conn: + receive, _, _, _, _ = conn + assert receive is not None + receive() + shared_secret = generate_shared_key( + self._servicer.server_private_key, self._client_public_key + ) + expected_hmac = compute_hmac( + shared_secret, self._servicer.received_message_bytes() + ) + expected_client_metadata = ( + ( + _PUBLIC_KEY_HEADER, + base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ), + ), + ( + _AUTH_TOKEN_HEADER, + base64.urlsafe_b64encode(expected_hmac), + ), + ) + + # Assert + assert self._servicer.received_client_metadata() == expected_client_metadata + + def test_client_auth_send(self) -> None: + """Test client authentication during send node.""" + # Prepare + retry_invoker = _init_retry_invoker() + message = Message(Metadata(0, "1", 0, 
0, "", "", 0, ""), RecordSet()) + + # Execute + with self._connection( + self._address, + True, + retry_invoker, + GRPC_MAX_MESSAGE_LENGTH, + None, + (self._client_private_key, self._client_public_key), + ) as conn: + _, send, _, _, _ = conn + assert send is not None + send(message) + shared_secret = generate_shared_key( + self._servicer.server_private_key, self._client_public_key + ) + expected_hmac = compute_hmac( + shared_secret, self._servicer.received_message_bytes() + ) + expected_client_metadata = ( + ( + _PUBLIC_KEY_HEADER, + base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ), + ), + ( + _AUTH_TOKEN_HEADER, + base64.urlsafe_b64encode(expected_hmac), + ), + ) + + # Assert + assert self._servicer.received_client_metadata() == expected_client_metadata + + def test_client_auth_get_run(self) -> None: + """Test client authentication during get run.""" + # Prepare + retry_invoker = _init_retry_invoker() + + # Execute + with self._connection( + self._address, + True, + retry_invoker, + GRPC_MAX_MESSAGE_LENGTH, + None, + (self._client_private_key, self._client_public_key), + ) as conn: + _, _, _, _, get_run = conn + assert get_run is not None + get_run(0) + shared_secret = generate_shared_key( + self._servicer.server_private_key, self._client_public_key + ) + expected_hmac = compute_hmac( + shared_secret, self._servicer.received_message_bytes() + ) + expected_client_metadata = ( + ( + _PUBLIC_KEY_HEADER, + base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ), + ), + ( + _AUTH_TOKEN_HEADER, + base64.urlsafe_b64encode(expected_hmac), + ), + ) + + # Assert + assert self._servicer.received_client_metadata() == expected_client_metadata + + +if __name__ == "__main__": + unittest.main(verbosity=2) diff --git a/src/py/flwr/client/grpc_rere_client/connection.py b/src/py/flwr/client/grpc_rere_client/connection.py index e6e22998b947..3778fd4061f9 100644 --- a/src/py/flwr/client/grpc_rere_client/connection.py +++
b/src/py/flwr/client/grpc_rere_client/connection.py @@ -15,23 +15,39 @@ """Contextmanager for a gRPC request-response channel to the Flower server.""" +import random +import threading from contextlib import contextmanager from copy import copy from logging import DEBUG, ERROR from pathlib import Path -from typing import Callable, Dict, Iterator, Optional, Tuple, Union, cast +from typing import Callable, Iterator, Optional, Sequence, Tuple, Union, cast +import grpc +from cryptography.hazmat.primitives.asymmetric import ec + +from flwr.client.heartbeat import start_ping_loop from flwr.client.message_handler.message_handler import validate_out_message from flwr.client.message_handler.task_handler import get_task_ins, validate_task_ins from flwr.common import GRPC_MAX_MESSAGE_LENGTH +from flwr.common.constant import ( + PING_BASE_MULTIPLIER, + PING_CALL_TIMEOUT, + PING_DEFAULT_INTERVAL, + PING_RANDOM_RANGE, +) from flwr.common.grpc import create_channel -from flwr.common.logger import log, warn_experimental_feature +from flwr.common.logger import log from flwr.common.message import Message, Metadata from flwr.common.retry_invoker import RetryInvoker from flwr.common.serde import message_from_taskins, message_to_taskres from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, DeleteNodeRequest, + GetRunRequest, + GetRunResponse, + PingRequest, + PingResponse, PullTaskInsRequest, PushTaskResRequest, ) @@ -39,8 +55,7 @@ from flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.proto.task_pb2 import TaskIns # pylint: disable=E0611 -KEY_NODE = "node" -KEY_METADATA = "in_message_metadata" +from .client_interceptor import AuthenticateClientInterceptor def on_channel_state_change(channel_connectivity: str) -> None: @@ -49,18 +64,22 @@ def on_channel_state_change(channel_connectivity: str) -> None: @contextmanager -def grpc_request_response( +def grpc_request_response( # pylint: disable=R0913, R0914, R0915 server_address: str, insecure: bool, 
retry_invoker: RetryInvoker, max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, # pylint: disable=W0613 root_certificates: Optional[Union[bytes, str]] = None, + authentication_keys: Optional[ + Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + ] = None, ) -> Iterator[ Tuple[ Callable[[], Optional[Message]], Callable[[Message], None], Optional[Callable[[], None]], Optional[Callable[[], None]], + Optional[Callable[[int], Tuple[str, str]]], ] ]: """Primitives for request/response-based interaction with a server. @@ -95,59 +114,98 @@ def grpc_request_response( create_node : Optional[Callable] delete_node : Optional[Callable] """ - warn_experimental_feature("`grpc-rere`") - if isinstance(root_certificates, str): root_certificates = Path(root_certificates).read_bytes() + interceptors: Optional[Sequence[grpc.UnaryUnaryClientInterceptor]] = None + if authentication_keys is not None: + interceptors = AuthenticateClientInterceptor( + authentication_keys[0], authentication_keys[1] + ) + channel = create_channel( server_address=server_address, insecure=insecure, root_certificates=root_certificates, max_message_length=max_message_length, + interceptors=interceptors, ) channel.subscribe(on_channel_state_change) - stub = FleetStub(channel) - # Necessary state to validate messages to be sent - state: Dict[str, Optional[Metadata]] = {KEY_METADATA: None} - - # Enable create_node and delete_node to store node - node_store: Dict[str, Optional[Node]] = {KEY_NODE: None} + # Shared variables for inner functions + stub = FleetStub(channel) + metadata: Optional[Metadata] = None + node: Optional[Node] = None + ping_thread: Optional[threading.Thread] = None + ping_stop_event = threading.Event() ########################################################################### - # receive/send functions + # ping/create_node/delete_node/receive/send/get_run functions ########################################################################### + def ping() -> None: + # Get Node + if node 
is None: + log(ERROR, "Node instance missing") + return + + # Construct the ping request + req = PingRequest(node=node, ping_interval=PING_DEFAULT_INTERVAL) + + # Call FleetAPI + res: PingResponse = stub.Ping(req, timeout=PING_CALL_TIMEOUT) + + # Check if success + if not res.success: + raise RuntimeError("Ping failed unexpectedly.") + + # Wait + rd = random.uniform(*PING_RANDOM_RANGE) + next_interval: float = PING_DEFAULT_INTERVAL - PING_CALL_TIMEOUT + next_interval *= PING_BASE_MULTIPLIER + rd + if not ping_stop_event.is_set(): + ping_stop_event.wait(next_interval) + def create_node() -> None: """Set create_node.""" - create_node_request = CreateNodeRequest() + # Call FleetAPI + create_node_request = CreateNodeRequest(ping_interval=PING_DEFAULT_INTERVAL) create_node_response = retry_invoker.invoke( stub.CreateNode, request=create_node_request, ) - node_store[KEY_NODE] = create_node_response.node + + # Remember the node and the ping-loop thread + nonlocal node, ping_thread + node = cast(Node, create_node_response.node) + ping_thread = start_ping_loop(ping, ping_stop_event) def delete_node() -> None: """Set delete_node.""" # Get Node - if node_store[KEY_NODE] is None: + nonlocal node + if node is None: log(ERROR, "Node instance missing") return - node: Node = cast(Node, node_store[KEY_NODE]) + # Stop the ping-loop thread + ping_stop_event.set() + if ping_thread is not None: + ping_thread.join() + + # Call FleetAPI delete_node_request = DeleteNodeRequest(node=node) retry_invoker.invoke(stub.DeleteNode, request=delete_node_request) - del node_store[KEY_NODE] + # Cleanup + node = None def receive() -> Optional[Message]: """Receive next task from server.""" # Get Node - if node_store[KEY_NODE] is None: + if node is None: log(ERROR, "Node instance missing") return None - node: Node = cast(Node, node_store[KEY_NODE]) # Request instructions (task) from server request = PullTaskInsRequest(node=node) @@ -167,7 +225,8 @@ def receive() -> Optional[Message]: in_message = 
message_from_taskins(task_ins) if task_ins else None # Remember `metadata` of the in message - state[KEY_METADATA] = copy(in_message.metadata) if in_message else None + nonlocal metadata + metadata = copy(in_message.metadata) if in_message else None # Return the message if available return in_message @@ -175,18 +234,18 @@ def receive() -> Optional[Message]: def send(message: Message) -> None: """Send task result back to server.""" # Get Node - if node_store[KEY_NODE] is None: + if node is None: log(ERROR, "Node instance missing") return - # Get incoming message - in_metadata = state[KEY_METADATA] - if in_metadata is None: + # Get the metadata of the incoming message + nonlocal metadata + if metadata is None: log(ERROR, "No current message") return # Validate out message - if not validate_out_message(message, in_metadata): + if not validate_out_message(message, metadata): log(ERROR, "Invalid out message") return @@ -197,10 +256,22 @@ def send(message: Message) -> None: request = PushTaskResRequest(task_res_list=[task_res]) _ = retry_invoker.invoke(stub.PushTaskRes, request) - state[KEY_METADATA] = None + # Cleanup + metadata = None + + def get_run(run_id: int) -> Tuple[str, str]: + # Call FleetAPI + get_run_request = GetRunRequest(run_id=run_id) + get_run_response: GetRunResponse = retry_invoker.invoke( + stub.GetRun, + request=get_run_request, + ) + + # Return fab_id and fab_version + return get_run_response.run.fab_id, get_run_response.run.fab_version try: # Yield methods - yield (receive, send, create_node, delete_node) + yield (receive, send, create_node, delete_node, get_run) except Exception as exc: # pylint: disable=broad-except log(ERROR, exc) diff --git a/src/py/flwr/client/heartbeat.py b/src/py/flwr/client/heartbeat.py new file mode 100644 index 000000000000..b68e6163cc01 --- /dev/null +++ b/src/py/flwr/client/heartbeat.py @@ -0,0 +1,74 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Heartbeat utility functions.""" + + +import threading +from typing import Callable + +import grpc + +from flwr.common.constant import PING_CALL_TIMEOUT +from flwr.common.retry_invoker import RetryInvoker, RetryState, exponential + + +def _ping_loop(ping_fn: Callable[[], None], stop_event: threading.Event) -> None: + def wait_fn(wait_time: float) -> None: + if not stop_event.is_set(): + stop_event.wait(wait_time) + + def on_backoff(state: RetryState) -> None: + err = state.exception + if not isinstance(err, grpc.RpcError): + return + status_code = err.code() + # If ping call timeout is triggered + if status_code == grpc.StatusCode.DEADLINE_EXCEEDED: + # Avoid long wait time. + if state.actual_wait is None: + return + state.actual_wait = max(state.actual_wait - PING_CALL_TIMEOUT, 0.0) + + def wrapped_ping() -> None: + if not stop_event.is_set(): + ping_fn() + + retrier = RetryInvoker( + exponential, + grpc.RpcError, + max_tries=None, + max_time=None, + on_backoff=on_backoff, + wait_function=wait_fn, + ) + while not stop_event.is_set(): + retrier.invoke(wrapped_ping) + + +def start_ping_loop( + ping_fn: Callable[[], None], stop_event: threading.Event +) -> threading.Thread: + """Start a ping loop in a separate thread. + + This function initializes a new thread that runs a ping loop, allowing for + asynchronous ping operations. 
The loop can be terminated through the provided stop + event. + """ + thread = threading.Thread( + target=_ping_loop, args=(ping_fn, stop_event), daemon=True + ) + thread.start() + + return thread diff --git a/src/py/flwr/client/heartbeat_test.py b/src/py/flwr/client/heartbeat_test.py new file mode 100644 index 000000000000..286429e075b1 --- /dev/null +++ b/src/py/flwr/client/heartbeat_test.py @@ -0,0 +1,59 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Unit tests for heartbeat utility functions.""" + + +import threading +import time +import unittest +from unittest.mock import MagicMock + +from .heartbeat import start_ping_loop + + +class TestStartPingLoopWithFailures(unittest.TestCase): + """Test heartbeat utility functions.""" + + def test_ping_loop_terminates(self) -> None: + """Test if the ping loop thread terminates when flagged.""" + # Prepare + ping_fn = MagicMock() + stop_event = threading.Event() + + # Execute + thread = start_ping_loop(ping_fn, stop_event) + time.sleep(1) + stop_event.set() + thread.join(timeout=1) + + # Assert + self.assertTrue(ping_fn.called) + self.assertFalse(thread.is_alive()) + + def test_ping_loop_with_failures_terminates(self) -> None: + """Test if the ping loop thread with failures terminates when flagged.""" + # Prepare + ping_fn = MagicMock(side_effect=RuntimeError()) + stop_event = threading.Event() + + # Execute + thread = start_ping_loop(ping_fn, stop_event) + time.sleep(1) + stop_event.set() + thread.join(timeout=1) + + # Assert + self.assertTrue(ping_fn.called) + self.assertFalse(thread.is_alive()) diff --git a/src/py/flwr/client/message_handler/message_handler.py b/src/py/flwr/client/message_handler/message_handler.py index 87014f436cf7..e5acbe0cc9d0 100644 --- a/src/py/flwr/client/message_handler/message_handler.py +++ b/src/py/flwr/client/message_handler/message_handler.py @@ -172,6 +172,7 @@ def validate_out_message(out_message: Message, in_message_metadata: Metadata) -> and out_meta.reply_to_message == in_meta.message_id and out_meta.group_id == in_meta.group_id and out_meta.message_type == in_meta.message_type + and out_meta.created_at > in_meta.created_at ): return True return False diff --git a/src/py/flwr/client/message_handler/message_handler_test.py b/src/py/flwr/client/message_handler/message_handler_test.py index e3f6487421cc..8a2db1804e4a 100644 --- 
a/src/py/flwr/client/message_handler/message_handler_test.py +++ b/src/py/flwr/client/message_handler/message_handler_test.py @@ -15,6 +15,7 @@ """Client-side message handler tests.""" +import time import unittest import uuid from copy import copy @@ -162,14 +163,25 @@ def test_client_without_get_properties() -> None: src_node_id=1123, dst_node_id=0, reply_to_message=message.metadata.message_id, - ttl=DEFAULT_TTL, + ttl=actual_msg.metadata.ttl, # computed based on [message].create_reply() message_type=MessageTypeLegacy.GET_PROPERTIES, ), content=expected_rs, ) assert actual_msg.content == expected_msg.content - assert actual_msg.metadata == expected_msg.metadata + # metadata.created_at will differ so let's exclude it from checks + attrs = vars(actual_msg.metadata) + attrs_keys = list(attrs.keys()) + attrs_keys.remove("_created_at") + # metadata.created_at will differ so let's exclude it from checks + for attr in attrs_keys: + assert getattr(actual_msg.metadata, attr) == getattr( + expected_msg.metadata, attr + ) + + # Ensure the message created last has a higher timestamp + assert actual_msg.metadata.created_at < expected_msg.metadata.created_at def test_client_with_get_properties() -> None: @@ -215,14 +227,24 @@ def test_client_with_get_properties() -> None: src_node_id=1123, dst_node_id=0, reply_to_message=message.metadata.message_id, - ttl=DEFAULT_TTL, + ttl=actual_msg.metadata.ttl, # computed based on [message].create_reply() message_type=MessageTypeLegacy.GET_PROPERTIES, ), content=expected_rs, ) assert actual_msg.content == expected_msg.content - assert actual_msg.metadata == expected_msg.metadata + attrs = vars(actual_msg.metadata) + attrs_keys = list(attrs.keys()) + attrs_keys.remove("_created_at") + # metadata.created_at will differ so let's exclude it from checks + for attr in attrs_keys: + assert getattr(actual_msg.metadata, attr) == getattr( + expected_msg.metadata, attr + ) + + # Ensure the message created last has a higher timestamp + assert 
actual_msg.metadata.created_at < expected_msg.metadata.created_at class TestMessageValidation(unittest.TestCase): @@ -241,6 +263,11 @@ def setUp(self) -> None: ttl=DEFAULT_TTL, message_type="mock", ) + # We need to set created_at in this way + # since this `self.in_metadata` is used for tests + # without it ever being part of a Message + self.in_metadata.created_at = time.time() + self.valid_out_metadata = Metadata( run_id=123, message_id="", @@ -281,11 +308,15 @@ def test_invalid_message_run_id(self) -> None: value = 999 elif isinstance(value, str): value = "999" + elif isinstance(value, float): + if attr == "_created_at": + # make it be in 1h the past + value = value - 3600 setattr(invalid_metadata, attr, value) # Add to list invalid_metadata_list.append(invalid_metadata) # Assert for invalid_metadata in invalid_metadata_list: - msg._metadata = invalid_metadata # pylint: disable=protected-access + msg.__dict__["_metadata"] = invalid_metadata self.assertFalse(validate_out_message(msg, self.in_metadata)) diff --git a/src/py/flwr/client/mod/centraldp_mods.py b/src/py/flwr/client/mod/centraldp_mods.py index 4f4a595e8d9c..e6276ccf2245 100644 --- a/src/py/flwr/client/mod/centraldp_mods.py +++ b/src/py/flwr/client/mod/centraldp_mods.py @@ -82,7 +82,9 @@ def fixedclipping_mod( clipping_norm, ) - log(INFO, "fixedclipping_mod: parameters are clipped by value: %s.", clipping_norm) + log( + INFO, "fixedclipping_mod: parameters are clipped by value: %.4f.", clipping_norm + ) fit_res.parameters = ndarrays_to_parameters(client_to_server_params) out_msg.content = compat.fitres_to_recordset(fit_res, keep_input=True) @@ -146,7 +148,7 @@ def adaptiveclipping_mod( ) log( INFO, - "adaptiveclipping_mod: parameters are clipped by value: %s.", + "adaptiveclipping_mod: parameters are clipped by value: %.4f.", clipping_norm, ) diff --git a/src/py/flwr/client/mod/comms_mods.py b/src/py/flwr/client/mod/comms_mods.py index 102d2f477262..058fd6a70001 100644 --- 
a/src/py/flwr/client/mod/comms_mods.py +++ b/src/py/flwr/client/mod/comms_mods.py @@ -29,7 +29,7 @@ def message_size_mod( ) -> Message: """Message size mod. - This mod logs the size in Bytes of the message being transmited. + This mod logs the size in bytes of the message being transmitted. """ message_size_in_bytes = 0 @@ -42,7 +42,7 @@ for m_record in msg.content.metrics_records.values(): message_size_in_bytes += m_record.count_bytes() - log(INFO, "Message size: %i Bytes", message_size_in_bytes) + log(INFO, "Message size: %i bytes", message_size_in_bytes) return call_next(msg, ctxt) @@ -53,7 +53,7 @@ """Parameters size mod. This mod logs the number of parameters transmitted in the message as well as their - size in Bytes. + size in bytes. """ model_size_stats = {} parameters_size_in_bytes = 0 @@ -74,6 +74,6 @@ if model_size_stats: log(INFO, model_size_stats) - log(INFO, "Total parameters transmited: %i Bytes", parameters_size_in_bytes) + log(INFO, "Total parameters transmitted: %i bytes", parameters_size_in_bytes) return call_next(msg, ctxt) diff --git a/src/py/flwr/client/mod/localdp_mod.py b/src/py/flwr/client/mod/localdp_mod.py index 3b0311a612b9..fe369ad1c7e6 100644 --- a/src/py/flwr/client/mod/localdp_mod.py +++ b/src/py/flwr/client/mod/localdp_mod.py @@ -128,7 +128,9 @@ def __call__( self.clipping_norm, ) log( - INFO, "LocalDpMod: parameters are clipped by value: %s.", self.clipping_norm + INFO, + "LocalDpMod: parameters are clipped by value: %.4f.", + self.clipping_norm, ) fit_res.parameters = ndarrays_to_parameters(client_to_server_params) @@ -137,11 +139,14 @@ add_localdp_gaussian_noise_to_params( fit_res.parameters, self.sensitivity, self.epsilon, self.delta ) + + noise_value_sd = ( + self.sensitivity * np.sqrt(2 * np.log(1.25 / self.delta)) / self.epsilon + ) log( INFO, - "LocalDpMod: local DP noise with " - "standard deviation: %s added to parameters.", - 
self.sensitivity * np.sqrt(2 * np.log(1.25 / self.delta)) / self.epsilon, + "LocalDpMod: local DP noise with %.4f stedv added to parameters", + noise_value_sd, ) out_msg.content = compat.fitres_to_recordset(fit_res, keep_input=True) diff --git a/src/py/flwr/client/rest_client/connection.py b/src/py/flwr/client/rest_client/connection.py index d2cc71ba3b3f..da8fbd351ab1 100644 --- a/src/py/flwr/client/rest_client/connection.py +++ b/src/py/flwr/client/rest_client/connection.py @@ -15,16 +15,28 @@ """Contextmanager for a REST request-response channel to the Flower server.""" +import random import sys +import threading from contextlib import contextmanager from copy import copy from logging import ERROR, INFO, WARN -from typing import Callable, Dict, Iterator, Optional, Tuple, Union, cast +from typing import Callable, Iterator, Optional, Tuple, Type, TypeVar, Union +from cryptography.hazmat.primitives.asymmetric import ec +from google.protobuf.message import Message as GrpcMessage + +from flwr.client.heartbeat import start_ping_loop from flwr.client.message_handler.message_handler import validate_out_message from flwr.client.message_handler.task_handler import get_task_ins, validate_task_ins from flwr.common import GRPC_MAX_MESSAGE_LENGTH -from flwr.common.constant import MISSING_EXTRA_REST +from flwr.common.constant import ( + MISSING_EXTRA_REST, + PING_BASE_MULTIPLIER, + PING_CALL_TIMEOUT, + PING_DEFAULT_INTERVAL, + PING_RANDOM_RANGE, +) from flwr.common.logger import log from flwr.common.message import Message, Metadata from flwr.common.retry_invoker import RetryInvoker @@ -33,6 +45,11 @@ CreateNodeRequest, CreateNodeResponse, DeleteNodeRequest, + DeleteNodeResponse, + GetRunRequest, + GetRunResponse, + PingRequest, + PingResponse, PullTaskInsRequest, PullTaskInsResponse, PushTaskResRequest, @@ -47,19 +64,18 @@ sys.exit(MISSING_EXTRA_REST) -KEY_NODE = "node" -KEY_METADATA = "in_message_metadata" - - PATH_CREATE_NODE: str = "api/v0/fleet/create-node" 
PATH_DELETE_NODE: str = "api/v0/fleet/delete-node" PATH_PULL_TASK_INS: str = "api/v0/fleet/pull-task-ins" PATH_PUSH_TASK_RES: str = "api/v0/fleet/push-task-res" +PATH_PING: str = "api/v0/fleet/ping" +PATH_GET_RUN: str = "api/v0/fleet/get-run" + +T = TypeVar("T", bound=GrpcMessage) @contextmanager -# pylint: disable-next=too-many-statements -def http_request_response( +def http_request_response(  # pylint: disable=,R0913, R0914, R0915 server_address: str, insecure: bool,  # pylint: disable=unused-argument retry_invoker: RetryInvoker, @@ -67,12 +83,16 @@ root_certificates: Optional[ Union[bytes, str] ] = None,  # pylint: disable=unused-argument +    authentication_keys: Optional[  # pylint: disable=unused-argument +        Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] +    ] = None, ) -> Iterator[ Tuple[ Callable[[], Optional[Message]], Callable[[Message], None], Optional[Callable[[], None]], Optional[Callable[[], None]], +        Optional[Callable[[int], Tuple[str, str]]], ] ]: """Primitives for request/response-based interaction with a server. 
@@ -127,143 +147,142 @@ def http_request_response( "must be provided as a string path to the client.", ) - # Necessary state to validate messages to be sent - state: Dict[str, Optional[Metadata]] = {KEY_METADATA: None} - - # Enable create_node and delete_node to store node - node_store: Dict[str, Optional[Node]] = {KEY_NODE: None} + # Shared variables for inner functions + metadata: Optional[Metadata] = None + node: Optional[Node] = None + ping_thread: Optional[threading.Thread] = None + ping_stop_event = threading.Event() ########################################################################### - # receive/send functions + # ping/create_node/delete_node/receive/send/get_run functions ########################################################################### - def create_node() -> None: - """Set create_node.""" - create_node_req_proto = CreateNodeRequest() - create_node_req_bytes: bytes = create_node_req_proto.SerializeToString() - - res = retry_invoker.invoke( - requests.post, - url=f"{base_url}/{PATH_CREATE_NODE}", - headers={ - "Accept": "application/protobuf", - "Content-Type": "application/protobuf", - }, - data=create_node_req_bytes, - verify=verify, - timeout=None, - ) + def _request( + req: GrpcMessage, res_type: Type[T], api_path: str, retry: bool = True + ) -> Optional[T]: + # Serialize the request + req_bytes = req.SerializeToString() + + # Send the request + def post() -> requests.Response: + return requests.post( + f"{base_url}/{api_path}", + data=req_bytes, + headers={ + "Accept": "application/protobuf", + "Content-Type": "application/protobuf", + }, + verify=verify, + timeout=None, + ) + + if retry: + res: requests.Response = retry_invoker.invoke(post) + else: + res = post() # Check status code and headers if res.status_code != 200: - return + return None if "content-type" not in res.headers: log( WARN, "[Node] POST /%s: missing header `Content-Type`", - PATH_PULL_TASK_INS, + api_path, ) - return + return None if res.headers["content-type"] != 
"application/protobuf": log( WARN, "[Node] POST /%s: header `Content-Type` has wrong value", - PATH_PULL_TASK_INS, + api_path, ) - return + return None # Deserialize ProtoBuf from bytes - create_node_response_proto = CreateNodeResponse() - create_node_response_proto.ParseFromString(res.content) - # pylint: disable-next=no-member - node_store[KEY_NODE] = create_node_response_proto.node + grpc_res = res_type() + grpc_res.ParseFromString(res.content) + return grpc_res + + def ping() -> None: + # Get Node + if node is None: + log(ERROR, "Node instance missing") + return + + # Construct the ping request + req = PingRequest(node=node, ping_interval=PING_DEFAULT_INTERVAL) + + # Send the request + res = _request(req, PingResponse, PATH_PING, retry=False) + if res is None: + return + + # Check if success + if not res.success: + raise RuntimeError("Ping failed unexpectedly.") + + # Wait + rd = random.uniform(*PING_RANDOM_RANGE) + next_interval: float = PING_DEFAULT_INTERVAL - PING_CALL_TIMEOUT + next_interval *= PING_BASE_MULTIPLIER + rd + if not ping_stop_event.is_set(): + ping_stop_event.wait(next_interval) + + def create_node() -> None: + """Set create_node.""" + req = CreateNodeRequest(ping_interval=PING_DEFAULT_INTERVAL) + + # Send the request + res = _request(req, CreateNodeResponse, PATH_CREATE_NODE) + if res is None: + return + + # Remember the node and the ping-loop thread + nonlocal node, ping_thread + node = res.node + ping_thread = start_ping_loop(ping, ping_stop_event) def delete_node() -> None: """Set delete_node.""" - if node_store[KEY_NODE] is None: + nonlocal node + if node is None: log(ERROR, "Node instance missing") return - node: Node = cast(Node, node_store[KEY_NODE]) - delete_node_req_proto = DeleteNodeRequest(node=node) - delete_node_req_req_bytes: bytes = delete_node_req_proto.SerializeToString() - res = retry_invoker.invoke( - requests.post, - url=f"{base_url}/{PATH_DELETE_NODE}", - headers={ - "Accept": "application/protobuf", - "Content-Type": 
"application/protobuf", - }, - data=delete_node_req_req_bytes, - verify=verify, - timeout=None, - ) - # Check status code and headers - if res.status_code != 200: - return - if "content-type" not in res.headers: - log( - WARN, - "[Node] POST /%s: missing header `Content-Type`", - PATH_PULL_TASK_INS, - ) + # Stop the ping-loop thread + ping_stop_event.set() + if ping_thread is not None: + ping_thread.join() + + # Send DeleteNode request + req = DeleteNodeRequest(node=node) + + # Send the request + res = _request(req, DeleteNodeResponse, PATH_DELETE_NODE) + if res is None: return - if res.headers["content-type"] != "application/protobuf": - log( - WARN, - "[Node] POST /%s: header `Content-Type` has wrong value", - PATH_PULL_TASK_INS, - ) + + # Cleanup + node = None def receive() -> Optional[Message]: """Receive next task from server.""" # Get Node - if node_store[KEY_NODE] is None: + if node is None: log(ERROR, "Node instance missing") return None - node: Node = cast(Node, node_store[KEY_NODE]) - - # Request instructions (task) from server - pull_task_ins_req_proto = PullTaskInsRequest(node=node) - pull_task_ins_req_bytes: bytes = pull_task_ins_req_proto.SerializeToString() # Request instructions (task) from server - res = retry_invoker.invoke( - requests.post, - url=f"{base_url}/{PATH_PULL_TASK_INS}", - headers={ - "Accept": "application/protobuf", - "Content-Type": "application/protobuf", - }, - data=pull_task_ins_req_bytes, - verify=verify, - timeout=None, - ) + req = PullTaskInsRequest(node=node) - # Check status code and headers - if res.status_code != 200: + # Send the request + res = _request(req, PullTaskInsResponse, PATH_PULL_TASK_INS) + if res is None: return None - if "content-type" not in res.headers: - log( - WARN, - "[Node] POST /%s: missing header `Content-Type`", - PATH_PULL_TASK_INS, - ) - return None - if res.headers["content-type"] != "application/protobuf": - log( - WARN, - "[Node] POST /%s: header `Content-Type` has wrong value", - 
PATH_PULL_TASK_INS, - ) - return None - - # Deserialize ProtoBuf from bytes - pull_task_ins_response_proto = PullTaskInsResponse() - pull_task_ins_response_proto.ParseFromString(res.content) # Get the current TaskIns - task_ins: Optional[TaskIns] = get_task_ins(pull_task_ins_response_proto) + task_ins: Optional[TaskIns] = get_task_ins(res) # Discard the current TaskIns if not valid if task_ins is not None and not ( @@ -273,86 +292,64 @@ def receive() -> Optional[Message]: task_ins = None # Return the Message if available + nonlocal metadata message = None - state[KEY_METADATA] = None if task_ins is not None: message = message_from_taskins(task_ins) - state[KEY_METADATA] = copy(message.metadata) + metadata = copy(message.metadata) log(INFO, "[Node] POST /%s: success", PATH_PULL_TASK_INS) return message def send(message: Message) -> None: """Send task result back to server.""" # Get Node - if node_store[KEY_NODE] is None: + if node is None: log(ERROR, "Node instance missing") return # Get incoming message - in_metadata = state[KEY_METADATA] - if in_metadata is None: + nonlocal metadata + if metadata is None: log(ERROR, "No current message") return # Validate out message - if not validate_out_message(message, in_metadata): + if not validate_out_message(message, metadata): log(ERROR, "Invalid out message") return + metadata = None # Construct TaskRes task_res = message_to_taskres(message) # Serialize ProtoBuf to bytes - push_task_res_request_proto = PushTaskResRequest(task_res_list=[task_res]) - push_task_res_request_bytes: bytes = ( - push_task_res_request_proto.SerializeToString() - ) - - # Send ClientMessage to server - res = retry_invoker.invoke( - requests.post, - url=f"{base_url}/{PATH_PUSH_TASK_RES}", - headers={ - "Accept": "application/protobuf", - "Content-Type": "application/protobuf", - }, - data=push_task_res_request_bytes, - verify=verify, - timeout=None, - ) + req = PushTaskResRequest(task_res_list=[task_res]) - state[KEY_METADATA] = None - - # Check 
status code and headers - if res.status_code != 200: - return - if "content-type" not in res.headers: - log( - WARN, - "[Node] POST /%s: missing header `Content-Type`", - PATH_PUSH_TASK_RES, - ) - return - if res.headers["content-type"] != "application/protobuf": - log( - WARN, - "[Node] POST /%s: header `Content-Type` has wrong value", - PATH_PUSH_TASK_RES, - ) + # Send the request + res = _request(req, PushTaskResResponse, PATH_PUSH_TASK_RES) + if res is None: return - # Deserialize ProtoBuf from bytes - push_task_res_response_proto = PushTaskResResponse() - push_task_res_response_proto.ParseFromString(res.content) log( INFO, "[Node] POST /%s: success, created result %s", PATH_PUSH_TASK_RES, - push_task_res_response_proto.results, # pylint: disable=no-member + res.results, # pylint: disable=no-member ) + def get_run(run_id: int) -> Tuple[str, str]: + # Construct the request + req = GetRunRequest(run_id=run_id) + + # Send the request + res = _request(req, GetRunResponse, PATH_GET_RUN) + if res is None: + return "", "" + + return res.run.fab_id, res.run.fab_version + try: # Yield methods - yield (receive, send, create_node, delete_node) + yield (receive, send, create_node, delete_node, get_run) except Exception as exc: # pylint: disable=broad-except log(ERROR, exc) diff --git a/src/py/flwr_experimental/baseline/tf_hotkey/__init__.py b/src/py/flwr/client/supernode/__init__.py similarity index 72% rename from src/py/flwr_experimental/baseline/tf_hotkey/__init__.py rename to src/py/flwr/client/supernode/__init__.py index 7ebf8a732ab5..bc505f693875 100644 --- a/src/py/flwr_experimental/baseline/tf_hotkey/__init__.py +++ b/src/py/flwr/client/supernode/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -12,9 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Flower baseline using TensorFlow for Spoken Keyword classification.""" +"""Flower SuperNode.""" -DEFAULT_SERVER_ADDRESS = "[::]:8080" +from .app import run_client_app as run_client_app +from .app import run_supernode as run_supernode -SEED = 2020 +__all__ = [ + "run_client_app", + "run_supernode", +] diff --git a/src/py/flwr/client/supernode/app.py b/src/py/flwr/client/supernode/app.py new file mode 100644 index 000000000000..e46ed43cc676 --- /dev/null +++ b/src/py/flwr/client/supernode/app.py @@ -0,0 +1,281 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Flower SuperNode.""" + +import argparse +import sys +from logging import DEBUG, INFO, WARN +from pathlib import Path +from typing import Callable, Optional, Tuple + +from cryptography.hazmat.primitives.asymmetric import ec +from cryptography.hazmat.primitives.serialization import ( + load_ssh_private_key, + load_ssh_public_key, +) + +from flwr.client.client_app import ClientApp, LoadClientAppError +from flwr.common import EventType, event +from flwr.common.exit_handlers import register_exit_handlers +from flwr.common.logger import log +from flwr.common.object_ref import load_app, validate +from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( + ssh_types_to_elliptic_curve, +) + +from ..app import _start_client_internal + + +def run_supernode() -> None: + """Run Flower SuperNode.""" + log(INFO, "Starting Flower SuperNode") + + event(EventType.RUN_SUPERNODE_ENTER) + + _ = _parse_args_run_supernode().parse_args() + + log( + DEBUG, + "Flower SuperNode starting...", + ) + + # Graceful shutdown + register_exit_handlers( + event_type=EventType.RUN_SUPERNODE_LEAVE, + ) + + +def run_client_app() -> None: + """Run Flower client app.""" + log(INFO, "Long-running Flower client starting") + + event(EventType.RUN_CLIENT_APP_ENTER) + + args = _parse_args_run_client_app().parse_args() + + root_certificates = _get_certificates(args) + log( + DEBUG, + "Flower will load ClientApp `%s`", + getattr(args, "client-app"), + ) + load_fn = _get_load_client_app_fn(args) + authentication_keys = _try_setup_client_authentication(args) + + _start_client_internal( + server_address=args.server, + load_client_app_fn=load_fn, + transport="rest" if args.rest else "grpc-rere", + root_certificates=root_certificates, + insecure=args.insecure, + authentication_keys=authentication_keys, + max_retries=args.max_retries, + max_wait_time=args.max_wait_time, + ) + 
register_exit_handlers(event_type=EventType.RUN_CLIENT_APP_LEAVE) + + +def _get_certificates(args: argparse.Namespace) -> Optional[bytes]: + """Load certificates if specified in args.""" + # Obtain certificates + if args.insecure: + if args.root_certificates is not None: + sys.exit( + "Conflicting options: The '--insecure' flag disables HTTPS, " + "but '--root-certificates' was also specified. Please remove " + "the '--root-certificates' option when running in insecure mode, " + "or omit '--insecure' to use HTTPS." + ) + log( + WARN, + "Option `--insecure` was set. " + "Starting insecure HTTP client connected to %s.", + args.server, + ) + root_certificates = None + else: + # Load the certificates if provided, or load the system certificates + cert_path = args.root_certificates + if cert_path is None: + root_certificates = None + else: + root_certificates = Path(cert_path).read_bytes() + log( + DEBUG, + "Starting secure HTTPS client connected to %s " + "with the following certificates: %s.", + args.server, + cert_path, + ) + return root_certificates + + +def _get_load_client_app_fn( + args: argparse.Namespace, +) -> Callable[[], ClientApp]: + """Get the load_client_app_fn function.""" + client_app_dir = args.dir + if client_app_dir is not None: + sys.path.insert(0, client_app_dir) + + app_ref: str = getattr(args, "client-app") + valid, error_msg = validate(app_ref) + if not valid and error_msg: + raise LoadClientAppError(error_msg) from None + + def _load() -> ClientApp: + client_app = load_app(app_ref, LoadClientAppError) + + if not isinstance(client_app, ClientApp): + raise LoadClientAppError( + f"Attribute {app_ref} is not of type {ClientApp}", + ) from None + + return client_app + + return _load + + +def _parse_args_run_supernode() -> argparse.ArgumentParser: + """Parse flower-supernode command line arguments.""" + parser = argparse.ArgumentParser( + description="Start a Flower SuperNode", + ) + + parser.add_argument( + "client-app", + nargs="?", + default="", + 
help="For example: `client:app` or `project.package.module:wrapper.app`. " + "This is optional and serves as the default ClientApp to be loaded when " + "the ServerApp does not specify `fab_id` and `fab_version`. " + "If not provided, defaults to an empty string.", + ) + _parse_args_common(parser) + parser.add_argument( + "--flwr-dir", + default=None, + help="""The path containing installed Flower Apps. + By default, this value isequal to: + + - `$FLWR_HOME/` if `$FLWR_HOME` is defined + - `$XDG_DATA_HOME/.flwr/` if `$XDG_DATA_HOME` is defined + - `$HOME/.flwr/` in all other cases + """, + ) + + return parser + + +def _parse_args_run_client_app() -> argparse.ArgumentParser: + """Parse flower-client-app command line arguments.""" + parser = argparse.ArgumentParser( + description="Start a Flower client app", + ) + + parser.add_argument( + "client-app", + help="For example: `client:app` or `project.package.module:wrapper.app`", + ) + _parse_args_common(parser=parser) + + return parser + + +def _parse_args_common(parser: argparse.ArgumentParser) -> None: + parser.add_argument( + "--insecure", + action="store_true", + help="Run the client without HTTPS. By default, the client runs with " + "HTTPS enabled. Use this flag only if you understand the risks.", + ) + parser.add_argument( + "--rest", + action="store_true", + help="Use REST as a transport layer for the client.", + ) + parser.add_argument( + "--root-certificates", + metavar="ROOT_CERT", + type=str, + help="Specifies the path to the PEM-encoded root certificate file for " + "establishing secure HTTPS connections.", + ) + parser.add_argument( + "--server", + default="0.0.0.0:9092", + help="Server address", + ) + parser.add_argument( + "--max-retries", + type=int, + default=None, + help="The maximum number of times the client will try to connect to the" + "server before giving up in case of a connection error. 
By default," + "it is set to None, meaning there is no limit to the number of tries.", + ) + parser.add_argument( + "--max-wait-time", + type=float, + default=None, + help="The maximum duration before the client stops trying to" + "connect to the server in case of connection error. By default, it" + "is set to None, meaning there is no limit to the total time.", + ) + parser.add_argument( + "--dir", + default="", + help="Add specified directory to the PYTHONPATH and load Flower " + "app from there." + " Default: current working directory.", + ) + parser.add_argument( + "--authentication-keys", + nargs=2, + metavar=("CLIENT_PRIVATE_KEY", "CLIENT_PUBLIC_KEY"), + type=str, + help="Provide two file paths: (1) the client's private " + "key file, and (2) the client's public key file.", + ) + + +def _try_setup_client_authentication( + args: argparse.Namespace, +) -> Optional[Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]]: + if not args.authentication_keys: + return None + + ssh_private_key = load_ssh_private_key( + Path(args.authentication_keys[0]).read_bytes(), + None, + ) + ssh_public_key = load_ssh_public_key(Path(args.authentication_keys[1]).read_bytes()) + + try: + client_private_key, client_public_key = ssh_types_to_elliptic_curve( + ssh_private_key, ssh_public_key + ) + except TypeError: + sys.exit( + "The file paths provided could not be read as a private and public " + "key pair. Client authentication requires an elliptic curve public and " + "private key pair. Please provide the file paths containing elliptic " + "curve private and public keys to '--authentication-keys'." 
+ ) + + return ( + client_private_key, + client_public_key, + ) diff --git a/src/py/flwr/common/constant.py b/src/py/flwr/common/constant.py index 7d30a10f5881..b6d39b6e8932 100644 --- a/src/py/flwr/common/constant.py +++ b/src/py/flwr/common/constant.py @@ -36,6 +36,13 @@ TRANSPORT_TYPE_VCE, ] +# Constants for ping +PING_DEFAULT_INTERVAL = 30 +PING_CALL_TIMEOUT = 5 +PING_BASE_MULTIPLIER = 0.8 +PING_RANDOM_RANGE = (-0.1, 0.1) +PING_MAX_INTERVAL = 1e300 + class MessageType: """Message type.""" @@ -68,3 +75,16 @@ class SType: def __new__(cls) -> SType: """Prevent instantiation.""" raise TypeError(f"{cls.__name__} cannot be instantiated.") + + +class ErrorCode: + """Error codes for Message's Error.""" + + UNKNOWN = 0 + LOAD_CLIENT_APP_EXCEPTION = 1 + CLIENT_APP_RAISED_EXCEPTION = 2 + NODE_UNAVAILABLE = 3 + + def __new__(cls) -> ErrorCode: + """Prevent instantiation.""" + raise TypeError(f"{cls.__name__} cannot be instantiated.") diff --git a/src/py/flwr/common/grpc.py b/src/py/flwr/common/grpc.py index 7d0eba078ab0..ead0329ca79c 100644 --- a/src/py/flwr/common/grpc.py +++ b/src/py/flwr/common/grpc.py @@ -16,7 +16,7 @@ from logging import DEBUG -from typing import Optional +from typing import Optional, Sequence import grpc @@ -30,6 +30,7 @@ def create_channel( insecure: bool, root_certificates: Optional[bytes] = None, max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, + interceptors: Optional[Sequence[grpc.UnaryUnaryClientInterceptor]] = None, ) -> grpc.Channel: """Create a gRPC channel, either secure or insecure.""" # Check for conflicting parameters @@ -57,4 +58,7 @@ def create_channel( ) log(DEBUG, "Opened secure gRPC connection using certificates") + if interceptors is not None: + channel = grpc.intercept_channel(channel, interceptors) + return channel diff --git a/src/py/flwr/common/logger.py b/src/py/flwr/common/logger.py index 2bc41773ed61..7225b0663ae7 100644 --- a/src/py/flwr/common/logger.py +++ b/src/py/flwr/common/logger.py @@ -82,13 +82,20 @@ def 
format(self, record: LogRecord) -> str: return formatter.format(record) -def update_console_handler(level: int, timestamps: bool, colored: bool) -> None: +def update_console_handler( + level: Optional[int] = None, + timestamps: Optional[bool] = None, + colored: Optional[bool] = None, +) -> None: """Update the logging handler.""" for handler in logging.getLogger(LOGGER_NAME).handlers: if isinstance(handler, ConsoleHandler): - handler.setLevel(level) - handler.timestamps = timestamps - handler.colored = colored + if level is not None: + handler.setLevel(level) + if timestamps is not None: + handler.timestamps = timestamps + if colored is not None: + handler.colored = colored # Configure console logger @@ -164,13 +171,13 @@ def configure( log = logger.log # pylint: disable=invalid-name -def warn_experimental_feature(name: str) -> None: - """Warn the user when they use an experimental feature.""" +def warn_preview_feature(name: str) -> None: + """Warn the user when they use a preview feature.""" log( WARN, - """EXPERIMENTAL FEATURE: %s + """PREVIEW FEATURE: %s - This is an experimental feature. It could change significantly or be removed + This is a preview feature. It could change significantly or be removed entirely in future versions of Flower. """, name, @@ -188,3 +195,29 @@ def warn_deprecated_feature(name: str) -> None: """, name, ) + + +def set_logger_propagation( + child_logger: logging.Logger, value: bool = True +) -> logging.Logger: + """Set the logger propagation attribute. + + Parameters + ---------- + child_logger : logging.Logger + Child logger object + value : bool + Boolean setting for propagation. If True, both parent and child logger + display messages. Otherwise, only the child logger displays a message. + This False setting prevents duplicate logs in Colab notebooks. 
+ Reference: https://stackoverflow.com/a/19561320 + + Returns + ------- + logging.Logger + Child logger object with updated propagation setting + """ + child_logger.propagate = value + if not child_logger.propagate: + child_logger.log(logging.DEBUG, "Logger propagate set to False") + return child_logger diff --git a/src/py/flwr/common/message.py b/src/py/flwr/common/message.py index 25607179764d..7f7a0e4dd995 100644 --- a/src/py/flwr/common/message.py +++ b/src/py/flwr/common/message.py @@ -16,14 +16,15 @@ from __future__ import annotations -from dataclasses import dataclass +import time +import warnings +from typing import Optional, cast from .record import RecordSet DEFAULT_TTL = 3600 -@dataclass class Metadata: # pylint: disable=too-many-instance-attributes """A dataclass holding metadata associated with the current message. @@ -53,16 +54,6 @@ class Metadata: # pylint: disable=too-many-instance-attributes is more relevant when conducting simulations. """ - _run_id: int - _message_id: str - _src_node_id: int - _dst_node_id: int - _reply_to_message: str - _group_id: str - _ttl: float - _message_type: str - _partition_id: int | None - def __init__( # pylint: disable=too-many-arguments self, run_id: int, @@ -75,88 +66,111 @@ def __init__( # pylint: disable=too-many-arguments message_type: str, partition_id: int | None = None, ) -> None: - self._run_id = run_id - self._message_id = message_id - self._src_node_id = src_node_id - self._dst_node_id = dst_node_id - self._reply_to_message = reply_to_message - self._group_id = group_id - self._ttl = ttl - self._message_type = message_type - self._partition_id = partition_id + var_dict = { + "_run_id": run_id, + "_message_id": message_id, + "_src_node_id": src_node_id, + "_dst_node_id": dst_node_id, + "_reply_to_message": reply_to_message, + "_group_id": group_id, + "_ttl": ttl, + "_message_type": message_type, + "_partition_id": partition_id, + } + self.__dict__.update(var_dict) @property def run_id(self) -> int: """An 
identifier for the current run.""" - return self._run_id + return cast(int, self.__dict__["_run_id"]) @property def message_id(self) -> str: """An identifier for the current message.""" - return self._message_id + return cast(str, self.__dict__["_message_id"]) @property def src_node_id(self) -> int: """An identifier for the node sending this message.""" - return self._src_node_id + return cast(int, self.__dict__["_src_node_id"]) @property def reply_to_message(self) -> str: """An identifier for the message this message replies to.""" - return self._reply_to_message + return cast(str, self.__dict__["_reply_to_message"]) @property def dst_node_id(self) -> int: """An identifier for the node receiving this message.""" - return self._dst_node_id + return cast(int, self.__dict__["_dst_node_id"]) @dst_node_id.setter def dst_node_id(self, value: int) -> None: """Set dst_node_id.""" - self._dst_node_id = value + self.__dict__["_dst_node_id"] = value @property def group_id(self) -> str: """An identifier for grouping messages.""" - return self._group_id + return cast(str, self.__dict__["_group_id"]) @group_id.setter def group_id(self, value: str) -> None: """Set group_id.""" - self._group_id = value + self.__dict__["_group_id"] = value + + @property + def created_at(self) -> float: + """Unix timestamp when the message was created.""" + return cast(float, self.__dict__["_created_at"]) + + @created_at.setter + def created_at(self, value: float) -> None: + """Set creation timestamp for this message.""" + self.__dict__["_created_at"] = value @property def ttl(self) -> float: """Time-to-live for this message.""" - return self._ttl + return cast(float, self.__dict__["_ttl"]) @ttl.setter def ttl(self, value: float) -> None: """Set ttl.""" - self._ttl = value + self.__dict__["_ttl"] = value @property def message_type(self) -> str: """A string that encodes the action to be executed on the receiving end.""" - return self._message_type + return cast(str, self.__dict__["_message_type"]) 
@message_type.setter def message_type(self, value: str) -> None: """Set message_type.""" - self._message_type = value + self.__dict__["_message_type"] = value @property def partition_id(self) -> int | None: """An identifier telling which data partition a ClientApp should use.""" - return self._partition_id + return cast(int, self.__dict__["_partition_id"]) @partition_id.setter def partition_id(self, value: int) -> None: - """Set patition_id.""" - self._partition_id = value + """Set partition_id.""" + self.__dict__["_partition_id"] = value + + def __repr__(self) -> str: + """Return a string representation of this instance.""" + view = ", ".join([f"{k.lstrip('_')}={v!r}" for k, v in self.__dict__.items()]) + return f"{self.__class__.__qualname__}({view})" + + def __eq__(self, other: object) -> bool: + """Compare two instances of the class.""" + if not isinstance(other, self.__class__): + raise NotImplementedError + return self.__dict__ == other.__dict__ -@dataclass class Error: """A dataclass that stores information about an error that occurred. @@ -168,25 +182,35 @@ class Error: A reason for why the error arose (e.g. 
an exception stack-trace) """ - _code: int - _reason: str | None = None - def __init__(self, code: int, reason: str | None = None) -> None: - self._code = code - self._reason = reason + var_dict = { + "_code": code, + "_reason": reason, + } + self.__dict__.update(var_dict) @property def code(self) -> int: """Error code.""" - return self._code + return cast(int, self.__dict__["_code"]) @property def reason(self) -> str | None: """Reason reported about the error.""" - return self._reason + return cast(Optional[str], self.__dict__["_reason"]) + + def __repr__(self) -> str: + """Return a string representation of this instance.""" + view = ", ".join([f"{k.lstrip('_')}={v!r}" for k, v in self.__dict__.items()]) + return f"{self.__class__.__qualname__}({view})" + + def __eq__(self, other: object) -> bool: + """Compare two instances of the class.""" + if not isinstance(other, self.__class__): + raise NotImplementedError + return self.__dict__ == other.__dict__ -@dataclass class Message: """State of your application from the viewpoint of the entity using it. @@ -202,102 +226,105 @@ class Message: when processing another message. 
""" - _metadata: Metadata - _content: RecordSet | None = None - _error: Error | None = None - def __init__( self, metadata: Metadata, content: RecordSet | None = None, error: Error | None = None, ) -> None: - self._metadata = metadata - if not (content is None) ^ (error is None): raise ValueError("Either `content` or `error` must be set, but not both.") - self._content = content - self._error = error + metadata.created_at = time.time() # Set the message creation timestamp + var_dict = { + "_metadata": metadata, + "_content": content, + "_error": error, + } + self.__dict__.update(var_dict) @property def metadata(self) -> Metadata: """A dataclass including information about the message to be executed.""" - return self._metadata + return cast(Metadata, self.__dict__["_metadata"]) @property def content(self) -> RecordSet: """The content of this message.""" - if self._content is None: + if self.__dict__["_content"] is None: raise ValueError( "Message content is None. Use .has_content() " "to check if a message has content." ) - return self._content + return cast(RecordSet, self.__dict__["_content"]) @content.setter def content(self, value: RecordSet) -> None: """Set content.""" - if self._error is None: - self._content = value + if self.__dict__["_error"] is None: + self.__dict__["_content"] = value else: raise ValueError("A message with an error set cannot have content.") @property def error(self) -> Error: """Error captured by this message.""" - if self._error is None: + if self.__dict__["_error"] is None: raise ValueError( "Message error is None. Use .has_error() " "to check first if a message carries an error." 
) - return self._error + return cast(Error, self.__dict__["_error"]) @error.setter def error(self, value: Error) -> None: """Set error.""" if self.has_content(): raise ValueError("A message with content set cannot carry an error.") - self._error = value + self.__dict__["_error"] = value def has_content(self) -> bool: """Return True if message has content, else False.""" - return self._content is not None + return self.__dict__["_content"] is not None def has_error(self) -> bool: """Return True if message has an error, else False.""" - return self._error is not None - - def _create_reply_metadata(self, ttl: float) -> Metadata: - """Construct metadata for a reply message.""" - return Metadata( - run_id=self.metadata.run_id, - message_id="", - src_node_id=self.metadata.dst_node_id, - dst_node_id=self.metadata.src_node_id, - reply_to_message=self.metadata.message_id, - group_id=self.metadata.group_id, - ttl=ttl, - message_type=self.metadata.message_type, - partition_id=self.metadata.partition_id, - ) + return self.__dict__["_error"] is not None - def create_error_reply( - self, - error: Error, - ttl: float, - ) -> Message: + def create_error_reply(self, error: Error, ttl: float | None = None) -> Message: """Construct a reply message indicating an error happened. Parameters ---------- error : Error The error that was encountered. - ttl : float - Time-to-live for this message in seconds. + ttl : Optional[float] (default: None) + Time-to-live for this message in seconds. If unset, it will be set based + on the remaining time for the received message before it expires. This + follows the equation: + + ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at) """ + if ttl: + warnings.warn( + "A custom TTL was set, but note that the SuperLink does not enforce " + "the TTL yet. 
The SuperLink will start enforcing the TTL in a future " + "version of Flower.", + stacklevel=2, + ) + # If no TTL passed, use default for message creation (will update after + # message creation) + ttl_ = DEFAULT_TTL if ttl is None else ttl # Create reply with error - message = Message(metadata=self._create_reply_metadata(ttl), error=error) + message = Message(metadata=_create_reply_metadata(self, ttl_), error=error) + + if ttl is None: + # Set TTL equal to the remaining time for the received message to expire + ttl = self.metadata.ttl - ( + message.metadata.created_at - self.metadata.created_at + ) + message.metadata.ttl = ttl + return message def create_reply(self, content: RecordSet, ttl: float | None = None) -> Message: @@ -312,18 +339,64 @@ def create_reply(self, content: RecordSet, ttl: float | None = None) -> Message: content : RecordSet The content for the reply message. ttl : Optional[float] (default: None) - Time-to-live for this message in seconds. If unset, it will use - the `common.DEFAULT_TTL` value. + Time-to-live for this message in seconds. If unset, it will be set based + on the remaining time for the received message before it expires. This + follows the equation: + + ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at) Returns ------- Message A new `Message` instance representing the reply. """ - if ttl is None: - ttl = DEFAULT_TTL + if ttl: + warnings.warn( + "A custom TTL was set, but note that the SuperLink does not enforce " + "the TTL yet. 
The SuperLink will start enforcing the TTL in a future " + "version of Flower.", + stacklevel=2, + ) + # If no TTL passed, use default for message creation (will update after + # message creation) + ttl_ = DEFAULT_TTL if ttl is None else ttl - return Message( - metadata=self._create_reply_metadata(ttl), + message = Message( + metadata=_create_reply_metadata(self, ttl_), content=content, ) + + if ttl is None: + # Set TTL equal to the remaining time for the received message to expire + ttl = self.metadata.ttl - ( + message.metadata.created_at - self.metadata.created_at + ) + message.metadata.ttl = ttl + + return message + + def __repr__(self) -> str: + """Return a string representation of this instance.""" + view = ", ".join( + [ + f"{k.lstrip('_')}={v!r}" + for k, v in self.__dict__.items() + if v is not None + ] + ) + return f"{self.__class__.__qualname__}({view})" + + +def _create_reply_metadata(msg: Message, ttl: float) -> Metadata: + """Construct metadata for a reply message.""" + return Metadata( + run_id=msg.metadata.run_id, + message_id="", + src_node_id=msg.metadata.dst_node_id, + dst_node_id=msg.metadata.src_node_id, + reply_to_message=msg.metadata.message_id, + group_id=msg.metadata.group_id, + ttl=ttl, + message_type=msg.metadata.message_type, + partition_id=msg.metadata.partition_id, + ) diff --git a/src/py/flwr/common/message_test.py b/src/py/flwr/common/message_test.py index ba628bb3235a..19f8aeb1eb63 100644 --- a/src/py/flwr/common/message_test.py +++ b/src/py/flwr/common/message_test.py @@ -14,15 +14,16 @@ # ============================================================================== """Message tests.""" - +import time +from collections import namedtuple from contextlib import ExitStack -from typing import Any, Callable +from typing import Any, Callable, Dict, Optional import pytest # pylint: enable=E0611 from . 
import RecordSet -from .message import Error, Message +from .message import Error, Message, Metadata from .serde_test import RecordMaker @@ -62,24 +63,32 @@ def test_message_creation( if context: stack.enter_context(context) - _ = Message( + current_time = time.time() + message = Message( metadata=metadata, content=None if content_fn is None else content_fn(maker), error=None if error_fn is None else error_fn(0), ) + assert message.metadata.created_at > current_time + assert message.metadata.created_at < time.time() + -def create_message_with_content() -> Message: +def create_message_with_content(ttl: Optional[float] = None) -> Message: """Create a Message with content.""" maker = RecordMaker(state=2) metadata = maker.metadata() + if ttl: + metadata.ttl = ttl return Message(metadata=metadata, content=RecordSet()) -def create_message_with_error() -> Message: +def create_message_with_error(ttl: Optional[float] = None) -> Message: """Create a Message with error.""" maker = RecordMaker(state=2) metadata = maker.metadata() + if ttl: + metadata.ttl = ttl return Message(metadata=metadata, error=Error(code=1)) @@ -107,3 +116,90 @@ def test_altering_message( message.error = Error(code=123) if message.has_error(): message.content = RecordSet() + + +@pytest.mark.parametrize( + "message_creation_fn,ttl,reply_ttl", + [ + (create_message_with_content, 1e6, None), + (create_message_with_error, 1e6, None), + (create_message_with_content, 1e6, 3600), + (create_message_with_error, 1e6, 3600), + ], +) +def test_create_reply( + message_creation_fn: Callable[ + [float], + Message, + ], + ttl: float, + reply_ttl: Optional[float], +) -> None: + """Test reply creation from message.""" + message: Message = message_creation_fn(ttl) + + time.sleep(0.1) + + if message.has_error(): + dummy_error = Error(code=0, reason="it crashed") + reply_message = message.create_error_reply(dummy_error, ttl=reply_ttl) + else: + reply_message = message.create_reply(content=RecordSet(), ttl=reply_ttl) + + # 
Ensure reply has a higher timestamp + assert message.metadata.created_at < reply_message.metadata.created_at + if reply_ttl: + # Ensure the TTL is the one specify upon reply creation + assert reply_message.metadata.ttl == reply_ttl + else: + # Ensure reply ttl is lower (since it uses remaining time left) + assert message.metadata.ttl > reply_message.metadata.ttl + + assert message.metadata.src_node_id == reply_message.metadata.dst_node_id + assert message.metadata.dst_node_id == reply_message.metadata.src_node_id + assert reply_message.metadata.reply_to_message == message.metadata.message_id + + +@pytest.mark.parametrize( + "cls, kwargs", + [ + ( + Metadata, + { + "run_id": 123, + "message_id": "msg_456", + "src_node_id": 1, + "dst_node_id": 2, + "reply_to_message": "reply_789", + "group_id": "group_xyz", + "ttl": 10.0, + "message_type": "request", + "partition_id": None, + }, + ), + (Error, {"code": 1, "reason": "reason_098"}), + ( + Message, + { + "metadata": RecordMaker(1).metadata(), + "content": RecordMaker(1).recordset(1, 1, 1), + }, + ), + ( + Message, + { + "metadata": RecordMaker(2).metadata(), + "error": Error(0, "some reason"), + }, + ), + ], +) +def test_repr(cls: type, kwargs: Dict[str, Any]) -> None: + """Test string representations of Metadata/Message/Error.""" + # Prepare + anon_cls = namedtuple(cls.__qualname__, kwargs.keys()) # type: ignore + expected = anon_cls(**kwargs) + actual = cls(**kwargs) + + # Assert + assert str(actual) == str(expected) diff --git a/src/py/flwr/common/record/parametersrecord.py b/src/py/flwr/common/record/parametersrecord.py index a4a71f751f97..93db6d387b53 100644 --- a/src/py/flwr/common/record/parametersrecord.py +++ b/src/py/flwr/common/record/parametersrecord.py @@ -82,7 +82,6 @@ def _check_value(value: Array) -> None: ) -@dataclass class ParametersRecord(TypedDict[str, Array]): """Parameters record. 
diff --git a/src/py/flwr/common/record/recordset.py b/src/py/flwr/common/record/recordset.py index d8ef44ab15c2..74eed46ad86f 100644 --- a/src/py/flwr/common/record/recordset.py +++ b/src/py/flwr/common/record/recordset.py @@ -16,23 +16,21 @@ from dataclasses import dataclass -from typing import Callable, Dict, Optional, Type, TypeVar +from typing import Dict, Optional, cast from .configsrecord import ConfigsRecord from .metricsrecord import MetricsRecord from .parametersrecord import ParametersRecord from .typeddict import TypedDict -T = TypeVar("T") - @dataclass -class RecordSet: - """RecordSet stores groups of parameters, metrics and configs.""" +class RecordSetData: + """Inner data container for the RecordSet class.""" - _parameters_records: TypedDict[str, ParametersRecord] - _metrics_records: TypedDict[str, MetricsRecord] - _configs_records: TypedDict[str, ConfigsRecord] + parameters_records: TypedDict[str, ParametersRecord] + metrics_records: TypedDict[str, MetricsRecord] + configs_records: TypedDict[str, ConfigsRecord] def __init__( self, @@ -40,40 +38,93 @@ def __init__( metrics_records: Optional[Dict[str, MetricsRecord]] = None, configs_records: Optional[Dict[str, ConfigsRecord]] = None, ) -> None: - def _get_check_fn(__t: Type[T]) -> Callable[[T], None]: - def _check_fn(__v: T) -> None: - if not isinstance(__v, __t): - raise TypeError(f"Expected `{__t}`, but `{type(__v)}` was passed.") - - return _check_fn - - self._parameters_records = TypedDict[str, ParametersRecord]( - _get_check_fn(str), _get_check_fn(ParametersRecord) + self.parameters_records = TypedDict[str, ParametersRecord]( + self._check_fn_str, self._check_fn_params ) - self._metrics_records = TypedDict[str, MetricsRecord]( - _get_check_fn(str), _get_check_fn(MetricsRecord) + self.metrics_records = TypedDict[str, MetricsRecord]( + self._check_fn_str, self._check_fn_metrics ) - self._configs_records = TypedDict[str, ConfigsRecord]( - _get_check_fn(str), _get_check_fn(ConfigsRecord) + 
self.configs_records = TypedDict[str, ConfigsRecord]( + self._check_fn_str, self._check_fn_configs ) if parameters_records is not None: - self._parameters_records.update(parameters_records) + self.parameters_records.update(parameters_records) if metrics_records is not None: - self._metrics_records.update(metrics_records) + self.metrics_records.update(metrics_records) if configs_records is not None: - self._configs_records.update(configs_records) + self.configs_records.update(configs_records) + + def _check_fn_str(self, key: str) -> None: + if not isinstance(key, str): + raise TypeError( + f"Expected `{str.__name__}`, but " + f"received `{type(key).__name__}` for the key." + ) + + def _check_fn_params(self, record: ParametersRecord) -> None: + if not isinstance(record, ParametersRecord): + raise TypeError( + f"Expected `{ParametersRecord.__name__}`, but " + f"received `{type(record).__name__}` for the value." + ) + + def _check_fn_metrics(self, record: MetricsRecord) -> None: + if not isinstance(record, MetricsRecord): + raise TypeError( + f"Expected `{MetricsRecord.__name__}`, but " + f"received `{type(record).__name__}` for the value." + ) + + def _check_fn_configs(self, record: ConfigsRecord) -> None: + if not isinstance(record, ConfigsRecord): + raise TypeError( + f"Expected `{ConfigsRecord.__name__}`, but " + f"received `{type(record).__name__}` for the value." 
+ ) + + +class RecordSet: + """RecordSet stores groups of parameters, metrics and configs.""" + + def __init__( + self, + parameters_records: Optional[Dict[str, ParametersRecord]] = None, + metrics_records: Optional[Dict[str, MetricsRecord]] = None, + configs_records: Optional[Dict[str, ConfigsRecord]] = None, + ) -> None: + data = RecordSetData( + parameters_records=parameters_records, + metrics_records=metrics_records, + configs_records=configs_records, + ) + self.__dict__["_data"] = data @property def parameters_records(self) -> TypedDict[str, ParametersRecord]: """Dictionary holding ParametersRecord instances.""" - return self._parameters_records + data = cast(RecordSetData, self.__dict__["_data"]) + return data.parameters_records @property def metrics_records(self) -> TypedDict[str, MetricsRecord]: """Dictionary holding MetricsRecord instances.""" - return self._metrics_records + data = cast(RecordSetData, self.__dict__["_data"]) + return data.metrics_records @property def configs_records(self) -> TypedDict[str, ConfigsRecord]: """Dictionary holding ConfigsRecord instances.""" - return self._configs_records + data = cast(RecordSetData, self.__dict__["_data"]) + return data.configs_records + + def __repr__(self) -> str: + """Return a string representation of this instance.""" + flds = ("parameters_records", "metrics_records", "configs_records") + view = ", ".join([f"{fld}={getattr(self, fld)!r}" for fld in flds]) + return f"{self.__class__.__qualname__}({view})" + + def __eq__(self, other: object) -> bool: + """Compare two instances of the class.""" + if not isinstance(other, self.__class__): + raise NotImplementedError + return self.__dict__ == other.__dict__ diff --git a/src/py/flwr/common/record/recordset_test.py b/src/py/flwr/common/record/recordset_test.py index 0e0b149881be..01260793cb41 100644 --- a/src/py/flwr/common/record/recordset_test.py +++ b/src/py/flwr/common/record/recordset_test.py @@ -14,6 +14,8 @@ # 
============================================================================== """RecordSet tests.""" +import pickle +from collections import namedtuple from copy import deepcopy from typing import Callable, Dict, List, OrderedDict, Type, Union @@ -33,7 +35,7 @@ Parameters, ) -from . import Array, ConfigsRecord, MetricsRecord, ParametersRecord +from . import Array, ConfigsRecord, MetricsRecord, ParametersRecord, RecordSet def get_ndarrays() -> NDArrays: @@ -398,3 +400,33 @@ def test_count_bytes_configsrecord() -> None: record_bytest_count = c_record.count_bytes() assert bytes_in_dict == record_bytest_count + + +def test_record_is_picklable() -> None: + """Test if RecordSet and *Record are picklable.""" + # Prepare + p_record = ParametersRecord() + m_record = MetricsRecord({"aa": 123}) + c_record = ConfigsRecord({"cc": bytes(9)}) + rs = RecordSet() + rs.parameters_records["params"] = p_record + rs.metrics_records["metrics"] = m_record + rs.configs_records["configs"] = c_record + + # Execute + pickle.dumps((p_record, m_record, c_record, rs)) + + +def test_recordset_repr() -> None: + """Test the string representation of RecordSet.""" + # Prepare + kwargs = { + "parameters_records": {"params": ParametersRecord()}, + "metrics_records": {"metrics": MetricsRecord({"aa": 123})}, + "configs_records": {"configs": ConfigsRecord({"cc": bytes(9)})}, + } + rs = RecordSet(**kwargs) # type: ignore + expected = namedtuple("RecordSet", kwargs.keys())(**kwargs) + + # Assert + assert str(rs) == str(expected) diff --git a/src/py/flwr/common/retry_invoker.py b/src/py/flwr/common/retry_invoker.py index 5441e766983a..d12124b89840 100644 --- a/src/py/flwr/common/retry_invoker.py +++ b/src/py/flwr/common/retry_invoker.py @@ -107,7 +107,7 @@ class RetryInvoker: Parameters ---------- - wait_factory: Callable[[], Generator[float, None, None]] + wait_gen_factory: Callable[[], Generator[float, None, None]] A generator yielding successive wait times in seconds. 
If the generator is finite, the giveup event will be triggered when the generator raises `StopIteration`. @@ -129,12 +129,12 @@ class RetryInvoker: data class object detailing the invocation. on_giveup: Optional[Callable[[RetryState], None]] (default: None) A callable to be executed in the event that `max_tries` or `max_time` is - exceeded, `should_giveup` returns True, or `wait_factory()` generator raises + exceeded, `should_giveup` returns True, or `wait_gen_factory()` generator raises `StopInteration`. The parameter is a data class object detailing the invocation. jitter: Optional[Callable[[float], float]] (default: full_jitter) - A function of the value yielded by `wait_factory()` returning the actual time - to wait. This function helps distribute wait times stochastically to avoid + A function of the value yielded by `wait_gen_factory()` returning the actual + time to wait. This function helps distribute wait times stochastically to avoid timing collisions across concurrent clients. Wait times are jittered by default using the `full_jitter` function. To disable jittering, pass `jitter=None`. @@ -142,6 +142,13 @@ class RetryInvoker: A function accepting an exception instance, returning whether or not to give up prematurely before other give-up conditions are evaluated. If set to None, the strategy is to never give up prematurely. + wait_function: Optional[Callable[[float], None]] (default: None) + A function that defines how to wait between retry attempts. It accepts + one argument, the wait time in seconds, allowing the use of various waiting + mechanisms (e.g., asynchronous waits or event-based synchronization) suitable + for different execution environments. If set to `None`, the `wait_function` + defaults to `time.sleep`, which is ideal for synchronous operations. Custom + functions should manage execution flow to prevent blocking or interference. 
Examples -------- @@ -159,7 +166,7 @@ class RetryInvoker: # pylint: disable-next=too-many-arguments def __init__( self, - wait_factory: Callable[[], Generator[float, None, None]], + wait_gen_factory: Callable[[], Generator[float, None, None]], recoverable_exceptions: Union[Type[Exception], Tuple[Type[Exception], ...]], max_tries: Optional[int], max_time: Optional[float], @@ -169,8 +176,9 @@ def __init__( on_giveup: Optional[Callable[[RetryState], None]] = None, jitter: Optional[Callable[[float], float]] = full_jitter, should_giveup: Optional[Callable[[Exception], bool]] = None, + wait_function: Optional[Callable[[float], None]] = None, ) -> None: - self.wait_factory = wait_factory + self.wait_gen_factory = wait_gen_factory self.recoverable_exceptions = recoverable_exceptions self.max_tries = max_tries self.max_time = max_time @@ -179,6 +187,9 @@ def __init__( self.on_giveup = on_giveup self.jitter = jitter self.should_giveup = should_giveup + if wait_function is None: + wait_function = time.sleep + self.wait_function = wait_function # pylint: disable-next=too-many-locals def invoke( @@ -212,13 +223,13 @@ def invoke( Raises ------ Exception - If the number of tries exceeds `max_tries`, if the total time - exceeds `max_time`, if `wait_factory()` generator raises `StopInteration`, + If the number of tries exceeds `max_tries`, if the total time exceeds + `max_time`, if `wait_gen_factory()` generator raises `StopInteration`, or if the `should_giveup` returns True for a raised exception. Notes ----- - The time between retries is determined by the provided `wait_factory()` + The time between retries is determined by the provided `wait_gen_factory()` generator and can optionally be jittered using the `jitter` function. The recoverable exceptions that trigger a retry, as well as conditions to stop retries, are also determined by the class's initialization parameters. 
@@ -231,13 +242,13 @@ def try_call_event_handler( handler(cast(RetryState, ref_state[0])) try_cnt = 0 - wait_generator = self.wait_factory() - start = time.time() + wait_generator = self.wait_gen_factory() + start = time.monotonic() ref_state: List[Optional[RetryState]] = [None] while True: try_cnt += 1 - elapsed_time = time.time() - start + elapsed_time = time.monotonic() - start state = RetryState( target=target, args=args, @@ -250,6 +261,7 @@ def try_call_event_handler( try: ret = target(*args, **kwargs) except self.recoverable_exceptions as err: + state.exception = err # Check if giveup event should be triggered max_tries_exceeded = try_cnt == self.max_tries max_time_exceeded = ( @@ -282,7 +294,7 @@ def giveup_check(_exception: Exception) -> bool: try_call_event_handler(self.on_backoff) # Sleep - time.sleep(wait_time) + self.wait_function(state.actual_wait) else: # Trigger success event try_call_event_handler(self.on_success) diff --git a/src/py/flwr/common/retry_invoker_test.py b/src/py/flwr/common/retry_invoker_test.py index e67c0641e2ba..2259ae47ded4 100644 --- a/src/py/flwr/common/retry_invoker_test.py +++ b/src/py/flwr/common/retry_invoker_test.py @@ -35,8 +35,8 @@ def failing_function() -> None: @pytest.fixture(name="mock_time") def fixture_mock_time() -> Generator[MagicMock, None, None]: - """Mock time.time for controlled testing.""" - with patch("time.time") as mock_time: + """Mock time.monotonic for controlled testing.""" + with patch("time.monotonic") as mock_time: yield mock_time diff --git a/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py b/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py index 844a93f3bde9..9856b8b706f9 100644 --- a/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py +++ b/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py @@ -18,8 +18,9 @@ import base64 from typing import Tuple, cast +from cryptography.exceptions import InvalidSignature from 
cryptography.fernet import Fernet -from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives import hashes, hmac, serialization from cryptography.hazmat.primitives.asymmetric import ec from cryptography.hazmat.primitives.kdf.hkdf import HKDF @@ -98,3 +99,36 @@ def decrypt(key: bytes, ciphertext: bytes) -> bytes: # The input key must be url safe fernet = Fernet(key) return fernet.decrypt(ciphertext) + + +def compute_hmac(key: bytes, message: bytes) -> bytes: + """Compute hmac of a message using key as hash.""" + computed_hmac = hmac.HMAC(key, hashes.SHA256()) + computed_hmac.update(message) + return computed_hmac.finalize() + + +def verify_hmac(key: bytes, message: bytes, hmac_value: bytes) -> bool: + """Verify hmac of a message using key as hash.""" + computed_hmac = hmac.HMAC(key, hashes.SHA256()) + computed_hmac.update(message) + try: + computed_hmac.verify(hmac_value) + return True + except InvalidSignature: + return False + + +def ssh_types_to_elliptic_curve( + private_key: serialization.SSHPrivateKeyTypes, + public_key: serialization.SSHPublicKeyTypes, +) -> Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]: + """Cast SSH key types to elliptic curve.""" + if isinstance(private_key, ec.EllipticCurvePrivateKey) and isinstance( + public_key, ec.EllipticCurvePublicKey + ): + return (private_key, public_key) + + raise TypeError( + "The provided key is not an EllipticCurvePrivateKey or EllipticCurvePublicKey" + ) diff --git a/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption_test.py b/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption_test.py new file mode 100644 index 000000000000..f62276b63ff3 --- /dev/null +++ b/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption_test.py @@ -0,0 +1,102 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Symmetric encryption tests.""" + + +from .symmetric_encryption import ( + compute_hmac, + generate_key_pairs, + generate_shared_key, + verify_hmac, +) + + +def test_generate_shared_key() -> None: + """Test util function generate_shared_key.""" + # Prepare + client_keys = generate_key_pairs() + server_keys = generate_key_pairs() + + # Execute + client_shared_secret = generate_shared_key(client_keys[0], server_keys[1]) + server_shared_secret = generate_shared_key(server_keys[0], client_keys[1]) + + # Assert + assert client_shared_secret == server_shared_secret + + +def test_wrong_secret_generate_shared_key() -> None: + """Test util function generate_shared_key with wrong secret.""" + # Prepare + client_keys = generate_key_pairs() + server_keys = generate_key_pairs() + other_keys = generate_key_pairs() + + # Execute + client_shared_secret = generate_shared_key(client_keys[0], other_keys[1]) + server_shared_secret = generate_shared_key(server_keys[0], client_keys[1]) + + # Assert + assert client_shared_secret != server_shared_secret + + +def test_hmac() -> None: + """Test util function compute and verify hmac.""" + # Prepare + client_keys = generate_key_pairs() + server_keys = generate_key_pairs() + client_shared_secret = generate_shared_key(client_keys[0], server_keys[1]) + server_shared_secret = generate_shared_key(server_keys[0], 
client_keys[1]) + message = b"Flower is the future of AI" + + # Execute + client_compute_hmac = compute_hmac(client_shared_secret, message) + + # Assert + assert verify_hmac(server_shared_secret, message, client_compute_hmac) + + +def test_wrong_secret_hmac() -> None: + """Test util function compute and verify hmac with wrong secret.""" + # Prepare + client_keys = generate_key_pairs() + server_keys = generate_key_pairs() + other_keys = generate_key_pairs() + client_shared_secret = generate_shared_key(client_keys[0], other_keys[1]) + server_shared_secret = generate_shared_key(server_keys[0], client_keys[1]) + message = b"Flower is the future of AI" + + # Execute + client_compute_hmac = compute_hmac(client_shared_secret, message) + + # Assert + assert verify_hmac(server_shared_secret, message, client_compute_hmac) is False + + +def test_wrong_message_hmac() -> None: + """Test util function compute and verify hmac with wrong message.""" + # Prepare + client_keys = generate_key_pairs() + server_keys = generate_key_pairs() + client_shared_secret = generate_shared_key(client_keys[0], server_keys[1]) + server_shared_secret = generate_shared_key(server_keys[0], client_keys[1]) + message = b"Flower is the future of AI" + other_message = b"Flower is not the future of AI" + + # Execute + client_compute_hmac = compute_hmac(client_shared_secret, other_message) + + # Assert + assert verify_hmac(server_shared_secret, message, client_compute_hmac) is False diff --git a/src/py/flwr/common/serde.py b/src/py/flwr/common/serde.py index 6c7a077d2f9f..84932b806aff 100644 --- a/src/py/flwr/common/serde.py +++ b/src/py/flwr/common/serde.py @@ -575,6 +575,7 @@ def message_to_taskins(message: Message) -> TaskIns: task=Task( producer=Node(node_id=0, anonymous=True), # Assume driver node consumer=Node(node_id=md.dst_node_id, anonymous=False), + created_at=md.created_at, ttl=md.ttl, ancestry=[md.reply_to_message] if md.reply_to_message != "" else [], task_type=md.message_type, @@ -601,7 +602,7 
@@ def message_from_taskins(taskins: TaskIns) -> Message: ) # Construct Message - return Message( + message = Message( metadata=metadata, content=( recordset_from_proto(taskins.task.recordset) @@ -614,6 +615,8 @@ def message_from_taskins(taskins: TaskIns) -> Message: else None ), ) + message.metadata.created_at = taskins.task.created_at + return message def message_to_taskres(message: Message) -> TaskRes: @@ -626,6 +629,7 @@ def message_to_taskres(message: Message) -> TaskRes: task=Task( producer=Node(node_id=md.src_node_id, anonymous=False), consumer=Node(node_id=0, anonymous=True), # Assume driver node + created_at=md.created_at, ttl=md.ttl, ancestry=[md.reply_to_message] if md.reply_to_message != "" else [], task_type=md.message_type, @@ -652,7 +656,7 @@ def message_from_taskres(taskres: TaskRes) -> Message: ) # Construct the Message - return Message( + message = Message( metadata=metadata, content=( recordset_from_proto(taskres.task.recordset) @@ -665,3 +669,5 @@ def message_from_taskres(taskres: TaskRes) -> Message: else None ), ) + message.metadata.created_at = taskres.task.created_at + return message diff --git a/src/py/flwr/common/serde_test.py b/src/py/flwr/common/serde_test.py index fc12ce95328f..f9969426fc36 100644 --- a/src/py/flwr/common/serde_test.py +++ b/src/py/flwr/common/serde_test.py @@ -324,7 +324,7 @@ def test_message_to_and_from_taskins( maker = RecordMaker(state=1) metadata = maker.metadata() # pylint: disable-next=protected-access - metadata._src_node_id = 0 # Assume driver node + metadata.__dict__["_src_node_id"] = 0 # Assume driver node original = Message( metadata=metadata, diff --git a/src/py/flwr/common/telemetry.py b/src/py/flwr/common/telemetry.py index 8eb594085d31..41fe1508e652 100644 --- a/src/py/flwr/common/telemetry.py +++ b/src/py/flwr/common/telemetry.py @@ -160,6 +160,10 @@ def _generate_next_value_(name: str, start: int, count: int, last_values: List[A RUN_SERVER_APP_ENTER = auto() RUN_SERVER_APP_LEAVE = auto() + # SuperNode 
+ RUN_SUPERNODE_ENTER = auto() + RUN_SUPERNODE_LEAVE = auto() + # Use the ThreadPoolExecutor with max_workers=1 to have a queue # and also ensure that telemetry calls are not blocking. diff --git a/src/py/flwr/common/telemetry_test.py b/src/py/flwr/common/telemetry_test.py index 006f4422bc1d..a5eea48443b5 100644 --- a/src/py/flwr/common/telemetry_test.py +++ b/src/py/flwr/common/telemetry_test.py @@ -47,8 +47,8 @@ def test_not_blocking(self) -> None: 0.001s. """ # Prepare - # Use 0.1ms as any blocking networked call would take longer. - duration_max = 0.001 + # Use 5ms as any blocking networked call would take longer. + duration_max = 0.005 start = time.time() # Execute diff --git a/src/py/flwr/proto/driver_pb2.py b/src/py/flwr/proto/driver_pb2.py index fe9c33da0fa9..b0caae58ff6f 100644 --- a/src/py/flwr/proto/driver_pb2.py +++ b/src/py/flwr/proto/driver_pb2.py @@ -16,7 +16,7 @@ from flwr.proto import task_pb2 as flwr_dot_proto_dot_task__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17\x66lwr/proto/driver.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\"\x12\n\x10\x43reateRunRequest\"#\n\x11\x43reateRunResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"!\n\x0fGetNodesRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"3\n\x10GetNodesResponse\x12\x1f\n\x05nodes\x18\x01 \x03(\x0b\x32\x10.flwr.proto.Node\"@\n\x12PushTaskInsRequest\x12*\n\rtask_ins_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"\'\n\x13PushTaskInsResponse\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"F\n\x12PullTaskResRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"A\n\x13PullTaskResResponse\x12*\n\rtask_res_list\x18\x01 
\x03(\x0b\x32\x13.flwr.proto.TaskRes2\xc1\x02\n\x06\x44river\x12J\n\tCreateRun\x12\x1c.flwr.proto.CreateRunRequest\x1a\x1d.flwr.proto.CreateRunResponse\"\x00\x12G\n\x08GetNodes\x12\x1b.flwr.proto.GetNodesRequest\x1a\x1c.flwr.proto.GetNodesResponse\"\x00\x12P\n\x0bPushTaskIns\x12\x1e.flwr.proto.PushTaskInsRequest\x1a\x1f.flwr.proto.PushTaskInsResponse\"\x00\x12P\n\x0bPullTaskRes\x12\x1e.flwr.proto.PullTaskResRequest\x1a\x1f.flwr.proto.PullTaskResResponse\"\x00\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17\x66lwr/proto/driver.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\"7\n\x10\x43reateRunRequest\x12\x0e\n\x06\x66\x61\x62_id\x18\x01 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x02 \x01(\t\"#\n\x11\x43reateRunResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"!\n\x0fGetNodesRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"3\n\x10GetNodesResponse\x12\x1f\n\x05nodes\x18\x01 \x03(\x0b\x32\x10.flwr.proto.Node\"@\n\x12PushTaskInsRequest\x12*\n\rtask_ins_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"\'\n\x13PushTaskInsResponse\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"F\n\x12PullTaskResRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"A\n\x13PullTaskResResponse\x12*\n\rtask_res_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskRes2\xc1\x02\n\x06\x44river\x12J\n\tCreateRun\x12\x1c.flwr.proto.CreateRunRequest\x1a\x1d.flwr.proto.CreateRunResponse\"\x00\x12G\n\x08GetNodes\x12\x1b.flwr.proto.GetNodesRequest\x1a\x1c.flwr.proto.GetNodesResponse\"\x00\x12P\n\x0bPushTaskIns\x12\x1e.flwr.proto.PushTaskInsRequest\x1a\x1f.flwr.proto.PushTaskInsResponse\"\x00\x12P\n\x0bPullTaskRes\x12\x1e.flwr.proto.PullTaskResRequest\x1a\x1f.flwr.proto.PullTaskResResponse\"\x00\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -24,21 +24,21 @@ if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None 
_globals['_CREATERUNREQUEST']._serialized_start=85 - _globals['_CREATERUNREQUEST']._serialized_end=103 - _globals['_CREATERUNRESPONSE']._serialized_start=105 - _globals['_CREATERUNRESPONSE']._serialized_end=140 - _globals['_GETNODESREQUEST']._serialized_start=142 - _globals['_GETNODESREQUEST']._serialized_end=175 - _globals['_GETNODESRESPONSE']._serialized_start=177 - _globals['_GETNODESRESPONSE']._serialized_end=228 - _globals['_PUSHTASKINSREQUEST']._serialized_start=230 - _globals['_PUSHTASKINSREQUEST']._serialized_end=294 - _globals['_PUSHTASKINSRESPONSE']._serialized_start=296 - _globals['_PUSHTASKINSRESPONSE']._serialized_end=335 - _globals['_PULLTASKRESREQUEST']._serialized_start=337 - _globals['_PULLTASKRESREQUEST']._serialized_end=407 - _globals['_PULLTASKRESRESPONSE']._serialized_start=409 - _globals['_PULLTASKRESRESPONSE']._serialized_end=474 - _globals['_DRIVER']._serialized_start=477 - _globals['_DRIVER']._serialized_end=798 + _globals['_CREATERUNREQUEST']._serialized_end=140 + _globals['_CREATERUNRESPONSE']._serialized_start=142 + _globals['_CREATERUNRESPONSE']._serialized_end=177 + _globals['_GETNODESREQUEST']._serialized_start=179 + _globals['_GETNODESREQUEST']._serialized_end=212 + _globals['_GETNODESRESPONSE']._serialized_start=214 + _globals['_GETNODESRESPONSE']._serialized_end=265 + _globals['_PUSHTASKINSREQUEST']._serialized_start=267 + _globals['_PUSHTASKINSREQUEST']._serialized_end=331 + _globals['_PUSHTASKINSRESPONSE']._serialized_start=333 + _globals['_PUSHTASKINSRESPONSE']._serialized_end=372 + _globals['_PULLTASKRESREQUEST']._serialized_start=374 + _globals['_PULLTASKRESREQUEST']._serialized_end=444 + _globals['_PULLTASKRESRESPONSE']._serialized_start=446 + _globals['_PULLTASKRESRESPONSE']._serialized_end=511 + _globals['_DRIVER']._serialized_start=514 + _globals['_DRIVER']._serialized_end=835 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/driver_pb2.pyi b/src/py/flwr/proto/driver_pb2.pyi index 
8dc254a55e8c..2d8d11fb59a3 100644 --- a/src/py/flwr/proto/driver_pb2.pyi +++ b/src/py/flwr/proto/driver_pb2.pyi @@ -16,8 +16,16 @@ DESCRIPTOR: google.protobuf.descriptor.FileDescriptor class CreateRunRequest(google.protobuf.message.Message): """CreateRun""" DESCRIPTOR: google.protobuf.descriptor.Descriptor + FAB_ID_FIELD_NUMBER: builtins.int + FAB_VERSION_FIELD_NUMBER: builtins.int + fab_id: typing.Text + fab_version: typing.Text def __init__(self, + *, + fab_id: typing.Text = ..., + fab_version: typing.Text = ..., ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["fab_id",b"fab_id","fab_version",b"fab_version"]) -> None: ... global___CreateRunRequest = CreateRunRequest class CreateRunResponse(google.protobuf.message.Message): diff --git a/src/py/flwr/proto/fleet_pb2.py b/src/py/flwr/proto/fleet_pb2.py index dbf64fb850a5..42f3292d910d 100644 --- a/src/py/flwr/proto/fleet_pb2.py +++ b/src/py/flwr/proto/fleet_pb2.py @@ -16,7 +16,7 @@ from flwr.proto import task_pb2 as flwr_dot_proto_dot_task__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16\x66lwr/proto/fleet.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\"\x13\n\x11\x43reateNodeRequest\"4\n\x12\x43reateNodeResponse\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"3\n\x11\x44\x65leteNodeRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"\x14\n\x12\x44\x65leteNodeResponse\"-\n\x0bPingRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"\x1f\n\x0cPingResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\"F\n\x12PullTaskInsRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"k\n\x13PullTaskInsResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12*\n\rtask_ins_list\x18\x02 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"@\n\x12PushTaskResRequest\x12*\n\rtask_res_list\x18\x01 
\x03(\x0b\x32\x13.flwr.proto.TaskRes\"\xae\x01\n\x13PushTaskResResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12=\n\x07results\x18\x02 \x03(\x0b\x32,.flwr.proto.PushTaskResResponse.ResultsEntry\x1a.\n\x0cResultsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\r:\x02\x38\x01\"\x1e\n\tReconnect\x12\x11\n\treconnect\x18\x01 \x01(\x04\x32\x86\x03\n\x05\x46leet\x12M\n\nCreateNode\x12\x1d.flwr.proto.CreateNodeRequest\x1a\x1e.flwr.proto.CreateNodeResponse\"\x00\x12M\n\nDeleteNode\x12\x1d.flwr.proto.DeleteNodeRequest\x1a\x1e.flwr.proto.DeleteNodeResponse\"\x00\x12;\n\x04Ping\x12\x17.flwr.proto.PingRequest\x1a\x18.flwr.proto.PingResponse\"\x00\x12P\n\x0bPullTaskIns\x12\x1e.flwr.proto.PullTaskInsRequest\x1a\x1f.flwr.proto.PullTaskInsResponse\"\x00\x12P\n\x0bPushTaskRes\x12\x1e.flwr.proto.PushTaskResRequest\x1a\x1f.flwr.proto.PushTaskResResponse\"\x00\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16\x66lwr/proto/fleet.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\"*\n\x11\x43reateNodeRequest\x12\x15\n\rping_interval\x18\x01 \x01(\x01\"4\n\x12\x43reateNodeResponse\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"3\n\x11\x44\x65leteNodeRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"\x14\n\x12\x44\x65leteNodeResponse\"D\n\x0bPingRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x15\n\rping_interval\x18\x02 \x01(\x01\"\x1f\n\x0cPingResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\"F\n\x12PullTaskInsRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"k\n\x13PullTaskInsResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12*\n\rtask_ins_list\x18\x02 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"@\n\x12PushTaskResRequest\x12*\n\rtask_res_list\x18\x01 
\x03(\x0b\x32\x13.flwr.proto.TaskRes\"\xae\x01\n\x13PushTaskResResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12=\n\x07results\x18\x02 \x03(\x0b\x32,.flwr.proto.PushTaskResResponse.ResultsEntry\x1a.\n\x0cResultsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\r:\x02\x38\x01\":\n\x03Run\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\x12\x0e\n\x06\x66\x61\x62_id\x18\x02 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x03 \x01(\t\"\x1f\n\rGetRunRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\".\n\x0eGetRunResponse\x12\x1c\n\x03run\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Run\"\x1e\n\tReconnect\x12\x11\n\treconnect\x18\x01 \x01(\x04\x32\xc9\x03\n\x05\x46leet\x12M\n\nCreateNode\x12\x1d.flwr.proto.CreateNodeRequest\x1a\x1e.flwr.proto.CreateNodeResponse\"\x00\x12M\n\nDeleteNode\x12\x1d.flwr.proto.DeleteNodeRequest\x1a\x1e.flwr.proto.DeleteNodeResponse\"\x00\x12;\n\x04Ping\x12\x17.flwr.proto.PingRequest\x1a\x18.flwr.proto.PingResponse\"\x00\x12P\n\x0bPullTaskIns\x12\x1e.flwr.proto.PullTaskInsRequest\x1a\x1f.flwr.proto.PullTaskInsResponse\"\x00\x12P\n\x0bPushTaskRes\x12\x1e.flwr.proto.PushTaskResRequest\x1a\x1f.flwr.proto.PushTaskResResponse\"\x00\x12\x41\n\x06GetRun\x12\x19.flwr.proto.GetRunRequest\x1a\x1a.flwr.proto.GetRunResponse\"\x00\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -26,29 +26,35 @@ _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._options = None _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_options = b'8\001' _globals['_CREATENODEREQUEST']._serialized_start=84 - _globals['_CREATENODEREQUEST']._serialized_end=103 - _globals['_CREATENODERESPONSE']._serialized_start=105 - _globals['_CREATENODERESPONSE']._serialized_end=157 - _globals['_DELETENODEREQUEST']._serialized_start=159 - _globals['_DELETENODEREQUEST']._serialized_end=210 - _globals['_DELETENODERESPONSE']._serialized_start=212 - _globals['_DELETENODERESPONSE']._serialized_end=232 - 
_globals['_PINGREQUEST']._serialized_start=234 - _globals['_PINGREQUEST']._serialized_end=279 - _globals['_PINGRESPONSE']._serialized_start=281 - _globals['_PINGRESPONSE']._serialized_end=312 - _globals['_PULLTASKINSREQUEST']._serialized_start=314 - _globals['_PULLTASKINSREQUEST']._serialized_end=384 - _globals['_PULLTASKINSRESPONSE']._serialized_start=386 - _globals['_PULLTASKINSRESPONSE']._serialized_end=493 - _globals['_PUSHTASKRESREQUEST']._serialized_start=495 - _globals['_PUSHTASKRESREQUEST']._serialized_end=559 - _globals['_PUSHTASKRESRESPONSE']._serialized_start=562 - _globals['_PUSHTASKRESRESPONSE']._serialized_end=736 - _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_start=690 - _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_end=736 - _globals['_RECONNECT']._serialized_start=738 - _globals['_RECONNECT']._serialized_end=768 - _globals['_FLEET']._serialized_start=771 - _globals['_FLEET']._serialized_end=1161 + _globals['_CREATENODEREQUEST']._serialized_end=126 + _globals['_CREATENODERESPONSE']._serialized_start=128 + _globals['_CREATENODERESPONSE']._serialized_end=180 + _globals['_DELETENODEREQUEST']._serialized_start=182 + _globals['_DELETENODEREQUEST']._serialized_end=233 + _globals['_DELETENODERESPONSE']._serialized_start=235 + _globals['_DELETENODERESPONSE']._serialized_end=255 + _globals['_PINGREQUEST']._serialized_start=257 + _globals['_PINGREQUEST']._serialized_end=325 + _globals['_PINGRESPONSE']._serialized_start=327 + _globals['_PINGRESPONSE']._serialized_end=358 + _globals['_PULLTASKINSREQUEST']._serialized_start=360 + _globals['_PULLTASKINSREQUEST']._serialized_end=430 + _globals['_PULLTASKINSRESPONSE']._serialized_start=432 + _globals['_PULLTASKINSRESPONSE']._serialized_end=539 + _globals['_PUSHTASKRESREQUEST']._serialized_start=541 + _globals['_PUSHTASKRESREQUEST']._serialized_end=605 + _globals['_PUSHTASKRESRESPONSE']._serialized_start=608 + _globals['_PUSHTASKRESRESPONSE']._serialized_end=782 + 
_globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_start=736 + _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_end=782 + _globals['_RUN']._serialized_start=784 + _globals['_RUN']._serialized_end=842 + _globals['_GETRUNREQUEST']._serialized_start=844 + _globals['_GETRUNREQUEST']._serialized_end=875 + _globals['_GETRUNRESPONSE']._serialized_start=877 + _globals['_GETRUNRESPONSE']._serialized_end=923 + _globals['_RECONNECT']._serialized_start=925 + _globals['_RECONNECT']._serialized_end=955 + _globals['_FLEET']._serialized_start=958 + _globals['_FLEET']._serialized_end=1415 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/fleet_pb2.pyi b/src/py/flwr/proto/fleet_pb2.pyi index 39edb61ca0d7..a6f38b703e76 100644 --- a/src/py/flwr/proto/fleet_pb2.pyi +++ b/src/py/flwr/proto/fleet_pb2.pyi @@ -16,8 +16,13 @@ DESCRIPTOR: google.protobuf.descriptor.FileDescriptor class CreateNodeRequest(google.protobuf.message.Message): """CreateNode messages""" DESCRIPTOR: google.protobuf.descriptor.Descriptor + PING_INTERVAL_FIELD_NUMBER: builtins.int + ping_interval: builtins.float def __init__(self, + *, + ping_interval: builtins.float = ..., ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["ping_interval",b"ping_interval"]) -> None: ... global___CreateNodeRequest = CreateNodeRequest class CreateNodeResponse(google.protobuf.message.Message): @@ -57,14 +62,17 @@ class PingRequest(google.protobuf.message.Message): """Ping messages""" DESCRIPTOR: google.protobuf.descriptor.Descriptor NODE_FIELD_NUMBER: builtins.int + PING_INTERVAL_FIELD_NUMBER: builtins.int @property def node(self) -> flwr.proto.node_pb2.Node: ... + ping_interval: builtins.float def __init__(self, *, node: typing.Optional[flwr.proto.node_pb2.Node] = ..., + ping_interval: builtins.float = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["node",b"node"]) -> builtins.bool: ... 
- def ClearField(self, field_name: typing_extensions.Literal["node",b"node"]) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["node",b"node","ping_interval",b"ping_interval"]) -> None: ... global___PingRequest = PingRequest class PingResponse(google.protobuf.message.Message): @@ -156,6 +164,48 @@ class PushTaskResResponse(google.protobuf.message.Message): def ClearField(self, field_name: typing_extensions.Literal["reconnect",b"reconnect","results",b"results"]) -> None: ... global___PushTaskResResponse = PushTaskResResponse +class Run(google.protobuf.message.Message): + """GetRun messages""" + DESCRIPTOR: google.protobuf.descriptor.Descriptor + RUN_ID_FIELD_NUMBER: builtins.int + FAB_ID_FIELD_NUMBER: builtins.int + FAB_VERSION_FIELD_NUMBER: builtins.int + run_id: builtins.int + fab_id: typing.Text + fab_version: typing.Text + def __init__(self, + *, + run_id: builtins.int = ..., + fab_id: typing.Text = ..., + fab_version: typing.Text = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["fab_id",b"fab_id","fab_version",b"fab_version","run_id",b"run_id"]) -> None: ... +global___Run = Run + +class GetRunRequest(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + RUN_ID_FIELD_NUMBER: builtins.int + run_id: builtins.int + def __init__(self, + *, + run_id: builtins.int = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["run_id",b"run_id"]) -> None: ... +global___GetRunRequest = GetRunRequest + +class GetRunResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + RUN_FIELD_NUMBER: builtins.int + @property + def run(self) -> global___Run: ... + def __init__(self, + *, + run: typing.Optional[global___Run] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["run",b"run"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["run",b"run"]) -> None: ... 
+global___GetRunResponse = GetRunResponse + class Reconnect(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor RECONNECT_FIELD_NUMBER: builtins.int diff --git a/src/py/flwr/proto/fleet_pb2_grpc.py b/src/py/flwr/proto/fleet_pb2_grpc.py index c31a4ec73f0e..16757eaed381 100644 --- a/src/py/flwr/proto/fleet_pb2_grpc.py +++ b/src/py/flwr/proto/fleet_pb2_grpc.py @@ -39,6 +39,11 @@ def __init__(self, channel): request_serializer=flwr_dot_proto_dot_fleet__pb2.PushTaskResRequest.SerializeToString, response_deserializer=flwr_dot_proto_dot_fleet__pb2.PushTaskResResponse.FromString, ) + self.GetRun = channel.unary_unary( + '/flwr.proto.Fleet/GetRun', + request_serializer=flwr_dot_proto_dot_fleet__pb2.GetRunRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_fleet__pb2.GetRunResponse.FromString, + ) class FleetServicer(object): @@ -80,6 +85,12 @@ def PushTaskRes(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def GetRun(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def add_FleetServicer_to_server(servicer, server): rpc_method_handlers = { @@ -108,6 +119,11 @@ def add_FleetServicer_to_server(servicer, server): request_deserializer=flwr_dot_proto_dot_fleet__pb2.PushTaskResRequest.FromString, response_serializer=flwr_dot_proto_dot_fleet__pb2.PushTaskResResponse.SerializeToString, ), + 'GetRun': grpc.unary_unary_rpc_method_handler( + servicer.GetRun, + request_deserializer=flwr_dot_proto_dot_fleet__pb2.GetRunRequest.FromString, + response_serializer=flwr_dot_proto_dot_fleet__pb2.GetRunResponse.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( 'flwr.proto.Fleet', rpc_method_handlers) @@ -202,3 +218,20 @@ def 
PushTaskRes(request, flwr_dot_proto_dot_fleet__pb2.PushTaskResResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetRun(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.Fleet/GetRun', + flwr_dot_proto_dot_fleet__pb2.GetRunRequest.SerializeToString, + flwr_dot_proto_dot_fleet__pb2.GetRunResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/src/py/flwr/proto/fleet_pb2_grpc.pyi b/src/py/flwr/proto/fleet_pb2_grpc.pyi index 33ba9440793a..f275cd149d69 100644 --- a/src/py/flwr/proto/fleet_pb2_grpc.pyi +++ b/src/py/flwr/proto/fleet_pb2_grpc.pyi @@ -36,6 +36,10 @@ class FleetStub: HTTP API path: /api/v1/fleet/push-task-res """ + GetRun: grpc.UnaryUnaryMultiCallable[ + flwr.proto.fleet_pb2.GetRunRequest, + flwr.proto.fleet_pb2.GetRunResponse] + class FleetServicer(metaclass=abc.ABCMeta): @abc.abstractmethod @@ -78,5 +82,11 @@ class FleetServicer(metaclass=abc.ABCMeta): """ pass + @abc.abstractmethod + def GetRun(self, + request: flwr.proto.fleet_pb2.GetRunRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.fleet_pb2.GetRunResponse: ... + def add_FleetServicer_to_server(servicer: FleetServicer, server: grpc.Server) -> None: ... 
diff --git a/src/py/flwr/proto/task_pb2.py b/src/py/flwr/proto/task_pb2.py index abf7d72d7174..5f6e9e7be583 100644 --- a/src/py/flwr/proto/task_pb2.py +++ b/src/py/flwr/proto/task_pb2.py @@ -18,7 +18,7 @@ from flwr.proto import error_pb2 as flwr_dot_proto_dot_error__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/task.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x1a\x66lwr/proto/transport.proto\x1a\x16\x66lwr/proto/error.proto\"\xf6\x01\n\x04Task\x12\"\n\x08producer\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\"\n\x08\x63onsumer\x18\x02 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x12\n\ncreated_at\x18\x03 \x01(\t\x12\x14\n\x0c\x64\x65livered_at\x18\x04 \x01(\t\x12\x0b\n\x03ttl\x18\x05 \x01(\x01\x12\x10\n\x08\x61ncestry\x18\x06 \x03(\t\x12\x11\n\ttask_type\x18\x07 \x01(\t\x12(\n\trecordset\x18\x08 \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\t \x01(\x0b\x32\x11.flwr.proto.Error\"\\\n\x07TaskIns\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Task\"\\\n\x07TaskRes\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Taskb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/task.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x1a\x66lwr/proto/transport.proto\x1a\x16\x66lwr/proto/error.proto\"\x89\x02\n\x04Task\x12\"\n\x08producer\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\"\n\x08\x63onsumer\x18\x02 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x12\n\ncreated_at\x18\x03 \x01(\x01\x12\x14\n\x0c\x64\x65livered_at\x18\x04 \x01(\t\x12\x11\n\tpushed_at\x18\x05 \x01(\x01\x12\x0b\n\x03ttl\x18\x06 \x01(\x01\x12\x10\n\x08\x61ncestry\x18\x07 
\x03(\t\x12\x11\n\ttask_type\x18\x08 \x01(\t\x12(\n\trecordset\x18\t \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\n \x01(\x0b\x32\x11.flwr.proto.Error\"\\\n\x07TaskIns\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Task\"\\\n\x07TaskRes\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Taskb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -26,9 +26,9 @@ if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None _globals['_TASK']._serialized_start=141 - _globals['_TASK']._serialized_end=387 - _globals['_TASKINS']._serialized_start=389 - _globals['_TASKINS']._serialized_end=481 - _globals['_TASKRES']._serialized_start=483 - _globals['_TASKRES']._serialized_end=575 + _globals['_TASK']._serialized_end=406 + _globals['_TASKINS']._serialized_start=408 + _globals['_TASKINS']._serialized_end=500 + _globals['_TASKRES']._serialized_start=502 + _globals['_TASKRES']._serialized_end=594 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/task_pb2.pyi b/src/py/flwr/proto/task_pb2.pyi index 735400eca701..455791ac9e6e 100644 --- a/src/py/flwr/proto/task_pb2.pyi +++ b/src/py/flwr/proto/task_pb2.pyi @@ -20,6 +20,7 @@ class Task(google.protobuf.message.Message): CONSUMER_FIELD_NUMBER: builtins.int CREATED_AT_FIELD_NUMBER: builtins.int DELIVERED_AT_FIELD_NUMBER: builtins.int + PUSHED_AT_FIELD_NUMBER: builtins.int TTL_FIELD_NUMBER: builtins.int ANCESTRY_FIELD_NUMBER: builtins.int TASK_TYPE_FIELD_NUMBER: builtins.int @@ -29,8 +30,9 @@ class Task(google.protobuf.message.Message): def producer(self) -> flwr.proto.node_pb2.Node: ... @property def consumer(self) -> flwr.proto.node_pb2.Node: ... 
- created_at: typing.Text + created_at: builtins.float delivered_at: typing.Text + pushed_at: builtins.float ttl: builtins.float @property def ancestry(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[typing.Text]: ... @@ -43,8 +45,9 @@ class Task(google.protobuf.message.Message): *, producer: typing.Optional[flwr.proto.node_pb2.Node] = ..., consumer: typing.Optional[flwr.proto.node_pb2.Node] = ..., - created_at: typing.Text = ..., + created_at: builtins.float = ..., delivered_at: typing.Text = ..., + pushed_at: builtins.float = ..., ttl: builtins.float = ..., ancestry: typing.Optional[typing.Iterable[typing.Text]] = ..., task_type: typing.Text = ..., @@ -52,7 +55,7 @@ class Task(google.protobuf.message.Message): error: typing.Optional[flwr.proto.error_pb2.Error] = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["consumer",b"consumer","error",b"error","producer",b"producer","recordset",b"recordset"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["ancestry",b"ancestry","consumer",b"consumer","created_at",b"created_at","delivered_at",b"delivered_at","error",b"error","producer",b"producer","recordset",b"recordset","task_type",b"task_type","ttl",b"ttl"]) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["ancestry",b"ancestry","consumer",b"consumer","created_at",b"created_at","delivered_at",b"delivered_at","error",b"error","producer",b"producer","pushed_at",b"pushed_at","recordset",b"recordset","task_type",b"task_type","ttl",b"ttl"]) -> None: ... 
global___Task = Task class TaskIns(google.protobuf.message.Message): diff --git a/src/py/flwr/server/__init__.py b/src/py/flwr/server/__init__.py index 633bd668b520..875f66c43d03 100644 --- a/src/py/flwr/server/__init__.py +++ b/src/py/flwr/server/__init__.py @@ -24,7 +24,6 @@ from .client_manager import ClientManager as ClientManager from .client_manager import SimpleClientManager as SimpleClientManager from .compat import LegacyContext as LegacyContext -from .compat import start_driver as start_driver from .driver import Driver as Driver from .history import History as History from .run_serverapp import run_server_app as run_server_app @@ -45,7 +44,6 @@ "ServerApp", "ServerConfig", "SimpleClientManager", - "start_driver", "start_server", "strategy", "workflow", diff --git a/src/py/flwr/server/app.py b/src/py/flwr/server/app.py index e04cfb37e118..30e73fccb7df 100644 --- a/src/py/flwr/server/app.py +++ b/src/py/flwr/server/app.py @@ -16,15 +16,21 @@ import argparse import asyncio +import csv import importlib.util import sys import threading from logging import ERROR, INFO, WARN from os.path import isfile from pathlib import Path -from typing import List, Optional, Tuple +from typing import List, Optional, Sequence, Set, Tuple import grpc +from cryptography.hazmat.primitives.asymmetric import ec +from cryptography.hazmat.primitives.serialization import ( + load_ssh_private_key, + load_ssh_public_key, +) from flwr.common import GRPC_MAX_MESSAGE_LENGTH, EventType, event from flwr.common.address import parse_address @@ -35,7 +41,12 @@ TRANSPORT_TYPE_VCE, ) from flwr.common.exit_handlers import register_exit_handlers -from flwr.common.logger import log +from flwr.common.logger import log, warn_deprecated_feature +from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( + private_key_to_bytes, + public_key_to_bytes, + ssh_types_to_elliptic_curve, +) from flwr.proto.fleet_pb2_grpc import ( # pylint: disable=E0611 add_FleetServicer_to_server, ) @@ -51,6 
+62,7 @@ start_grpc_server, ) from .superlink.fleet.grpc_rere.fleet_servicer import FleetServicer +from .superlink.fleet.grpc_rere.server_interceptor import AuthenticateServerInterceptor from .superlink.fleet.vce import start_vce from .superlink.state import StateFactory @@ -184,6 +196,9 @@ def start_server( # pylint: disable=too-many-arguments,too-many-locals def run_driver_api() -> None: """Run Flower server (Driver API).""" log(INFO, "Starting Flower server (Driver API)") + # Running `flower-driver-api` is deprecated + warn_deprecated_feature("flower-driver-api") + log(WARN, "Use `flower-superlink` instead") event(EventType.RUN_DRIVER_API_ENTER) args = _parse_args_run_driver_api().parse_args() @@ -221,6 +236,9 @@ def run_driver_api() -> None: def run_fleet_api() -> None: """Run Flower server (Fleet API).""" log(INFO, "Starting Flower server (Fleet API)") + # Running `flower-fleet-api` is deprecated + warn_deprecated_feature("flower-fleet-api") + log(WARN, "Use `flower-superlink` instead") event(EventType.RUN_FLEET_API_ENTER) args = _parse_args_run_fleet_api().parse_args() @@ -291,9 +309,11 @@ def run_fleet_api() -> None: # pylint: disable=too-many-branches, too-many-locals, too-many-statements def run_superlink() -> None: - """Run Flower server (Driver API and Fleet API).""" - log(INFO, "Starting Flower server") + """Run Flower SuperLink (Driver API and Fleet API).""" + log(INFO, "Starting Flower SuperLink") + event(EventType.RUN_SUPERLINK_ENTER) + args = _parse_args_run_superlink().parse_args() # Parse IP address @@ -352,10 +372,33 @@ def run_superlink() -> None: sys.exit(f"Fleet IP address ({address_arg}) cannot be parsed.") host, port, is_v6 = parsed_address address = f"[{host}]:{port}" if is_v6 else f"{host}:{port}" + + maybe_keys = _try_setup_client_authentication(args, certificates) + interceptors: Optional[Sequence[grpc.ServerInterceptor]] = None + if maybe_keys is not None: + ( + client_public_keys, + server_private_key, + server_public_key, + ) = 
maybe_keys + state = state_factory.state() + state.store_client_public_keys(client_public_keys) + state.store_server_private_public_key( + private_key_to_bytes(server_private_key), + public_key_to_bytes(server_public_key), + ) + log( + INFO, + "Client authentication enabled with %d known public keys", + len(client_public_keys), + ) + interceptors = [AuthenticateServerInterceptor(state)] + fleet_server = _run_fleet_api_grpc_rere( address=address, state_factory=state_factory, certificates=certificates, + interceptors=interceptors, ) grpc_servers.append(fleet_server) elif args.fleet_api_type == TRANSPORT_TYPE_VCE: @@ -388,6 +431,70 @@ def run_superlink() -> None: driver_server.wait_for_termination(timeout=1) +def _try_setup_client_authentication( + args: argparse.Namespace, + certificates: Optional[Tuple[bytes, bytes, bytes]], +) -> Optional[Tuple[Set[bytes], ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]]: + if not args.require_client_authentication: + return None + + if certificates is None: + sys.exit( + "Client authentication only works over secure connections. " + "Please provide certificate paths using '--certificates' when " + "enabling '--require-client-authentication'." + ) + + client_keys_file_path = Path(args.require_client_authentication[0]) + if not client_keys_file_path.exists(): + sys.exit( + "The provided path to the client public keys CSV file does not exist: " + f"{client_keys_file_path}. " + "Please provide the CSV file path containing known client public keys " + "to '--require-client-authentication'." 
+ ) + + client_public_keys: Set[bytes] = set() + ssh_private_key = load_ssh_private_key( + Path(args.require_client_authentication[1]).read_bytes(), + None, + ) + ssh_public_key = load_ssh_public_key( + Path(args.require_client_authentication[2]).read_bytes() + ) + + try: + server_private_key, server_public_key = ssh_types_to_elliptic_curve( + ssh_private_key, ssh_public_key + ) + except TypeError: + sys.exit( + "The file paths provided could not be read as a private and public " + "key pair. Client authentication requires an elliptic curve public and " + "private key pair. Please provide the file paths containing elliptic " + "curve private and public keys to '--require-client-authentication'." + ) + + with open(client_keys_file_path, newline="", encoding="utf-8") as csvfile: + reader = csv.reader(csvfile) + for row in reader: + for element in row: + public_key = load_ssh_public_key(element.encode()) + if isinstance(public_key, ec.EllipticCurvePublicKey): + client_public_keys.add(public_key_to_bytes(public_key)) + else: + sys.exit( + "Error: Unable to parse the public keys in the .csv " + "file. Please ensure that the .csv file contains valid " + "SSH public keys and try again." 
+ ) + return ( + client_public_keys, + server_private_key, + server_public_key, + ) + + def _try_obtain_certificates( args: argparse.Namespace, ) -> Optional[Tuple[bytes, bytes, bytes]]: @@ -415,6 +522,7 @@ def _run_fleet_api_grpc_rere( address: str, state_factory: StateFactory, certificates: Optional[Tuple[bytes, bytes, bytes]], + interceptors: Optional[Sequence[grpc.ServerInterceptor]] = None, ) -> grpc.Server: """Run Fleet API (gRPC, request-response).""" # Create Fleet API gRPC server @@ -427,6 +535,7 @@ def _run_fleet_api_grpc_rere( server_address=address, max_message_length=GRPC_MAX_MESSAGE_LENGTH, certificates=certificates, + interceptors=interceptors, ) log(INFO, "Flower ECE: Starting Fleet API (gRPC-rere) on %s", address) @@ -568,9 +677,7 @@ def _parse_args_run_fleet_api() -> argparse.ArgumentParser: def _parse_args_run_superlink() -> argparse.ArgumentParser: """Parse command line arguments for both Driver API and Fleet API.""" parser = argparse.ArgumentParser( - description="This will start a Flower server " - "(meaning, a Driver API and a Fleet API), " - "that clients will be able to connect to.", + description="Start a Flower SuperLink", ) _add_args_common(parser=parser) @@ -606,6 +713,15 @@ def _add_args_common(parser: argparse.ArgumentParser) -> None: "Flower will just create a state in memory.", default=DATABASE, ) + parser.add_argument( + "--require-client-authentication", + nargs=3, + metavar=("CLIENT_KEYS", "SERVER_PRIVATE_KEY", "SERVER_PUBLIC_KEY"), + type=str, + help="Provide three file paths: (1) a .csv file containing a list of " + "known client public keys for authentication, (2) the server's private " + "key file, and (3) the server's public key file.", + ) def _add_args_driver_api(parser: argparse.ArgumentParser) -> None: diff --git a/src/py/flwr/server/compat/app.py b/src/py/flwr/server/compat/app.py index ff1d99b5366e..4bb23b846ab7 100644 --- a/src/py/flwr/server/compat/app.py +++ b/src/py/flwr/server/compat/app.py @@ -15,14 +15,11 @@ 
"""Flower driver app.""" -import sys from logging import INFO -from pathlib import Path -from typing import Optional, Union +from typing import Optional from flwr.common import EventType, event -from flwr.common.address import parse_address -from flwr.common.logger import log, warn_deprecated_feature +from flwr.common.logger import log from flwr.server.client_manager import ClientManager from flwr.server.history import History from flwr.server.server import Server, init_defaults, run_fl @@ -32,33 +29,21 @@ from ..driver import Driver from .app_utils import start_update_client_manager_thread -DEFAULT_SERVER_ADDRESS_DRIVER = "[::]:9091" - -ERROR_MESSAGE_DRIVER_NOT_CONNECTED = """ -[Driver] Error: Not connected. - -Call `connect()` on the `Driver` instance before calling any of the other `Driver` -methods. -""" - def start_driver( # pylint: disable=too-many-arguments, too-many-locals *, - server_address: str = DEFAULT_SERVER_ADDRESS_DRIVER, + driver: Driver, server: Optional[Server] = None, config: Optional[ServerConfig] = None, strategy: Optional[Strategy] = None, client_manager: Optional[ClientManager] = None, - root_certificates: Optional[Union[bytes, str]] = None, - driver: Optional[Driver] = None, ) -> History: """Start a Flower Driver API server. Parameters ---------- - server_address : Optional[str] - The IPv4 or IPv6 address of the Driver API server. - Defaults to `"[::]:8080"`. + driver : Driver + The Driver object to use. server : Optional[flwr.server.Server] (default: None) A server implementation, either `flwr.server.Server` or a subclass thereof. If no instance is provided, then `start_driver` will create @@ -74,50 +59,14 @@ def start_driver( # pylint: disable=too-many-arguments, too-many-locals An implementation of the class `flwr.server.ClientManager`. If no implementation is provided, then `start_driver` will use `flwr.server.SimpleClientManager`. 
- root_certificates : Optional[Union[bytes, str]] (default: None) - The PEM-encoded root certificates as a byte string or a path string. - If provided, a secure connection using the certificates will be - established to an SSL-enabled Flower server. - driver : Optional[Driver] (default: None) - The Driver object to use. Returns ------- hist : flwr.server.history.History Object containing training and evaluation metrics. - - Examples - -------- - Starting a driver that connects to an insecure server: - - >>> start_driver() - - Starting a driver that connects to an SSL-enabled server: - - >>> start_driver( - >>> root_certificates=Path("/crts/root.pem").read_bytes() - >>> ) """ event(EventType.START_DRIVER_ENTER) - if driver is None: - # Not passing a `Driver` object is deprecated - warn_deprecated_feature("start_driver") - - # Parse IP address - parsed_address = parse_address(server_address) - if not parsed_address: - sys.exit(f"Server IP address ({server_address}) cannot be parsed.") - host, port, is_v6 = parsed_address - address = f"[{host}]:{port}" if is_v6 else f"{host}:{port}" - - # Create the Driver - if isinstance(root_certificates, str): - root_certificates = Path(root_certificates).read_bytes() - driver = Driver( - driver_service_address=address, root_certificates=root_certificates - ) - # Initialize the Driver API server and config initialized_server, initialized_config = init_defaults( server=server, diff --git a/src/py/flwr/server/compat/app_utils.py b/src/py/flwr/server/compat/app_utils.py index 696ec1132c4a..1cdf1efbffb9 100644 --- a/src/py/flwr/server/compat/app_utils.py +++ b/src/py/flwr/server/compat/app_utils.py @@ -16,7 +16,6 @@ import threading -import time from typing import Dict, Tuple from ..client_manager import ClientManager @@ -60,6 +59,7 @@ def start_update_client_manager_thread( client_manager, f_stop, ), + daemon=True, ) thread.start() @@ -89,7 +89,7 @@ def _update_client_manager( for node_id in new_nodes: client_proxy = 
DriverClientProxy( node_id=node_id, - driver=driver.grpc_driver, # type: ignore + driver=driver, anonymous=False, run_id=driver.run_id, # type: ignore ) @@ -99,4 +99,5 @@ def _update_client_manager( raise RuntimeError("Could not register node.") # Sleep for 3 seconds - time.sleep(3) + if not f_stop.is_set(): + f_stop.wait(3) diff --git a/src/py/flwr/server/compat/app_utils_test.py b/src/py/flwr/server/compat/app_utils_test.py index 7e47e6eaaf32..023d65b0dc72 100644 --- a/src/py/flwr/server/compat/app_utils_test.py +++ b/src/py/flwr/server/compat/app_utils_test.py @@ -17,6 +17,8 @@ import time import unittest +from threading import Event +from typing import Optional from unittest.mock import Mock, patch from ..client_manager import SimpleClientManager @@ -29,9 +31,6 @@ class TestUtils(unittest.TestCase): def test_start_update_client_manager_thread(self) -> None: """Test start_update_client_manager_thread function.""" # Prepare - sleep = time.sleep - sleep_patch = patch("time.sleep", lambda x: sleep(x / 100)) - sleep_patch.start() expected_node_ids = list(range(100)) updated_expected_node_ids = list(range(80, 120)) driver = Mock() @@ -39,20 +38,30 @@ def test_start_update_client_manager_thread(self) -> None: driver.run_id = 123 driver.get_node_ids.return_value = expected_node_ids client_manager = SimpleClientManager() + original_wait = Event.wait + + def custom_wait(self: Event, timeout: Optional[float] = None) -> None: + if timeout is not None: + timeout /= 100 + original_wait(self, timeout) # Execute - thread, f_stop = start_update_client_manager_thread(driver, client_manager) - # Wait until all nodes are registered via `client_manager.sample()` - client_manager.sample(len(expected_node_ids)) - # Retrieve all nodes in `client_manager` - node_ids = {proxy.node_id for proxy in client_manager.all().values()} - # Update the GetNodesResponse and wait until the `client_manager` is updated - driver.get_node_ids.return_value = updated_expected_node_ids - sleep(0.1) - # 
Retrieve all nodes in `client_manager` - updated_node_ids = {proxy.node_id for proxy in client_manager.all().values()} - # Stop the thread - f_stop.set() + # Patching Event.wait with our custom function + with patch.object(Event, "wait", new=custom_wait): + thread, f_stop = start_update_client_manager_thread(driver, client_manager) + # Wait until all nodes are registered via `client_manager.sample()` + client_manager.sample(len(expected_node_ids)) + # Retrieve all nodes in `client_manager` + node_ids = {proxy.node_id for proxy in client_manager.all().values()} + # Update the GetNodesResponse and wait until the `client_manager` is updated + driver.get_node_ids.return_value = updated_expected_node_ids + time.sleep(0.1) + # Retrieve all nodes in `client_manager` + updated_node_ids = { + proxy.node_id for proxy in client_manager.all().values() + } + # Stop the thread + f_stop.set() # Assert assert node_ids == set(expected_node_ids) diff --git a/src/py/flwr/server/compat/driver_client_proxy.py b/src/py/flwr/server/compat/driver_client_proxy.py index 99ba50d3e2d1..150803786f98 100644 --- a/src/py/flwr/server/compat/driver_client_proxy.py +++ b/src/py/flwr/server/compat/driver_client_proxy.py @@ -16,16 +16,14 @@ import time -from typing import List, Optional +from typing import Optional from flwr import common -from flwr.common import DEFAULT_TTL, MessageType, MessageTypeLegacy, RecordSet +from flwr.common import Message, MessageType, MessageTypeLegacy, RecordSet from flwr.common import recordset_compat as compat -from flwr.common import serde -from flwr.proto import driver_pb2, node_pb2, task_pb2 # pylint: disable=E0611 from flwr.server.client_proxy import ClientProxy -from ..driver.grpc_driver import GrpcDriver +from ..driver.driver import Driver SLEEP_TIME = 1 @@ -33,7 +31,7 @@ class DriverClientProxy(ClientProxy): """Flower client proxy which delegates work using the Driver API.""" - def __init__(self, node_id: int, driver: GrpcDriver, anonymous: bool, run_id: int): + 
def __init__(self, node_id: int, driver: Driver, anonymous: bool, run_id: int): super().__init__(str(node_id)) self.node_id = node_id self.driver = driver @@ -114,56 +112,38 @@ def _send_receive_recordset( timeout: Optional[float], group_id: Optional[int], ) -> RecordSet: - task_ins = task_pb2.TaskIns( # pylint: disable=E1101 - task_id="", - group_id=str(group_id) if group_id is not None else "", - run_id=self.run_id, - task=task_pb2.Task( # pylint: disable=E1101 - producer=node_pb2.Node( # pylint: disable=E1101 - node_id=0, - anonymous=True, - ), - consumer=node_pb2.Node( # pylint: disable=E1101 - node_id=self.node_id, - anonymous=self.anonymous, - ), - task_type=task_type, - recordset=serde.recordset_to_proto(recordset), - ttl=DEFAULT_TTL, - ), - ) - push_task_ins_req = driver_pb2.PushTaskInsRequest( # pylint: disable=E1101 - task_ins_list=[task_ins] - ) - # Send TaskIns to Driver API - push_task_ins_res = self.driver.push_task_ins(req=push_task_ins_req) + # Create message + message = self.driver.create_message( + content=recordset, + message_type=task_type, + dst_node_id=self.node_id, + group_id=str(group_id) if group_id else "", + ttl=timeout, + ) - if len(push_task_ins_res.task_ids) != 1: - raise ValueError("Unexpected number of task_ids") + # Push message + message_ids = list(self.driver.push_messages(messages=[message])) + if len(message_ids) != 1: + raise ValueError("Unexpected number of message_ids") - task_id = push_task_ins_res.task_ids[0] - if task_id == "": - raise ValueError(f"Failed to schedule task for node {self.node_id}") + message_id = message_ids[0] + if message_id == "": + raise ValueError(f"Failed to send message to node {self.node_id}") if timeout: start_time = time.time() while True: - pull_task_res_req = driver_pb2.PullTaskResRequest( # pylint: disable=E1101 - node=node_pb2.Node(node_id=0, anonymous=True), # pylint: disable=E1101 - task_ids=[task_id], - ) - - # Ask Driver API for TaskRes - pull_task_res_res = 
self.driver.pull_task_res(req=pull_task_res_req) - - task_res_list: List[task_pb2.TaskRes] = list( # pylint: disable=E1101 - pull_task_res_res.task_res_list - ) - if len(task_res_list) == 1: - task_res = task_res_list[0] - return serde.recordset_from_proto(task_res.task.recordset) + messages = list(self.driver.pull_messages(message_ids)) + if len(messages) == 1: + msg: Message = messages[0] + if msg.has_error(): + raise ValueError( + f"Message contains an Error (reason: {msg.error.reason}). " + "It originated during client-side execution of a message." + ) + return msg.content if timeout is not None and time.time() > start_time + timeout: raise RuntimeError("Timeout reached") diff --git a/src/py/flwr/server/compat/driver_client_proxy_test.py b/src/py/flwr/server/compat/driver_client_proxy_test.py index 3494049c1064..d9e3d3bc0824 100644 --- a/src/py/flwr/server/compat/driver_client_proxy_test.py +++ b/src/py/flwr/server/compat/driver_client_proxy_test.py @@ -16,59 +16,43 @@ import unittest -from typing import Union, cast -from unittest.mock import MagicMock +import unittest.mock +from typing import Any, Callable, Iterable, Optional, Union, cast +from unittest.mock import Mock import numpy as np import flwr +from flwr.common import Error, Message, Metadata, RecordSet from flwr.common import recordset_compat as compat -from flwr.common import serde -from flwr.common.constant import MessageType, MessageTypeLegacy from flwr.common.typing import ( Code, Config, EvaluateIns, EvaluateRes, + FitIns, FitRes, GetParametersIns, GetParametersRes, + GetPropertiesIns, GetPropertiesRes, Parameters, Properties, Status, ) -from flwr.proto import driver_pb2, node_pb2, task_pb2 # pylint: disable=E0611 - -from .driver_client_proxy import DriverClientProxy +from flwr.server.compat.driver_client_proxy import DriverClientProxy MESSAGE_PARAMETERS = Parameters(tensors=[b"abc"], tensor_type="np") CLIENT_PROPERTIES = cast(Properties, {"tensor_type": "numpy.ndarray"}) CLIENT_STATUS = 
Status(code=Code.OK, message="OK") +ERROR_REPLY = Error(code=0, reason="mock error") -def _make_task( - res: Union[GetParametersRes, GetPropertiesRes, FitRes, EvaluateRes] -) -> task_pb2.Task: # pylint: disable=E1101 - if isinstance(res, GetParametersRes): - message_type = MessageTypeLegacy.GET_PARAMETERS - recordset = compat.getparametersres_to_recordset(res, True) - elif isinstance(res, GetPropertiesRes): - message_type = MessageTypeLegacy.GET_PROPERTIES - recordset = compat.getpropertiesres_to_recordset(res) - elif isinstance(res, FitRes): - message_type = MessageType.TRAIN - recordset = compat.fitres_to_recordset(res, True) - elif isinstance(res, EvaluateRes): - message_type = MessageType.EVALUATE - recordset = compat.evaluateres_to_recordset(res) - else: - raise ValueError(f"Unsupported type: {type(res)}") - return task_pb2.Task( # pylint: disable=E1101 - task_type=message_type, - recordset=serde.recordset_to_proto(recordset), - ) +RUN_ID = 61016 +NODE_ID = 1 +INSTRUCTION_MESSAGE_ID = "mock instruction message id" +REPLY_MESSAGE_ID = "mock reply message id" class DriverClientProxyTestCase(unittest.TestCase): @@ -76,170 +60,232 @@ class DriverClientProxyTestCase(unittest.TestCase): def setUp(self) -> None: """Set up mocks for tests.""" - self.driver = MagicMock() - self.driver.get_nodes.return_value = ( - driver_pb2.GetNodesResponse( # pylint: disable=E1101 - nodes=[ - node_pb2.Node(node_id=1, anonymous=False) # pylint: disable=E1101 - ] - ) + driver = Mock() + driver.get_node_ids.return_value = [1] + driver.create_message.side_effect = self._create_message_dummy + client = DriverClientProxy( + node_id=NODE_ID, driver=driver, anonymous=False, run_id=61016 ) + self.driver = driver + self.client = client + self.created_msg: Optional[Message] = None + self.called_times: int = 0 + def test_get_properties(self) -> None: """Test positive case.""" # Prepare - self.driver.push_task_ins.return_value = ( - driver_pb2.PushTaskInsResponse( # pylint: disable=E1101 - 
task_ids=["19341fd7-62e1-4eb4-beb4-9876d3acda32"] - ) - ) - self.driver.pull_task_res.return_value = ( - driver_pb2.PullTaskResResponse( # pylint: disable=E1101 - task_res_list=[ - task_pb2.TaskRes( # pylint: disable=E1101 - task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", - group_id=str(0), - run_id=0, - task=_make_task( - GetPropertiesRes( - status=CLIENT_STATUS, properties=CLIENT_PROPERTIES - ) - ), - ) - ] - ) - ) - client = DriverClientProxy( - node_id=1, driver=self.driver, anonymous=True, run_id=0 - ) + res = GetPropertiesRes(status=CLIENT_STATUS, properties=CLIENT_PROPERTIES) + self.driver.push_messages.side_effect = self._get_push_messages(res) request_properties: Config = {"tensor_type": "str"} - ins: flwr.common.GetPropertiesIns = flwr.common.GetPropertiesIns( - config=request_properties - ) + ins = GetPropertiesIns(config=request_properties) # Execute - value: flwr.common.GetPropertiesRes = client.get_properties( - ins, timeout=None, group_id=0 - ) + value = self.client.get_properties(ins, timeout=None, group_id=0) # Assert - assert value.properties["tensor_type"] == "numpy.ndarray" + self._common_assertions(ins) + self.assertEqual(value.properties["tensor_type"], "numpy.ndarray") def test_get_parameters(self) -> None: """Test positive case.""" # Prepare - self.driver.push_task_ins.return_value = ( - driver_pb2.PushTaskInsResponse( # pylint: disable=E1101 - task_ids=["19341fd7-62e1-4eb4-beb4-9876d3acda32"] - ) - ) - self.driver.pull_task_res.return_value = ( - driver_pb2.PullTaskResResponse( # pylint: disable=E1101 - task_res_list=[ - task_pb2.TaskRes( # pylint: disable=E1101 - task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", - group_id=str(0), - run_id=0, - task=_make_task( - GetParametersRes( - status=CLIENT_STATUS, - parameters=MESSAGE_PARAMETERS, - ) - ), - ) - ] - ) + res = GetParametersRes( + status=CLIENT_STATUS, + parameters=MESSAGE_PARAMETERS, ) - client = DriverClientProxy( - node_id=1, driver=self.driver, anonymous=True, run_id=0 - ) - 
get_parameters_ins = GetParametersIns(config={}) + self.driver.push_messages.side_effect = self._get_push_messages(res) + ins = GetParametersIns(config={}) # Execute - value: flwr.common.GetParametersRes = client.get_parameters( - ins=get_parameters_ins, timeout=None, group_id=0 - ) + value = self.client.get_parameters(ins, timeout=None, group_id=0) # Assert - assert value.parameters.tensors[0] == b"abc" + self._common_assertions(ins) + self.assertEqual(value, res) def test_fit(self) -> None: """Test positive case.""" # Prepare - self.driver.push_task_ins.return_value = ( - driver_pb2.PushTaskInsResponse( # pylint: disable=E1101 - task_ids=["19341fd7-62e1-4eb4-beb4-9876d3acda32"] - ) - ) - self.driver.pull_task_res.return_value = ( - driver_pb2.PullTaskResResponse( # pylint: disable=E1101 - task_res_list=[ - task_pb2.TaskRes( # pylint: disable=E1101 - task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", - group_id=str(1), - run_id=0, - task=_make_task( - FitRes( - status=CLIENT_STATUS, - parameters=MESSAGE_PARAMETERS, - num_examples=10, - metrics={}, - ) - ), - ) - ] - ) - ) - client = DriverClientProxy( - node_id=1, driver=self.driver, anonymous=True, run_id=0 + res = FitRes( + status=CLIENT_STATUS, + parameters=MESSAGE_PARAMETERS, + num_examples=10, + metrics={}, ) + self.driver.push_messages.side_effect = self._get_push_messages(res) parameters = flwr.common.ndarrays_to_parameters([np.ones((2, 2))]) - ins: flwr.common.FitIns = flwr.common.FitIns(parameters, {}) + ins = FitIns(parameters, {}) # Execute - fit_res = client.fit(ins=ins, timeout=None, group_id=1) + value = self.client.fit(ins=ins, timeout=None, group_id=0) # Assert - assert fit_res.parameters.tensor_type == "np" - assert fit_res.parameters.tensors[0] == b"abc" - assert fit_res.num_examples == 10 + self._common_assertions(ins) + self.assertEqual(value, res) def test_evaluate(self) -> None: """Test positive case.""" # Prepare - self.driver.push_task_ins.return_value = ( - driver_pb2.PushTaskInsResponse( # 
pylint: disable=E1101 - task_ids=["19341fd7-62e1-4eb4-beb4-9876d3acda32"] - ) - ) - self.driver.pull_task_res.return_value = ( - driver_pb2.PullTaskResResponse( # pylint: disable=E1101 - task_res_list=[ - task_pb2.TaskRes( # pylint: disable=E1101 - task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", - group_id=str(1), - run_id=0, - task=_make_task( - EvaluateRes( - status=CLIENT_STATUS, - loss=0.0, - num_examples=0, - metrics={}, - ) - ), - ) - ] - ) + res = EvaluateRes( + status=CLIENT_STATUS, + loss=0.0, + num_examples=0, + metrics={}, ) - client = DriverClientProxy( - node_id=1, driver=self.driver, anonymous=True, run_id=0 - ) - parameters = Parameters(tensors=[], tensor_type="np") - evaluate_ins = EvaluateIns(parameters, {}) + self.driver.push_messages.side_effect = self._get_push_messages(res) + parameters = Parameters(tensors=[b"random params%^&*F"], tensor_type="np") + ins = EvaluateIns(parameters, {}) # Execute - evaluate_res = client.evaluate(evaluate_ins, timeout=None, group_id=1) + value = self.client.evaluate(ins, timeout=None, group_id=0) # Assert - assert 0.0 == evaluate_res.loss - assert 0 == evaluate_res.num_examples + self._common_assertions(ins) + self.assertEqual(value, res) + + def test_get_properties_and_fail(self) -> None: + """Test negative case.""" + # Prepare + self.driver.push_messages.side_effect = self._get_push_messages( + None, error_reply=True + ) + request_properties: Config = {"tensor_type": "str"} + ins = GetPropertiesIns(config=request_properties) + + # Execute and assert + self.assertRaises( + Exception, self.client.get_properties, ins, timeout=None, group_id=0 + ) + self._common_assertions(ins) + + def test_get_parameters_and_fail(self) -> None: + """Test negative case.""" + # Prepare + self.driver.push_messages.side_effect = self._get_push_messages( + None, error_reply=True + ) + ins = GetParametersIns(config={}) + + # Execute and assert + self.assertRaises( + Exception, self.client.get_parameters, ins, timeout=None, group_id=0 + ) 
+ self._common_assertions(ins) + + def test_fit_and_fail(self) -> None: + """Test negative case.""" + # Prepare + self.driver.push_messages.side_effect = self._get_push_messages( + None, error_reply=True + ) + parameters = flwr.common.ndarrays_to_parameters([np.ones((2, 2))]) + ins = FitIns(parameters, {}) + + # Execute and assert + self.assertRaises(Exception, self.client.fit, ins, timeout=None, group_id=0) + self._common_assertions(ins) + + def test_evaluate_and_fail(self) -> None: + """Test negative case.""" + # Prepare + self.driver.push_messages.side_effect = self._get_push_messages( + None, error_reply=True + ) + parameters = Parameters(tensors=[b"random params%^&*F"], tensor_type="np") + ins = EvaluateIns(parameters, {}) + + # Execute and assert + self.assertRaises( + Exception, self.client.evaluate, ins, timeout=None, group_id=0 + ) + self._common_assertions(ins) + + def _create_message_dummy( # pylint: disable=R0913 + self, + content: RecordSet, + message_type: str, + dst_node_id: int, + group_id: str, + ttl: Optional[float] = None, + ) -> Message: + """Create a new message. + + This is a method for the Mock object. 
+ """ + self.called_times += 1 + ttl_ = 123456 if ttl is None else ttl + metadata = Metadata( + run_id=RUN_ID, + message_id="", # Will be set by the server + src_node_id=0, + dst_node_id=dst_node_id, + reply_to_message="", + group_id=group_id, + ttl=ttl_, + message_type=message_type, + ) + self.created_msg = Message(metadata=metadata, content=content) + return self.created_msg + + def _get_push_messages( + self, + res: Union[GetParametersRes, GetPropertiesRes, FitRes, EvaluateRes, None], + error_reply: bool = False, + ) -> Callable[[Iterable[Message]], Iterable[str]]: + """Get the push_messages function that sets the return value of pull_messages + when called.""" + + def push_messages(messages: Iterable[Message]) -> Iterable[str]: + msg = list(messages)[0] + if error_reply: + recordset = None + ret = msg.create_error_reply(ERROR_REPLY) + elif isinstance(res, GetParametersRes): + recordset = compat.getparametersres_to_recordset(res, True) + elif isinstance(res, GetPropertiesRes): + recordset = compat.getpropertiesres_to_recordset(res) + elif isinstance(res, FitRes): + recordset = compat.fitres_to_recordset(res, True) + elif isinstance(res, EvaluateRes): + recordset = compat.evaluateres_to_recordset(res) + else: + raise ValueError(f"Unsupported type: {type(res)}") + if recordset is not None: + ret = msg.create_reply(recordset) + ret.metadata.__dict__["_message_id"] = REPLY_MESSAGE_ID + + # Set the return value of `pull_messages` + self.driver.pull_messages.return_value = [ret] + return [INSTRUCTION_MESSAGE_ID] + + return push_messages + + def _common_assertions(self, original_ins: Any) -> None: + """Check common assertions.""" + # Check if the created message contains the orignal *Ins + assert self.created_msg is not None + actual_ins = { # type: ignore + GetPropertiesIns: compat.recordset_to_getpropertiesins, + GetParametersIns: compat.recordset_to_getparametersins, + FitIns: (lambda x: compat.recordset_to_fitins(x, True)), + EvaluateIns: (lambda x: 
compat.recordset_to_evaluateins(x, True)), + }[type(original_ins)](self.created_msg.content) + self.assertEqual(self.called_times, 1) + self.assertEqual(actual_ins, original_ins) + + # Check if push_messages is called once with expected args/kwargs. + self.driver.push_messages.assert_called_once() + try: + self.driver.push_messages.assert_any_call([self.created_msg]) + except AssertionError: + self.driver.push_messages.assert_any_call(messages=[self.created_msg]) + + # Check if pull_messages is called once with expected args/kwargs. + self.driver.pull_messages.assert_called_once() + try: + self.driver.pull_messages.assert_called_with([INSTRUCTION_MESSAGE_ID]) + except AssertionError: + self.driver.pull_messages.assert_called_with( + message_ids=[INSTRUCTION_MESSAGE_ID] + ) diff --git a/src/py/flwr/server/driver/__init__.py b/src/py/flwr/server/driver/__init__.py index b61f6eebf6a8..deaddff77702 100644 --- a/src/py/flwr/server/driver/__init__.py +++ b/src/py/flwr/server/driver/__init__.py @@ -17,8 +17,10 @@ from .driver import Driver from .grpc_driver import GrpcDriver +from .inmemory_driver import InMemoryDriver __all__ = [ "Driver", "GrpcDriver", + "InMemoryDriver", ] diff --git a/src/py/flwr/server/driver/driver.py b/src/py/flwr/server/driver/driver.py index afebd90ea265..b95cec95ab47 100644 --- a/src/py/flwr/server/driver/driver.py +++ b/src/py/flwr/server/driver/driver.py @@ -1,4 +1,4 @@ -# Copyright 2022 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,86 +12,26 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""Flower driver service client.""" +"""Driver (abstract base class).""" -import time -from typing import Iterable, List, Optional, Tuple +from abc import ABC, abstractmethod +from typing import Iterable, List, Optional -from flwr.common import DEFAULT_TTL, Message, Metadata, RecordSet -from flwr.common.serde import message_from_taskres, message_to_taskins -from flwr.proto.driver_pb2 import ( # pylint: disable=E0611 - CreateRunRequest, - GetNodesRequest, - PullTaskResRequest, - PushTaskInsRequest, -) -from flwr.proto.node_pb2 import Node # pylint: disable=E0611 -from flwr.proto.task_pb2 import TaskIns # pylint: disable=E0611 +from flwr.common import Message, RecordSet -from .grpc_driver import DEFAULT_SERVER_ADDRESS_DRIVER, GrpcDriver +class Driver(ABC): + """Abstract base Driver class for the Driver API.""" -class Driver: - """`Driver` class provides an interface to the Driver API. - - Parameters - ---------- - driver_service_address : Optional[str] - The IPv4 or IPv6 address of the Driver API server. - Defaults to `"[::]:9091"`. - certificates : bytes (default: None) - Tuple containing root certificate, server certificate, and private key - to start a secure SSL-enabled server. The tuple is expected to have - three bytes elements in the following order: - - * CA certificate. - * server certificate. - * server private key. 
- """ - - def __init__( - self, - driver_service_address: str = DEFAULT_SERVER_ADDRESS_DRIVER, - root_certificates: Optional[bytes] = None, - ) -> None: - self.addr = driver_service_address - self.root_certificates = root_certificates - self.grpc_driver: Optional[GrpcDriver] = None - self.run_id: Optional[int] = None - self.node = Node(node_id=0, anonymous=True) - - def _get_grpc_driver_and_run_id(self) -> Tuple[GrpcDriver, int]: - # Check if the GrpcDriver is initialized - if self.grpc_driver is None or self.run_id is None: - # Connect and create run - self.grpc_driver = GrpcDriver( - driver_service_address=self.addr, - root_certificates=self.root_certificates, - ) - self.grpc_driver.connect() - res = self.grpc_driver.create_run(CreateRunRequest()) - self.run_id = res.run_id - return self.grpc_driver, self.run_id - - def _check_message(self, message: Message) -> None: - # Check if the message is valid - if not ( - message.metadata.run_id == self.run_id - and message.metadata.src_node_id == self.node.node_id - and message.metadata.message_id == "" - and message.metadata.reply_to_message == "" - and message.metadata.ttl > 0 - ): - raise ValueError(f"Invalid message: {message}") - + @abstractmethod def create_message( # pylint: disable=too-many-arguments self, content: RecordSet, message_type: str, dst_node_id: int, group_id: str, - ttl: float = DEFAULT_TTL, + ttl: Optional[float] = None, ) -> Message: """Create a new message with specified parameters. @@ -111,36 +51,23 @@ def create_message( # pylint: disable=too-many-arguments group_id : str The ID of the group to which this message is associated. In some settings, this is used as the FL round. - ttl : float (default: common.DEFAULT_TTL) + ttl : Optional[float] (default: None) Time-to-live for the round trip of this message, i.e., the time from sending this message to receiving a reply. It specifies in seconds the duration for - which the message and its potential reply are considered valid. 
+ which the message and its potential reply are considered valid. If unset, + the default TTL (i.e., `common.DEFAULT_TTL`) will be used. Returns ------- message : Message A new `Message` instance with the specified content and metadata. """ - _, run_id = self._get_grpc_driver_and_run_id() - metadata = Metadata( - run_id=run_id, - message_id="", # Will be set by the server - src_node_id=self.node.node_id, - dst_node_id=dst_node_id, - reply_to_message="", - group_id=group_id, - ttl=ttl, - message_type=message_type, - ) - return Message(metadata=metadata, content=content) + @abstractmethod def get_node_ids(self) -> List[int]: """Get node IDs.""" - grpc_driver, run_id = self._get_grpc_driver_and_run_id() - # Call GrpcDriver method - res = grpc_driver.get_nodes(GetNodesRequest(run_id=run_id)) - return [node.node_id for node in res.nodes] + @abstractmethod def push_messages(self, messages: Iterable[Message]) -> Iterable[str]: """Push messages to specified node IDs. @@ -158,20 +85,8 @@ def push_messages(self, messages: Iterable[Message]) -> Iterable[str]: An iterable of IDs for the messages that were sent, which can be used to pull replies. """ - grpc_driver, _ = self._get_grpc_driver_and_run_id() - # Construct TaskIns - task_ins_list: List[TaskIns] = [] - for msg in messages: - # Check message - self._check_message(msg) - # Convert Message to TaskIns - taskins = message_to_taskins(msg) - # Add to list - task_ins_list.append(taskins) - # Call GrpcDriver method - res = grpc_driver.push_task_ins(PushTaskInsRequest(task_ins_list=task_ins_list)) - return list(res.task_ids) + @abstractmethod def pull_messages(self, message_ids: Iterable[str]) -> Iterable[Message]: """Pull messages based on message IDs. @@ -188,15 +103,8 @@ def pull_messages(self, message_ids: Iterable[str]) -> Iterable[Message]: messages : Iterable[Message] An iterable of messages received. 
""" - grpc_driver, _ = self._get_grpc_driver_and_run_id() - # Pull TaskRes - res = grpc_driver.pull_task_res( - PullTaskResRequest(node=self.node, task_ids=message_ids) - ) - # Convert TaskRes to Message - msgs = [message_from_taskres(taskres) for taskres in res.task_res_list] - return msgs + @abstractmethod def send_and_receive( self, messages: Iterable[Message], @@ -230,28 +138,3 @@ def send_and_receive( replies for all sent messages. A message remains valid until its TTL, which is not affected by `timeout`. """ - # Push messages - msg_ids = set(self.push_messages(messages)) - - # Pull messages - end_time = time.time() + (timeout if timeout is not None else 0.0) - ret: List[Message] = [] - while timeout is None or time.time() < end_time: - res_msgs = self.pull_messages(msg_ids) - ret.extend(res_msgs) - msg_ids.difference_update( - {msg.metadata.reply_to_message for msg in res_msgs} - ) - if len(msg_ids) == 0: - break - # Sleep - time.sleep(3) - return ret - - def close(self) -> None: - """Disconnect from the SuperLink if connected.""" - # Check if GrpcDriver is initialized - if self.grpc_driver is None: - return - # Disconnect - self.grpc_driver.disconnect() diff --git a/src/py/flwr/server/driver/grpc_driver.py b/src/py/flwr/server/driver/grpc_driver.py index b6e2b2602cd5..d339f1b232f9 100644 --- a/src/py/flwr/server/driver/grpc_driver.py +++ b/src/py/flwr/server/driver/grpc_driver.py @@ -1,4 +1,4 @@ -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# Copyright 2022 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,17 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""Flower driver service client.""" - +"""Flower gRPC Driver.""" +import time +import warnings from logging import DEBUG, ERROR, WARNING -from typing import Optional +from typing import Iterable, List, Optional, Tuple import grpc -from flwr.common import EventType, event +from flwr.common import DEFAULT_TTL, EventType, Message, Metadata, RecordSet, event from flwr.common.grpc import create_channel from flwr.common.logger import log +from flwr.common.serde import message_from_taskres, message_to_taskins from flwr.proto.driver_pb2 import ( # pylint: disable=E0611 CreateRunRequest, CreateRunResponse, @@ -34,19 +36,23 @@ PushTaskInsResponse, ) from flwr.proto.driver_pb2_grpc import DriverStub # pylint: disable=E0611 +from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.task_pb2 import TaskIns # pylint: disable=E0611 + +from .driver import Driver DEFAULT_SERVER_ADDRESS_DRIVER = "[::]:9091" ERROR_MESSAGE_DRIVER_NOT_CONNECTED = """ [Driver] Error: Not connected. -Call `connect()` on the `GrpcDriver` instance before calling any of the other -`GrpcDriver` methods. +Call `connect()` on the `GrpcDriverHelper` instance before calling any of the other +`GrpcDriverHelper` methods. 
""" -class GrpcDriver: - """`GrpcDriver` provides access to the gRPC Driver API/service.""" +class GrpcDriverHelper: + """`GrpcDriverHelper` provides access to the gRPC Driver API/service.""" def __init__( self, @@ -89,7 +95,7 @@ def create_run(self, req: CreateRunRequest) -> CreateRunResponse: # Check if channel is open if self.stub is None: log(ERROR, ERROR_MESSAGE_DRIVER_NOT_CONNECTED) - raise ConnectionError("`GrpcDriver` instance not connected") + raise ConnectionError("`GrpcDriverHelper` instance not connected") # Call Driver API res: CreateRunResponse = self.stub.CreateRun(request=req) @@ -100,7 +106,7 @@ def get_nodes(self, req: GetNodesRequest) -> GetNodesResponse: # Check if channel is open if self.stub is None: log(ERROR, ERROR_MESSAGE_DRIVER_NOT_CONNECTED) - raise ConnectionError("`GrpcDriver` instance not connected") + raise ConnectionError("`GrpcDriverHelper` instance not connected") # Call gRPC Driver API res: GetNodesResponse = self.stub.GetNodes(request=req) @@ -111,7 +117,7 @@ def push_task_ins(self, req: PushTaskInsRequest) -> PushTaskInsResponse: # Check if channel is open if self.stub is None: log(ERROR, ERROR_MESSAGE_DRIVER_NOT_CONNECTED) - raise ConnectionError("`GrpcDriver` instance not connected") + raise ConnectionError("`GrpcDriverHelper` instance not connected") # Call gRPC Driver API res: PushTaskInsResponse = self.stub.PushTaskIns(request=req) @@ -122,8 +128,188 @@ def pull_task_res(self, req: PullTaskResRequest) -> PullTaskResResponse: # Check if channel is open if self.stub is None: log(ERROR, ERROR_MESSAGE_DRIVER_NOT_CONNECTED) - raise ConnectionError("`GrpcDriver` instance not connected") + raise ConnectionError("`GrpcDriverHelper` instance not connected") # Call Driver API res: PullTaskResResponse = self.stub.PullTaskRes(request=req) return res + + +class GrpcDriver(Driver): + """`Driver` class provides an interface to the Driver API. 
+ + Parameters + ---------- + driver_service_address : Optional[str] + The IPv4 or IPv6 address of the Driver API server. + Defaults to `"[::]:9091"`. + certificates : bytes (default: None) + Tuple containing root certificate, server certificate, and private key + to start a secure SSL-enabled server. The tuple is expected to have + three bytes elements in the following order: + + * CA certificate. + * server certificate. + * server private key. + fab_id : str (default: None) + The identifier of the FAB used in the run. + fab_version : str (default: None) + The version of the FAB used in the run. + """ + + def __init__( + self, + driver_service_address: str = DEFAULT_SERVER_ADDRESS_DRIVER, + root_certificates: Optional[bytes] = None, + fab_id: Optional[str] = None, + fab_version: Optional[str] = None, + ) -> None: + self.addr = driver_service_address + self.root_certificates = root_certificates + self.driver_helper: Optional[GrpcDriverHelper] = None + self.run_id: Optional[int] = None + self.fab_id = fab_id if fab_id is not None else "" + self.fab_version = fab_version if fab_version is not None else "" + self.node = Node(node_id=0, anonymous=True) + + def _get_grpc_driver_helper_and_run_id(self) -> Tuple[GrpcDriverHelper, int]: + # Check if the GrpcDriverHelper is initialized + if self.driver_helper is None or self.run_id is None: + # Connect and create run + self.driver_helper = GrpcDriverHelper( + driver_service_address=self.addr, + root_certificates=self.root_certificates, + ) + self.driver_helper.connect() + req = CreateRunRequest(fab_id=self.fab_id, fab_version=self.fab_version) + res = self.driver_helper.create_run(req) + self.run_id = res.run_id + return self.driver_helper, self.run_id + + def _check_message(self, message: Message) -> None: + # Check if the message is valid + if not ( + message.metadata.run_id == self.run_id + and message.metadata.src_node_id == self.node.node_id + and message.metadata.message_id == "" + and 
message.metadata.reply_to_message == "" + and message.metadata.ttl > 0 + ): + raise ValueError(f"Invalid message: {message}") + + def create_message( # pylint: disable=too-many-arguments + self, + content: RecordSet, + message_type: str, + dst_node_id: int, + group_id: str, + ttl: Optional[float] = None, + ) -> Message: + """Create a new message with specified parameters. + + This method constructs a new `Message` with given content and metadata. + The `run_id` and `src_node_id` will be set automatically. + """ + _, run_id = self._get_grpc_driver_helper_and_run_id() + if ttl: + warnings.warn( + "A custom TTL was set, but note that the SuperLink does not enforce " + "the TTL yet. The SuperLink will start enforcing the TTL in a future " + "version of Flower.", + stacklevel=2, + ) + + ttl_ = DEFAULT_TTL if ttl is None else ttl + metadata = Metadata( + run_id=run_id, + message_id="", # Will be set by the server + src_node_id=self.node.node_id, + dst_node_id=dst_node_id, + reply_to_message="", + group_id=group_id, + ttl=ttl_, + message_type=message_type, + ) + return Message(metadata=metadata, content=content) + + def get_node_ids(self) -> List[int]: + """Get node IDs.""" + grpc_driver_helper, run_id = self._get_grpc_driver_helper_and_run_id() + # Call GrpcDriverHelper method + res = grpc_driver_helper.get_nodes(GetNodesRequest(run_id=run_id)) + return [node.node_id for node in res.nodes] + + def push_messages(self, messages: Iterable[Message]) -> Iterable[str]: + """Push messages to specified node IDs. + + This method takes an iterable of messages and sends each message + to the node specified in `dst_node_id`. 
+ """ + grpc_driver_helper, _ = self._get_grpc_driver_helper_and_run_id() + # Construct TaskIns + task_ins_list: List[TaskIns] = [] + for msg in messages: + # Check message + self._check_message(msg) + # Convert Message to TaskIns + taskins = message_to_taskins(msg) + # Add to list + task_ins_list.append(taskins) + # Call GrpcDriverHelper method + res = grpc_driver_helper.push_task_ins( + PushTaskInsRequest(task_ins_list=task_ins_list) + ) + return list(res.task_ids) + + def pull_messages(self, message_ids: Iterable[str]) -> Iterable[Message]: + """Pull messages based on message IDs. + + This method is used to collect messages from the SuperLink that correspond to a + set of given message IDs. + """ + grpc_driver, _ = self._get_grpc_driver_helper_and_run_id() + # Pull TaskRes + res = grpc_driver.pull_task_res( + PullTaskResRequest(node=self.node, task_ids=message_ids) + ) + # Convert TaskRes to Message + msgs = [message_from_taskres(taskres) for taskres in res.task_res_list] + return msgs + + def send_and_receive( + self, + messages: Iterable[Message], + *, + timeout: Optional[float] = None, + ) -> Iterable[Message]: + """Push messages to specified node IDs and pull the reply messages. + + This method sends a list of messages to their destination node IDs and then + waits for the replies. It continues to pull replies until either all replies are + received or the specified timeout duration is exceeded. 
+ """ + # Push messages + msg_ids = set(self.push_messages(messages)) + + # Pull messages + end_time = time.time() + (timeout if timeout is not None else 0.0) + ret: List[Message] = [] + while timeout is None or time.time() < end_time: + res_msgs = self.pull_messages(msg_ids) + ret.extend(res_msgs) + msg_ids.difference_update( + {msg.metadata.reply_to_message for msg in res_msgs} + ) + if len(msg_ids) == 0: + break + # Sleep + time.sleep(3) + return ret + + def close(self) -> None: + """Disconnect from the SuperLink if connected.""" + # Check if GrpcDriverHelper is initialized + if self.driver_helper is None: + return + # Disconnect + self.driver_helper.disconnect() diff --git a/src/py/flwr/server/driver/driver_test.py b/src/py/flwr/server/driver/grpc_driver_test.py similarity index 73% rename from src/py/flwr/server/driver/driver_test.py rename to src/py/flwr/server/driver/grpc_driver_test.py index 3f1cd552250f..fbead0e3043d 100644 --- a/src/py/flwr/server/driver/driver_test.py +++ b/src/py/flwr/server/driver/grpc_driver_test.py @@ -29,49 +29,50 @@ ) from flwr.proto.task_pb2 import Task, TaskRes # pylint: disable=E0611 -from .driver import Driver +from .grpc_driver import GrpcDriver -class TestDriver(unittest.TestCase): - """Tests for `Driver` class.""" +class TestGrpcDriver(unittest.TestCase): + """Tests for `GrpcDriver` class.""" def setUp(self) -> None: - """Initialize mock GrpcDriver and Driver instance before each test.""" + """Initialize mock GrpcDriverHelper and Driver instance before each test.""" mock_response = Mock() mock_response.run_id = 61016 - self.mock_grpc_driver = Mock() - self.mock_grpc_driver.create_run.return_value = mock_response + self.mock_grpc_driver_helper = Mock() + self.mock_grpc_driver_helper.create_run.return_value = mock_response self.patcher = patch( - "flwr.server.driver.driver.GrpcDriver", return_value=self.mock_grpc_driver + "flwr.server.driver.grpc_driver.GrpcDriverHelper", + return_value=self.mock_grpc_driver_helper, ) 
self.patcher.start() - self.driver = Driver() + self.driver = GrpcDriver() def tearDown(self) -> None: """Cleanup after each test.""" self.patcher.stop() def test_check_and_init_grpc_driver_already_initialized(self) -> None: - """Test that GrpcDriver doesn't initialize if run is created.""" + """Test that GrpcDriverHelper doesn't initialize if run is created.""" # Prepare - self.driver.grpc_driver = self.mock_grpc_driver + self.driver.driver_helper = self.mock_grpc_driver_helper self.driver.run_id = 61016 # Execute # pylint: disable-next=protected-access - self.driver._get_grpc_driver_and_run_id() + self.driver._get_grpc_driver_helper_and_run_id() # Assert - self.mock_grpc_driver.connect.assert_not_called() + self.mock_grpc_driver_helper.connect.assert_not_called() def test_check_and_init_grpc_driver_needs_initialization(self) -> None: - """Test GrpcDriver initialization when run is not created.""" + """Test GrpcDriverHelper initialization when run is not created.""" # Execute # pylint: disable-next=protected-access - self.driver._get_grpc_driver_and_run_id() + self.driver._get_grpc_driver_helper_and_run_id() # Assert - self.mock_grpc_driver.connect.assert_called_once() + self.mock_grpc_driver_helper.connect.assert_called_once() self.assertEqual(self.driver.run_id, 61016) def test_get_nodes(self) -> None: @@ -79,14 +80,14 @@ def test_get_nodes(self) -> None: # Prepare mock_response = Mock() mock_response.nodes = [Mock(node_id=404), Mock(node_id=200)] - self.mock_grpc_driver.get_nodes.return_value = mock_response + self.mock_grpc_driver_helper.get_nodes.return_value = mock_response # Execute node_ids = self.driver.get_node_ids() - args, kwargs = self.mock_grpc_driver.get_nodes.call_args + args, kwargs = self.mock_grpc_driver_helper.get_nodes.call_args # Assert - self.mock_grpc_driver.connect.assert_called_once() + self.mock_grpc_driver_helper.connect.assert_called_once() self.assertEqual(len(args), 1) self.assertEqual(len(kwargs), 0) self.assertIsInstance(args[0], 
GetNodesRequest) @@ -97,7 +98,7 @@ def test_push_messages_valid(self) -> None: """Test pushing valid messages.""" # Prepare mock_response = Mock(task_ids=["id1", "id2"]) - self.mock_grpc_driver.push_task_ins.return_value = mock_response + self.mock_grpc_driver_helper.push_task_ins.return_value = mock_response msgs = [ self.driver.create_message(RecordSet(), "", 0, "", DEFAULT_TTL) for _ in range(2) @@ -105,10 +106,10 @@ def test_push_messages_valid(self) -> None: # Execute msg_ids = self.driver.push_messages(msgs) - args, kwargs = self.mock_grpc_driver.push_task_ins.call_args + args, kwargs = self.mock_grpc_driver_helper.push_task_ins.call_args # Assert - self.mock_grpc_driver.connect.assert_called_once() + self.mock_grpc_driver_helper.connect.assert_called_once() self.assertEqual(len(args), 1) self.assertEqual(len(kwargs), 0) self.assertIsInstance(args[0], PushTaskInsRequest) @@ -120,13 +121,13 @@ def test_push_messages_invalid(self) -> None: """Test pushing invalid messages.""" # Prepare mock_response = Mock(task_ids=["id1", "id2"]) - self.mock_grpc_driver.push_task_ins.return_value = mock_response + self.mock_grpc_driver_helper.push_task_ins.return_value = mock_response msgs = [ self.driver.create_message(RecordSet(), "", 0, "", DEFAULT_TTL) for _ in range(2) ] # Use invalid run_id - msgs[1].metadata._run_id += 1 # pylint: disable=protected-access + msgs[1].metadata.__dict__["_run_id"] += 1 # pylint: disable=protected-access # Execute and assert with self.assertRaises(ValueError): @@ -144,16 +145,16 @@ def test_pull_messages_with_given_message_ids(self) -> None: ), TaskRes(task=Task(ancestry=["id3"], error=error_to_proto(Error(code=0)))), ] - self.mock_grpc_driver.pull_task_res.return_value = mock_response + self.mock_grpc_driver_helper.pull_task_res.return_value = mock_response msg_ids = ["id1", "id2", "id3"] # Execute msgs = self.driver.pull_messages(msg_ids) reply_tos = {msg.metadata.reply_to_message for msg in msgs} - args, kwargs = 
self.mock_grpc_driver.pull_task_res.call_args + args, kwargs = self.mock_grpc_driver_helper.pull_task_res.call_args # Assert - self.mock_grpc_driver.connect.assert_called_once() + self.mock_grpc_driver_helper.connect.assert_called_once() self.assertEqual(len(args), 1) self.assertEqual(len(kwargs), 0) self.assertIsInstance(args[0], PullTaskResRequest) @@ -164,14 +165,14 @@ def test_send_and_receive_messages_complete(self) -> None: """Test send and receive all messages successfully.""" # Prepare mock_response = Mock(task_ids=["id1"]) - self.mock_grpc_driver.push_task_ins.return_value = mock_response + self.mock_grpc_driver_helper.push_task_ins.return_value = mock_response # The response message must include either `content` (i.e. a recordset) or # an `Error`. We choose the latter in this case error_proto = error_to_proto(Error(code=0)) mock_response = Mock( task_res_list=[TaskRes(task=Task(ancestry=["id1"], error=error_proto))] ) - self.mock_grpc_driver.pull_task_res.return_value = mock_response + self.mock_grpc_driver_helper.pull_task_res.return_value = mock_response msgs = [self.driver.create_message(RecordSet(), "", 0, "", DEFAULT_TTL)] # Execute @@ -186,9 +187,9 @@ def test_send_and_receive_messages_timeout(self) -> None: # Prepare sleep_fn = time.sleep mock_response = Mock(task_ids=["id1"]) - self.mock_grpc_driver.push_task_ins.return_value = mock_response + self.mock_grpc_driver_helper.push_task_ins.return_value = mock_response mock_response = Mock(task_res_list=[]) - self.mock_grpc_driver.pull_task_res.return_value = mock_response + self.mock_grpc_driver_helper.pull_task_res.return_value = mock_response msgs = [self.driver.create_message(RecordSet(), "", 0, "", DEFAULT_TTL)] # Execute @@ -204,13 +205,13 @@ def test_del_with_initialized_driver(self) -> None: """Test cleanup behavior when Driver is initialized.""" # Prepare # pylint: disable-next=protected-access - self.driver._get_grpc_driver_and_run_id() + self.driver._get_grpc_driver_helper_and_run_id() # 
Execute self.driver.close() # Assert - self.mock_grpc_driver.disconnect.assert_called_once() + self.mock_grpc_driver_helper.disconnect.assert_called_once() def test_del_with_uninitialized_driver(self) -> None: """Test cleanup behavior when Driver is not initialized.""" @@ -218,4 +219,4 @@ def test_del_with_uninitialized_driver(self) -> None: self.driver.close() # Assert - self.mock_grpc_driver.disconnect.assert_not_called() + self.mock_grpc_driver_helper.disconnect.assert_not_called() diff --git a/src/py/flwr/server/driver/inmemory_driver.py b/src/py/flwr/server/driver/inmemory_driver.py new file mode 100644 index 000000000000..8c71b1067293 --- /dev/null +++ b/src/py/flwr/server/driver/inmemory_driver.py @@ -0,0 +1,181 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flower in-memory Driver.""" + + +import time +import warnings +from typing import Iterable, List, Optional +from uuid import UUID + +from flwr.common import DEFAULT_TTL, Message, Metadata, RecordSet +from flwr.common.serde import message_from_taskres, message_to_taskins +from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.server.superlink.state import StateFactory + +from .driver import Driver + + +class InMemoryDriver(Driver): + """`InMemoryDriver` class provides an interface to the Driver API. 
+ + Parameters + ---------- + state_factory : StateFactory + A StateFactory embedding a state that this driver can interface with. + fab_id : str (default: None) + The identifier of the FAB used in the run. + fab_version : str (default: None) + The version of the FAB used in the run. + """ + + def __init__( + self, + state_factory: StateFactory, + fab_id: Optional[str] = None, + fab_version: Optional[str] = None, + ) -> None: + self.run_id: Optional[int] = None + self.fab_id = fab_id if fab_id is not None else "" + self.fab_version = fab_version if fab_version is not None else "" + self.node = Node(node_id=0, anonymous=True) + self.state = state_factory.state() + + def _check_message(self, message: Message) -> None: + # Check if the message is valid + if not ( + message.metadata.run_id == self.run_id + and message.metadata.src_node_id == self.node.node_id + and message.metadata.message_id == "" + and message.metadata.reply_to_message == "" + and message.metadata.ttl > 0 + ): + raise ValueError(f"Invalid message: {message}") + + def _get_run_id(self) -> int: + """Return run_id. + + If unset, create a new run. + """ + if self.run_id is None: + self.run_id = self.state.create_run( + fab_id=self.fab_id, fab_version=self.fab_version + ) + return self.run_id + + def create_message( # pylint: disable=too-many-arguments + self, + content: RecordSet, + message_type: str, + dst_node_id: int, + group_id: str, + ttl: Optional[float] = None, + ) -> Message: + """Create a new message with specified parameters. + + This method constructs a new `Message` with given content and metadata. + The `run_id` and `src_node_id` will be set automatically. + """ + run_id = self._get_run_id() + if ttl: + warnings.warn( + "A custom TTL was set, but note that the SuperLink does not enforce " + "the TTL yet. 
The SuperLink will start enforcing the TTL in a future " + "version of Flower.", + stacklevel=2, + ) + ttl_ = DEFAULT_TTL if ttl is None else ttl + + metadata = Metadata( + run_id=run_id, + message_id="", # Will be set by the server + src_node_id=self.node.node_id, + dst_node_id=dst_node_id, + reply_to_message="", + group_id=group_id, + ttl=ttl_, + message_type=message_type, + ) + return Message(metadata=metadata, content=content) + + def get_node_ids(self) -> List[int]: + """Get node IDs.""" + run_id = self._get_run_id() + return list(self.state.get_nodes(run_id)) + + def push_messages(self, messages: Iterable[Message]) -> Iterable[str]: + """Push messages to specified node IDs. + + This method takes an iterable of messages and sends each message + to the node specified in `dst_node_id`. + """ + task_ids: List[str] = [] + for msg in messages: + # Check message + self._check_message(msg) + # Convert Message to TaskIns + taskins = message_to_taskins(msg) + # Store in state + taskins.task.pushed_at = time.time() + task_id = self.state.store_task_ins(taskins) + if task_id: + task_ids.append(str(task_id)) + + return task_ids + + def pull_messages(self, message_ids: Iterable[str]) -> Iterable[Message]: + """Pull messages based on message IDs. + + This method is used to collect messages from the SuperLink that correspond to a + set of given message IDs. + """ + msg_ids = {UUID(msg_id) for msg_id in message_ids} + # Pull TaskRes + task_res_list = self.state.get_task_res(task_ids=msg_ids, limit=len(msg_ids)) + # Delete tasks in state + self.state.delete_tasks(msg_ids) + # Convert TaskRes to Message + msgs = [message_from_taskres(taskres) for taskres in task_res_list] + return msgs + + def send_and_receive( + self, + messages: Iterable[Message], + *, + timeout: Optional[float] = None, + ) -> Iterable[Message]: + """Push messages to specified node IDs and pull the reply messages. 
+
+        This method sends a list of messages to their destination node IDs and then
+        waits for the replies. It continues to pull replies until either all replies are
+        received or the specified timeout duration is exceeded.
+        """
+        # Push messages
+        msg_ids = set(self.push_messages(messages))
+
+        # Pull messages
+        end_time = time.time() + (timeout if timeout is not None else 0.0)
+        ret: List[Message] = []
+        while timeout is None or time.time() < end_time:
+            res_msgs = self.pull_messages(msg_ids)
+            ret.extend(res_msgs)
+            msg_ids.difference_update(
+                {msg.metadata.reply_to_message for msg in res_msgs}
+            )
+            if len(msg_ids) == 0:
+                break
+            # Sleep
+            time.sleep(3)
+        return ret
diff --git a/src/py/flwr/server/driver/inmemory_driver_test.py b/src/py/flwr/server/driver/inmemory_driver_test.py
new file mode 100644
index 000000000000..95c2a0b277af
--- /dev/null
+++ b/src/py/flwr/server/driver/inmemory_driver_test.py
@@ -0,0 +1,247 @@
+# Copyright 2024 Flower Labs GmbH. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================== +"""Tests for in-memory driver.""" + + +import os +import time +import unittest +from typing import Iterable, List, Tuple +from unittest.mock import MagicMock, patch +from uuid import uuid4 + +from flwr.common import RecordSet +from flwr.common.constant import PING_MAX_INTERVAL +from flwr.common.message import Error +from flwr.common.serde import ( + error_to_proto, + message_from_taskins, + message_to_taskres, + recordset_to_proto, +) +from flwr.proto.task_pb2 import Task, TaskRes # pylint: disable=E0611 +from flwr.server.superlink.state import StateFactory + +from .inmemory_driver import InMemoryDriver + + +def push_messages(driver: InMemoryDriver, num_nodes: int) -> Tuple[Iterable[str], int]: + """Help push messages to state.""" + for _ in range(num_nodes): + driver.state.create_node(ping_interval=PING_MAX_INTERVAL) + num_messages = 3 + node_id = 1 + msgs = [ + driver.create_message(RecordSet(), "message_type", node_id, "") + for _ in range(num_messages) + ] + + # Execute: push messages + return driver.push_messages(msgs), node_id + + +def get_replies( + driver: InMemoryDriver, msg_ids: Iterable[str], node_id: int +) -> List[str]: + """Help create message replies and pull taskres from state.""" + taskins = driver.state.get_task_ins(node_id, limit=len(list(msg_ids))) + for taskin in taskins: + msg = message_from_taskins(taskin) + reply_msg = msg.create_reply(RecordSet()) + task_res = message_to_taskres(reply_msg) + task_res.task.pushed_at = time.time() + driver.state.store_task_res(task_res=task_res) + + # Execute: Pull messages + pulled_msgs = driver.pull_messages(msg_ids) + return [msg.metadata.reply_to_message for msg in pulled_msgs] + + +class TestInMemoryDriver(unittest.TestCase): + """Tests for `InMemoryDriver` class.""" + + def setUp(self) -> None: + """Initialize State and Driver instance before each test. + + Driver uses the default StateFactory (i.e. 
SQLite) + """ + # Create driver + self.num_nodes = 42 + self.driver = InMemoryDriver(StateFactory("")) + self.driver.state = MagicMock() + self.driver.state.get_nodes.return_value = [ + int.from_bytes(os.urandom(8), "little", signed=True) + for _ in range(self.num_nodes) + ] + + def test_get_nodes(self) -> None: + """Test retrieval of nodes.""" + # Execute + node_ids = self.driver.get_node_ids() + + # Assert + self.assertEqual(len(node_ids), self.num_nodes) + + def test_push_messages_valid(self) -> None: + """Test pushing valid messages.""" + # Prepare + num_messages = 2 + msgs = [ + self.driver.create_message(RecordSet(), "message_type", 1, "") + for _ in range(num_messages) + ] + + taskins_ids = [uuid4() for _ in range(num_messages)] + self.driver.state.store_task_ins.side_effect = taskins_ids # type: ignore + + # Execute + msg_ids = list(self.driver.push_messages(msgs)) + + # Assert + self.assertEqual(len(msg_ids), 2) + self.assertEqual(msg_ids, [str(ids) for ids in taskins_ids]) + + def test_push_messages_invalid(self) -> None: + """Test pushing invalid messages.""" + # Prepare + msgs = [ + self.driver.create_message(RecordSet(), "message_type", 1, "") + for _ in range(2) + ] + # Use invalid run_id + msgs[1].metadata._run_id += 1 # type: ignore + + # Execute and assert + with self.assertRaises(ValueError): + self.driver.push_messages(msgs) + + def test_pull_messages_with_given_message_ids(self) -> None: + """Test pulling messages with specific message IDs.""" + # Prepare + msg_ids = [str(uuid4()) for _ in range(2)] + task_res_list = [ + TaskRes( + task=Task( + ancestry=[msg_ids[0]], recordset=recordset_to_proto(RecordSet()) + ) + ), + TaskRes( + task=Task(ancestry=[msg_ids[1]], error=error_to_proto(Error(code=0))) + ), + ] + self.driver.state.get_task_res.return_value = task_res_list # type: ignore + + # Execute + pulled_msgs = list(self.driver.pull_messages(msg_ids)) + reply_tos = [msg.metadata.reply_to_message for msg in pulled_msgs] + + # Assert + 
self.assertEqual(len(pulled_msgs), 2) + self.assertEqual(reply_tos, msg_ids) + + def test_send_and_receive_messages_complete(self) -> None: + """Test send and receive all messages successfully.""" + # Prepare + msgs = [self.driver.create_message(RecordSet(), "", 0, "")] + # Prepare + msg_ids = [str(uuid4()) for _ in range(2)] + task_res_list = [ + TaskRes( + task=Task( + ancestry=[msg_ids[0]], recordset=recordset_to_proto(RecordSet()) + ) + ), + TaskRes( + task=Task(ancestry=[msg_ids[1]], error=error_to_proto(Error(code=0))) + ), + ] + self.driver.state.store_task_ins.side_effect = msg_ids # type: ignore + self.driver.state.get_task_res.return_value = task_res_list # type: ignore + + # Execute + ret_msgs = list(self.driver.send_and_receive(msgs)) + reply_tos = [msg.metadata.reply_to_message for msg in ret_msgs] + # Assert + self.assertEqual(len(ret_msgs), 2) + self.assertEqual(reply_tos, msg_ids) + + def test_send_and_receive_messages_timeout(self) -> None: + """Test send and receive messages but time out.""" + # Prepare + msgs = [self.driver.create_message(RecordSet(), "", 0, "")] + # Prepare + msg_ids = [str(uuid4()) for _ in range(2)] + task_res_list = [ + TaskRes( + task=Task( + ancestry=[msg_ids[0]], recordset=recordset_to_proto(RecordSet()) + ) + ), + TaskRes( + task=Task(ancestry=[msg_ids[1]], error=error_to_proto(Error(code=0))) + ), + ] + self.driver.state.store_task_ins.side_effect = msg_ids # type: ignore + self.driver.state.get_task_res.return_value = task_res_list # type: ignore + + # Execute + with patch("time.sleep", side_effect=lambda t: time.sleep(t * 0.01)): + start_time = time.time() + ret_msgs = list(self.driver.send_and_receive(msgs, timeout=-1)) + + # Assert + self.assertLess(time.time() - start_time, 0.2) + self.assertEqual(len(ret_msgs), 0) + + def test_task_store_consistency_after_push_pull_sqlitestate(self) -> None: + """Test tasks are deleted in sqlite state once messages are pulled.""" + # Prepare + self.driver = 
InMemoryDriver(StateFactory("")) + msg_ids, node_id = push_messages(self.driver, self.num_nodes) + + # Check recorded + task_ins = self.driver.state.query("SELECT * FROM task_ins;") # type: ignore + self.assertEqual(len(task_ins), len(list(msg_ids))) + + # Prepare: create replies + reply_tos = get_replies(self.driver, msg_ids, node_id) + + # Query number of task_ins and task_res in State + task_res = self.driver.state.query("SELECT * FROM task_res;") # type: ignore + task_ins = self.driver.state.query("SELECT * FROM task_ins;") # type: ignore + + # Assert + self.assertEqual(reply_tos, msg_ids) + self.assertEqual(len(task_res), 0) + self.assertEqual(len(task_ins), 0) + + def test_task_store_consistency_after_push_pull_inmemory_state(self) -> None: + """Test tasks are deleted in in-memory state once messages are pulled.""" + # Prepare + self.driver = InMemoryDriver(StateFactory(":flwr-in-memory-state:")) + msg_ids, node_id = push_messages(self.driver, self.num_nodes) + + # Check recorded + self.assertEqual( + len(self.driver.state.task_ins_store), len(list(msg_ids)) # type: ignore + ) + + # Prepare: create replies + reply_tos = get_replies(self.driver, msg_ids, node_id) + + # Assert + self.assertEqual(reply_tos, msg_ids) + self.assertEqual(len(self.driver.state.task_res_store), 0) # type: ignore + self.assertEqual(len(self.driver.state.task_ins_store), 0) # type: ignore diff --git a/src/py/flwr/server/history.py b/src/py/flwr/server/history.py index c4298911d97b..291974a4323c 100644 --- a/src/py/flwr/server/history.py +++ b/src/py/flwr/server/history.py @@ -91,32 +91,32 @@ def __repr__(self) -> str: """ rep = "" if self.losses_distributed: - rep += "History (loss, distributed):\n" + pprint.pformat( - reduce( - lambda a, b: a + b, - [ - f"\tround {server_round}: {loss}\n" - for server_round, loss in self.losses_distributed - ], - ) + rep += "History (loss, distributed):\n" + reduce( + lambda a, b: a + b, + [ + f"\tround {server_round}: {loss}\n" + for server_round, 
loss in self.losses_distributed + ], ) if self.losses_centralized: - rep += "History (loss, centralized):\n" + pprint.pformat( - reduce( - lambda a, b: a + b, - [ - f"\tround {server_round}: {loss}\n" - for server_round, loss in self.losses_centralized - ], - ) + rep += "History (loss, centralized):\n" + reduce( + lambda a, b: a + b, + [ + f"\tround {server_round}: {loss}\n" + for server_round, loss in self.losses_centralized + ], ) if self.metrics_distributed_fit: - rep += "History (metrics, distributed, fit):\n" + pprint.pformat( - self.metrics_distributed_fit + rep += ( + "History (metrics, distributed, fit):\n" + + pprint.pformat(self.metrics_distributed_fit) + + "\n" ) if self.metrics_distributed: - rep += "History (metrics, distributed, evaluate):\n" + pprint.pformat( - self.metrics_distributed + rep += ( + "History (metrics, distributed, evaluate):\n" + + pprint.pformat(self.metrics_distributed) + + "\n" ) if self.metrics_centralized: rep += "History (metrics, centralized):\n" + pprint.pformat( diff --git a/src/py/flwr/server/run_serverapp.py b/src/py/flwr/server/run_serverapp.py index 2f0f1185847e..9cc7974d34da 100644 --- a/src/py/flwr/server/run_serverapp.py +++ b/src/py/flwr/server/run_serverapp.py @@ -25,7 +25,7 @@ from flwr.common.logger import log, update_console_handler from flwr.common.object_ref import load_app -from .driver.driver import Driver +from .driver import Driver, GrpcDriver from .server_app import LoadServerAppError, ServerApp @@ -128,13 +128,15 @@ def run_server_app() -> None: server_app_dir = args.dir server_app_attr = getattr(args, "server-app") - # Initialize Driver - driver = Driver( + # Initialize GrpcDriver + driver = GrpcDriver( driver_service_address=args.server, root_certificates=root_certificates, + fab_id=args.fab_id, + fab_version=args.fab_version, ) - # Run the Server App with the Driver + # Run the ServerApp with the Driver run(driver=driver, server_app_dir=server_app_dir, server_app_attr=server_app_attr) # Clean up @@ 
-183,5 +185,17 @@ def _parse_args_run_server_app() -> argparse.ArgumentParser: "app from there." " Default: current working directory.", ) + parser.add_argument( + "--fab-id", + default=None, + type=str, + help="The identifier of the FAB used in the run.", + ) + parser.add_argument( + "--fab-version", + default=None, + type=str, + help="The version of the FAB used in the run.", + ) return parser diff --git a/src/py/flwr/server/server.py b/src/py/flwr/server/server.py index 981325a6df08..f1bfb6f0533b 100644 --- a/src/py/flwr/server/server.py +++ b/src/py/flwr/server/server.py @@ -282,7 +282,14 @@ def _get_initial_parameters( get_parameters_res = random_client.get_parameters( ins=ins, timeout=timeout, group_id=server_round ) - log(INFO, "Received initial parameters from one random client") + if get_parameters_res.status.code == Code.OK: + log(INFO, "Received initial parameters from one random client") + else: + log( + WARN, + "Failed to receive initial parameters from the client." + " Empty initial parameters will be used.", + ) return get_parameters_res.parameters @@ -486,12 +493,9 @@ def run_fl( log(INFO, "") log(INFO, "[SUMMARY]") - log(INFO, "Run finished %s rounds in %.2fs", config.num_rounds, elapsed_time) - for idx, line in enumerate(io.StringIO(str(hist))): - if idx == 0: - log(INFO, "%s", line.strip("\n")) - else: - log(INFO, "\t%s", line.strip("\n")) + log(INFO, "Run finished %s round(s) in %.2fs", config.num_rounds, elapsed_time) + for line in io.StringIO(str(hist)): + log(INFO, "\t%s", line.strip("\n")) log(INFO, "") # Graceful shutdown diff --git a/src/py/flwr/server/server_app.py b/src/py/flwr/server/server_app.py index 1b2eab87fdaa..ea2eb3fd1a69 100644 --- a/src/py/flwr/server/server_app.py +++ b/src/py/flwr/server/server_app.py @@ -18,6 +18,7 @@ from typing import Callable, Optional from flwr.common import Context, RecordSet +from flwr.common.logger import warn_preview_feature from flwr.server.strategy import Strategy from .client_manager import 
ClientManager @@ -120,6 +121,8 @@ def main_decorator(main_fn: ServerAppCallable) -> ServerAppCallable: """, ) + warn_preview_feature("ServerApp-register-main-function") + # Register provided function with the ServerApp object self._main = main_fn diff --git a/src/py/flwr/server/server_test.py b/src/py/flwr/server/server_test.py index 274e5289fee1..51071c13f895 100644 --- a/src/py/flwr/server/server_test.py +++ b/src/py/flwr/server/server_test.py @@ -15,9 +15,22 @@ """Flower server tests.""" +import argparse +import csv +import tempfile +from pathlib import Path from typing import List, Optional import numpy as np +from cryptography.hazmat.primitives.asymmetric import ec +from cryptography.hazmat.primitives.serialization import ( + Encoding, + NoEncryption, + PrivateFormat, + PublicFormat, + load_ssh_private_key, + load_ssh_public_key, +) from flwr.common import ( Code, @@ -35,8 +48,14 @@ Status, ndarray_to_bytes, ) +from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( + generate_key_pairs, + private_key_to_bytes, + public_key_to_bytes, +) from flwr.server.client_manager import SimpleClientManager +from .app import _try_setup_client_authentication from .client_proxy import ClientProxy from .server import Server, evaluate_clients, fit_clients @@ -182,3 +201,71 @@ def test_set_max_workers() -> None: # Assert assert server.max_workers == 42 + + +def test_setup_client_auth() -> None: # pylint: disable=R0914 + """Test setup client authentication.""" + # Prepare + _, first_public_key = generate_key_pairs() + private_key, public_key = generate_key_pairs() + + server_public_key = public_key.public_bytes( + encoding=Encoding.OpenSSH, format=PublicFormat.OpenSSH + ) + server_private_key = private_key.private_bytes( + Encoding.PEM, PrivateFormat.OpenSSH, NoEncryption() + ) + _, second_public_key = generate_key_pairs() + + # Execute + with tempfile.TemporaryDirectory() as temp_dir: + # Initialize temporary files + client_keys_file_path = Path(temp_dir) / 
"client_keys.csv" + server_private_key_path = Path(temp_dir) / "server_private_key" + server_public_key_path = Path(temp_dir) / "server_public_key" + + # Fill the files with relevant keys + with open(client_keys_file_path, "w", newline="", encoding="utf-8") as csvfile: + writer = csv.writer(csvfile) + writer.writerow( + [ + first_public_key.public_bytes( + encoding=Encoding.OpenSSH, format=PublicFormat.OpenSSH + ).decode(), + second_public_key.public_bytes( + encoding=Encoding.OpenSSH, format=PublicFormat.OpenSSH + ).decode(), + ] + ) + server_public_key_path.write_bytes(server_public_key) + server_private_key_path.write_bytes(server_private_key) + + # Mock argparse with `require-client-authentication`` flag + mock_args = argparse.Namespace( + require_client_authentication=[ + str(client_keys_file_path), + str(server_private_key_path), + str(server_public_key_path), + ] + ) + + # Run _try_setup_client_authentication + result = _try_setup_client_authentication(mock_args, (b"", b"", b"")) + + expected_private_key = load_ssh_private_key(server_private_key, None) + expected_public_key = load_ssh_public_key(server_public_key) + + # Assert + assert isinstance(expected_private_key, ec.EllipticCurvePrivateKey) + assert isinstance(expected_public_key, ec.EllipticCurvePublicKey) + assert result is not None + assert result[0] == { + public_key_to_bytes(first_public_key), + public_key_to_bytes(second_public_key), + } + assert private_key_to_bytes(result[1]) == private_key_to_bytes( + expected_private_key + ) + assert public_key_to_bytes(result[2]) == public_key_to_bytes( + expected_public_key + ) diff --git a/src/py/flwr/server/strategy/dp_adaptive_clipping.py b/src/py/flwr/server/strategy/dp_adaptive_clipping.py index 1acfd4613a0a..b25e1efdf0e9 100644 --- a/src/py/flwr/server/strategy/dp_adaptive_clipping.py +++ b/src/py/flwr/server/strategy/dp_adaptive_clipping.py @@ -200,7 +200,7 @@ def aggregate_fit( log( INFO, - "aggregate_fit: parameters are clipped by value: %s.", + 
"aggregate_fit: parameters are clipped by value: %.4f.", self.clipping_norm, ) @@ -234,7 +234,7 @@ def aggregate_fit( ) log( INFO, - "aggregate_fit: central DP noise with standard deviation: %s added to parameters.", + "aggregate_fit: central DP noise with %.4f stdev added", compute_stdv( self.noise_multiplier, self.clipping_norm, self.num_sampled_clients ), @@ -424,7 +424,7 @@ def aggregate_fit( ) log( INFO, - "aggregate_fit: central DP noise with standard deviation: %s added to parameters.", + "aggregate_fit: central DP noise with %.4f stdev added", compute_stdv( self.noise_multiplier, self.clipping_norm, self.num_sampled_clients ), diff --git a/src/py/flwr/server/strategy/dp_fixed_clipping.py b/src/py/flwr/server/strategy/dp_fixed_clipping.py index 61e8123e28d7..92b2845fd846 100644 --- a/src/py/flwr/server/strategy/dp_fixed_clipping.py +++ b/src/py/flwr/server/strategy/dp_fixed_clipping.py @@ -158,7 +158,7 @@ def aggregate_fit( ) log( INFO, - "aggregate_fit: parameters are clipped by value: %s.", + "aggregate_fit: parameters are clipped by value: %.4f.", self.clipping_norm, ) # Convert back to parameters @@ -180,7 +180,7 @@ def aggregate_fit( log( INFO, - "aggregate_fit: central DP noise with standard deviation: %s added to parameters.", + "aggregate_fit: central DP noise with %.4f stdev added", compute_stdv( self.noise_multiplier, self.clipping_norm, self.num_sampled_clients ), @@ -337,11 +337,12 @@ def aggregate_fit( ) log( INFO, - "aggregate_fit: central DP noise with standard deviation: %s added to parameters.", + "aggregate_fit: central DP noise with %.4f stdev added", compute_stdv( self.noise_multiplier, self.clipping_norm, self.num_sampled_clients ), ) + return aggregated_params, metrics def aggregate_evaluate( diff --git a/src/py/flwr/server/superlink/driver/driver_servicer.py b/src/py/flwr/server/superlink/driver/driver_servicer.py index 59e51ef52d8e..ce2d9d68d8ca 100644 --- a/src/py/flwr/server/superlink/driver/driver_servicer.py +++ 
b/src/py/flwr/server/superlink/driver/driver_servicer.py @@ -15,7 +15,8 @@ """Driver API servicer.""" -from logging import DEBUG, INFO +import time +from logging import DEBUG from typing import List, Optional, Set from uuid import UUID @@ -61,9 +62,9 @@ def CreateRun( self, request: CreateRunRequest, context: grpc.ServicerContext ) -> CreateRunResponse: """Create run ID.""" - log(INFO, "DriverServicer.CreateRun") + log(DEBUG, "DriverServicer.CreateRun") state: State = self.state_factory.state() - run_id = state.create_run() + run_id = state.create_run(request.fab_id, request.fab_version) return CreateRunResponse(run_id=run_id) def PushTaskIns( @@ -72,6 +73,11 @@ def PushTaskIns( """Push a set of TaskIns.""" log(DEBUG, "DriverServicer.PushTaskIns") + # Set pushed_at (timestamp in seconds) + pushed_at = time.time() + for task_ins in request.task_ins_list: + task_ins.task.pushed_at = pushed_at + # Validate request _raise_if(len(request.task_ins_list) == 0, "`task_ins_list` must not be empty") for task_ins in request.task_ins_list: diff --git a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py index 82f049844bd6..6aeaa7ef413f 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py @@ -18,7 +18,7 @@ import concurrent.futures import sys from logging import ERROR -from typing import Any, Callable, Optional, Tuple, Union +from typing import Any, Callable, Optional, Sequence, Tuple, Union import grpc @@ -162,6 +162,7 @@ def generic_create_grpc_server( # pylint: disable=too-many-arguments max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, keepalive_time_ms: int = 210000, certificates: Optional[Tuple[bytes, bytes, bytes]] = None, + interceptors: Optional[Sequence[grpc.ServerInterceptor]] = None, ) -> grpc.Server: """Create a gRPC server with a single servicer. 
@@ -249,6 +250,7 @@ def generic_create_grpc_server( # pylint: disable=too-many-arguments # returning RESOURCE_EXHAUSTED status, or None to indicate no limit. maximum_concurrent_rpcs=max_concurrent_workers, options=options, + interceptors=interceptors, ) add_servicer_to_server_fn(servicer, server) diff --git a/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py b/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py index eb8dd800ea37..03a2ec064213 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py +++ b/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py @@ -26,6 +26,8 @@ CreateNodeResponse, DeleteNodeRequest, DeleteNodeResponse, + GetRunRequest, + GetRunResponse, PingRequest, PingResponse, PullTaskInsRequest, @@ -90,3 +92,13 @@ def PushTaskRes( request=request, state=self.state_factory.state(), ) + + def GetRun( + self, request: GetRunRequest, context: grpc.ServicerContext + ) -> GetRunResponse: + """Get run information.""" + log(INFO, "FleetServicer.GetRun") + return message_handler.get_run( + request=request, + state=self.state_factory.state(), + ) diff --git a/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py new file mode 100644 index 000000000000..6a302679a235 --- /dev/null +++ b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py @@ -0,0 +1,215 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flower server interceptor.""" + + +import base64 +from logging import WARNING +from typing import Any, Callable, Optional, Sequence, Tuple, Union + +import grpc +from cryptography.hazmat.primitives.asymmetric import ec + +from flwr.common.logger import log +from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( + bytes_to_private_key, + bytes_to_public_key, + generate_shared_key, + verify_hmac, +) +from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 + CreateNodeRequest, + CreateNodeResponse, + DeleteNodeRequest, + DeleteNodeResponse, + GetRunRequest, + GetRunResponse, + PingRequest, + PingResponse, + PullTaskInsRequest, + PullTaskInsResponse, + PushTaskResRequest, + PushTaskResResponse, +) +from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.server.superlink.state import State + +_PUBLIC_KEY_HEADER = "public-key" +_AUTH_TOKEN_HEADER = "auth-token" + +Request = Union[ + CreateNodeRequest, + DeleteNodeRequest, + PullTaskInsRequest, + PushTaskResRequest, + GetRunRequest, + PingRequest, +] + +Response = Union[ + CreateNodeResponse, + DeleteNodeResponse, + PullTaskInsResponse, + PushTaskResResponse, + GetRunResponse, + PingResponse, +] + + +def _get_value_from_tuples( + key_string: str, tuples: Sequence[Tuple[str, Union[str, bytes]]] +) -> bytes: + value = next((value for key, value in tuples if key == key_string), "") + if isinstance(value, str): + return value.encode() + + return value + + +class AuthenticateServerInterceptor(grpc.ServerInterceptor): # type: ignore + """Server interceptor for client authentication.""" + + def __init__(self, state: State): + self.state = state + + self.client_public_keys = state.get_client_public_keys() + if len(self.client_public_keys) == 0: + log(WARNING, "Authentication enabled, but no known public 
keys configured") + + private_key = self.state.get_server_private_key() + public_key = self.state.get_server_public_key() + + if private_key is None or public_key is None: + raise ValueError("Error loading authentication keys") + + self.server_private_key = bytes_to_private_key(private_key) + self.encoded_server_public_key = base64.urlsafe_b64encode(public_key) + + def intercept_service( + self, + continuation: Callable[[Any], Any], + handler_call_details: grpc.HandlerCallDetails, + ) -> grpc.RpcMethodHandler: + """Flower server interceptor authentication logic. + + Intercept all unary calls from clients and authenticate clients by validating + auth metadata sent by the client. Continue RPC call if client is authenticated, + else, terminate RPC call by setting context to abort. + """ + # One of the method handlers in + # `flwr.server.superlink.fleet.grpc_rere.fleet_server.FleetServicer` + method_handler: grpc.RpcMethodHandler = continuation(handler_call_details) + return self._generic_auth_unary_method_handler(method_handler) + + def _generic_auth_unary_method_handler( + self, method_handler: grpc.RpcMethodHandler + ) -> grpc.RpcMethodHandler: + def _generic_method_handler( + request: Request, + context: grpc.ServicerContext, + ) -> Response: + client_public_key_bytes = base64.urlsafe_b64decode( + _get_value_from_tuples( + _PUBLIC_KEY_HEADER, context.invocation_metadata() + ) + ) + if client_public_key_bytes not in self.client_public_keys: + context.abort(grpc.StatusCode.UNAUTHENTICATED, "Access denied") + + if isinstance(request, CreateNodeRequest): + return self._create_authenticated_node( + client_public_key_bytes, request, context + ) + + # Verify hmac value + hmac_value = base64.urlsafe_b64decode( + _get_value_from_tuples( + _AUTH_TOKEN_HEADER, context.invocation_metadata() + ) + ) + public_key = bytes_to_public_key(client_public_key_bytes) + + if not self._verify_hmac(public_key, request, hmac_value): + context.abort(grpc.StatusCode.UNAUTHENTICATED, "Access 
denied") + + # Verify node_id + node_id = self.state.get_node_id(client_public_key_bytes) + + if not self._verify_node_id(node_id, request): + context.abort(grpc.StatusCode.UNAUTHENTICATED, "Access denied") + + return method_handler.unary_unary(request, context) # type: ignore + + return grpc.unary_unary_rpc_method_handler( + _generic_method_handler, + request_deserializer=method_handler.request_deserializer, + response_serializer=method_handler.response_serializer, + ) + + def _verify_node_id( + self, + node_id: Optional[int], + request: Union[ + DeleteNodeRequest, + PullTaskInsRequest, + PushTaskResRequest, + GetRunRequest, + PingRequest, + ], + ) -> bool: + if node_id is None: + return False + if isinstance(request, PushTaskResRequest): + if len(request.task_res_list) == 0: + return False + return request.task_res_list[0].task.producer.node_id == node_id + if isinstance(request, GetRunRequest): + return node_id in self.state.get_nodes(request.run_id) + return request.node.node_id == node_id + + def _verify_hmac( + self, public_key: ec.EllipticCurvePublicKey, request: Request, hmac_value: bytes + ) -> bool: + shared_secret = generate_shared_key(self.server_private_key, public_key) + return verify_hmac(shared_secret, request.SerializeToString(True), hmac_value) + + def _create_authenticated_node( + self, + public_key_bytes: bytes, + request: CreateNodeRequest, + context: grpc.ServicerContext, + ) -> CreateNodeResponse: + context.send_initial_metadata( + ( + ( + _PUBLIC_KEY_HEADER, + self.encoded_server_public_key, + ), + ) + ) + + node_id = self.state.get_node_id(public_key_bytes) + + # Handle `CreateNode` here instead of calling the default method handler + # Return previously assigned `node_id` for the provided `public_key` + if node_id is not None: + self.state.acknowledge_ping(node_id, request.ping_interval) + return CreateNodeResponse(node=Node(node_id=node_id, anonymous=False)) + + # No `node_id` exists for the provided `public_key` + # Handle `CreateNode` 
here instead of calling the default method handler + # Note: the innermost `CreateNode` method will never be called + node_id = self.state.create_node(request.ping_interval, public_key_bytes) + return CreateNodeResponse(node=Node(node_id=node_id, anonymous=False)) diff --git a/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor_test.py b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor_test.py new file mode 100644 index 000000000000..c4c71e5a8188 --- /dev/null +++ b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor_test.py @@ -0,0 +1,501 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Flower server interceptor tests.""" + + +import base64 +import unittest + +import grpc + +from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( + compute_hmac, + generate_key_pairs, + generate_shared_key, + private_key_to_bytes, + public_key_to_bytes, +) +from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 + CreateNodeRequest, + CreateNodeResponse, + DeleteNodeRequest, + DeleteNodeResponse, + GetRunRequest, + GetRunResponse, + PingRequest, + PingResponse, + PullTaskInsRequest, + PullTaskInsResponse, + PushTaskResRequest, + PushTaskResResponse, +) +from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.task_pb2 import Task, TaskRes # pylint: disable=E0611 +from flwr.server.app import ADDRESS_FLEET_API_GRPC_RERE, _run_fleet_api_grpc_rere +from flwr.server.superlink.state.state_factory import StateFactory + +from .server_interceptor import ( + _AUTH_TOKEN_HEADER, + _PUBLIC_KEY_HEADER, + AuthenticateServerInterceptor, +) + + +class TestServerInterceptor(unittest.TestCase): # pylint: disable=R0902 + """Server interceptor tests.""" + + def setUp(self) -> None: + """Initialize mock stub and server interceptor.""" + self._client_private_key, self._client_public_key = generate_key_pairs() + self._server_private_key, self._server_public_key = generate_key_pairs() + + state_factory = StateFactory(":flwr-in-memory-state:") + self.state = state_factory.state() + self.state.store_server_private_public_key( + private_key_to_bytes(self._server_private_key), + public_key_to_bytes(self._server_public_key), + ) + self.state.store_client_public_keys( + {public_key_to_bytes(self._client_public_key)} + ) + + self._server_interceptor = AuthenticateServerInterceptor(self.state) + self._server: grpc.Server = _run_fleet_api_grpc_rere( + ADDRESS_FLEET_API_GRPC_RERE, state_factory, None, [self._server_interceptor] + ) + + self._channel = 
grpc.insecure_channel("localhost:9092") + self._create_node = self._channel.unary_unary( + "/flwr.proto.Fleet/CreateNode", + request_serializer=CreateNodeRequest.SerializeToString, + response_deserializer=CreateNodeResponse.FromString, + ) + self._delete_node = self._channel.unary_unary( + "/flwr.proto.Fleet/DeleteNode", + request_serializer=DeleteNodeRequest.SerializeToString, + response_deserializer=DeleteNodeResponse.FromString, + ) + self._pull_task_ins = self._channel.unary_unary( + "/flwr.proto.Fleet/PullTaskIns", + request_serializer=PullTaskInsRequest.SerializeToString, + response_deserializer=PullTaskInsResponse.FromString, + ) + self._push_task_res = self._channel.unary_unary( + "/flwr.proto.Fleet/PushTaskRes", + request_serializer=PushTaskResRequest.SerializeToString, + response_deserializer=PushTaskResResponse.FromString, + ) + self._get_run = self._channel.unary_unary( + "/flwr.proto.Fleet/GetRun", + request_serializer=GetRunRequest.SerializeToString, + response_deserializer=GetRunResponse.FromString, + ) + self._ping = self._channel.unary_unary( + "/flwr.proto.Fleet/Ping", + request_serializer=PingRequest.SerializeToString, + response_deserializer=PingResponse.FromString, + ) + + def tearDown(self) -> None: + """Clean up grpc server.""" + self._server.stop(None) + + def test_successful_create_node_with_metadata(self) -> None: + """Test server interceptor for creating node.""" + # Prepare + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + + # Execute + response, call = self._create_node.with_call( + request=CreateNodeRequest(), + metadata=((_PUBLIC_KEY_HEADER, public_key_bytes),), + ) + + expected_metadata = ( + _PUBLIC_KEY_HEADER, + base64.urlsafe_b64encode( + public_key_to_bytes(self._server_public_key) + ).decode(), + ) + + # Assert + assert call.initial_metadata()[0] == expected_metadata + assert isinstance(response, CreateNodeResponse) + + def test_unsuccessful_create_node_with_metadata(self) -> 
None: + """Test server interceptor for creating node unsuccessfully.""" + # Prepare + _, client_public_key = generate_key_pairs() + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(client_public_key) + ) + + # Execute & Assert + with self.assertRaises(grpc.RpcError): + self._create_node.with_call( + request=CreateNodeRequest(), + metadata=((_PUBLIC_KEY_HEADER, public_key_bytes),), + ) + + def test_successful_delete_node_with_metadata(self) -> None: + """Test server interceptor for deleting node.""" + # Prepare + node_id = self.state.create_node( + ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ) + request = DeleteNodeRequest(node=Node(node_id=node_id)) + shared_secret = generate_shared_key( + self._client_private_key, self._server_public_key + ) + hmac_value = base64.urlsafe_b64encode( + compute_hmac(shared_secret, request.SerializeToString(True)) + ) + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + + # Execute + response, call = self._delete_node.with_call( + request=request, + metadata=( + (_PUBLIC_KEY_HEADER, public_key_bytes), + (_AUTH_TOKEN_HEADER, hmac_value), + ), + ) + + # Assert + assert isinstance(response, DeleteNodeResponse) + assert grpc.StatusCode.OK == call.code() + + def test_unsuccessful_delete_node_with_metadata(self) -> None: + """Test server interceptor for deleting node unsuccessfully.""" + # Prepare + node_id = self.state.create_node( + ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ) + request = DeleteNodeRequest(node=Node(node_id=node_id)) + client_private_key, _ = generate_key_pairs() + shared_secret = generate_shared_key(client_private_key, self._server_public_key) + hmac_value = base64.urlsafe_b64encode( + compute_hmac(shared_secret, request.SerializeToString(True)) + ) + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + + # Execute & Assert + with 
self.assertRaises(grpc.RpcError): + self._delete_node.with_call( + request=request, + metadata=( + (_PUBLIC_KEY_HEADER, public_key_bytes), + (_AUTH_TOKEN_HEADER, hmac_value), + ), + ) + + def test_successful_pull_task_ins_with_metadata(self) -> None: + """Test server interceptor for pull task ins.""" + # Prepare + node_id = self.state.create_node( + ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ) + request = PullTaskInsRequest(node=Node(node_id=node_id)) + shared_secret = generate_shared_key( + self._client_private_key, self._server_public_key + ) + hmac_value = base64.urlsafe_b64encode( + compute_hmac(shared_secret, request.SerializeToString(True)) + ) + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + + # Execute + response, call = self._pull_task_ins.with_call( + request=request, + metadata=( + (_PUBLIC_KEY_HEADER, public_key_bytes), + (_AUTH_TOKEN_HEADER, hmac_value), + ), + ) + + # Assert + assert isinstance(response, PullTaskInsResponse) + assert grpc.StatusCode.OK == call.code() + + def test_unsuccessful_pull_task_ins_with_metadata(self) -> None: + """Test server interceptor for pull task ins unsuccessfully.""" + # Prepare + node_id = self.state.create_node( + ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ) + request = PullTaskInsRequest(node=Node(node_id=node_id)) + client_private_key, _ = generate_key_pairs() + shared_secret = generate_shared_key(client_private_key, self._server_public_key) + hmac_value = base64.urlsafe_b64encode( + compute_hmac(shared_secret, request.SerializeToString(True)) + ) + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + + # Execute & Assert + with self.assertRaises(grpc.RpcError): + self._pull_task_ins.with_call( + request=request, + metadata=( + (_PUBLIC_KEY_HEADER, public_key_bytes), + (_AUTH_TOKEN_HEADER, hmac_value), + ), + ) + + def 
test_successful_push_task_res_with_metadata(self) -> None: + """Test server interceptor for push task res.""" + # Prepare + node_id = self.state.create_node( + ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ) + request = PushTaskResRequest( + task_res_list=[TaskRes(task=Task(producer=Node(node_id=node_id)))] + ) + shared_secret = generate_shared_key( + self._client_private_key, self._server_public_key + ) + hmac_value = base64.urlsafe_b64encode( + compute_hmac(shared_secret, request.SerializeToString(True)) + ) + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + + # Execute + response, call = self._push_task_res.with_call( + request=request, + metadata=( + (_PUBLIC_KEY_HEADER, public_key_bytes), + (_AUTH_TOKEN_HEADER, hmac_value), + ), + ) + + # Assert + assert isinstance(response, PushTaskResResponse) + assert grpc.StatusCode.OK == call.code() + + def test_unsuccessful_push_task_res_with_metadata(self) -> None: + """Test server interceptor for push task res unsuccessfully.""" + # Prepare + node_id = self.state.create_node( + ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ) + request = PushTaskResRequest( + task_res_list=[TaskRes(task=Task(producer=Node(node_id=node_id)))] + ) + client_private_key, _ = generate_key_pairs() + shared_secret = generate_shared_key(client_private_key, self._server_public_key) + hmac_value = base64.urlsafe_b64encode( + compute_hmac(shared_secret, request.SerializeToString(True)) + ) + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + + # Execute & Assert + with self.assertRaises(grpc.RpcError): + self._push_task_res.with_call( + request=request, + metadata=( + (_PUBLIC_KEY_HEADER, public_key_bytes), + (_AUTH_TOKEN_HEADER, hmac_value), + ), + ) + + def test_successful_get_run_with_metadata(self) -> None: + """Test server interceptor for pull task ins.""" + # Prepare + 
self.state.create_node( + ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ) + run_id = self.state.create_run("", "") + request = GetRunRequest(run_id=run_id) + shared_secret = generate_shared_key( + self._client_private_key, self._server_public_key + ) + hmac_value = base64.urlsafe_b64encode( + compute_hmac(shared_secret, request.SerializeToString(True)) + ) + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + + # Execute + response, call = self._get_run.with_call( + request=request, + metadata=( + (_PUBLIC_KEY_HEADER, public_key_bytes), + (_AUTH_TOKEN_HEADER, hmac_value), + ), + ) + + # Assert + assert isinstance(response, GetRunResponse) + assert grpc.StatusCode.OK == call.code() + + def test_unsuccessful_get_run_with_metadata(self) -> None: + """Test server interceptor for pull task ins unsuccessfully.""" + # Prepare + self.state.create_node( + ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ) + run_id = self.state.create_run("", "") + request = GetRunRequest(run_id=run_id) + client_private_key, _ = generate_key_pairs() + shared_secret = generate_shared_key(client_private_key, self._server_public_key) + hmac_value = base64.urlsafe_b64encode( + compute_hmac(shared_secret, request.SerializeToString(True)) + ) + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + + # Execute & Assert + with self.assertRaises(grpc.RpcError): + self._get_run.with_call( + request=request, + metadata=( + (_PUBLIC_KEY_HEADER, public_key_bytes), + (_AUTH_TOKEN_HEADER, hmac_value), + ), + ) + + def test_successful_ping_with_metadata(self) -> None: + """Test server interceptor for pull task ins.""" + # Prepare + node_id = self.state.create_node( + ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ) + request = PingRequest(node=Node(node_id=node_id)) + shared_secret = generate_shared_key( + self._client_private_key, 
self._server_public_key + ) + hmac_value = base64.urlsafe_b64encode( + compute_hmac(shared_secret, request.SerializeToString(True)) + ) + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + + # Execute + response, call = self._ping.with_call( + request=request, + metadata=( + (_PUBLIC_KEY_HEADER, public_key_bytes), + (_AUTH_TOKEN_HEADER, hmac_value), + ), + ) + + # Assert + assert isinstance(response, PingResponse) + assert grpc.StatusCode.OK == call.code() + + def test_unsuccessful_ping_with_metadata(self) -> None: + """Test server interceptor for pull task ins unsuccessfully.""" + # Prepare + node_id = self.state.create_node( + ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ) + request = PingRequest(node=Node(node_id=node_id)) + client_private_key, _ = generate_key_pairs() + shared_secret = generate_shared_key(client_private_key, self._server_public_key) + hmac_value = base64.urlsafe_b64encode( + compute_hmac(shared_secret, request.SerializeToString(True)) + ) + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + + # Execute & Assert + with self.assertRaises(grpc.RpcError): + self._ping.with_call( + request=request, + metadata=( + (_PUBLIC_KEY_HEADER, public_key_bytes), + (_AUTH_TOKEN_HEADER, hmac_value), + ), + ) + + def test_successful_restore_node(self) -> None: + """Test server interceptor for restoring node.""" + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + response, call = self._create_node.with_call( + request=CreateNodeRequest(), + metadata=((_PUBLIC_KEY_HEADER, public_key_bytes),), + ) + + expected_metadata = ( + _PUBLIC_KEY_HEADER, + base64.urlsafe_b64encode( + public_key_to_bytes(self._server_public_key) + ).decode(), + ) + + node = response.node + client_node_id = node.node_id + + assert call.initial_metadata()[0] == expected_metadata + assert isinstance(response, 
CreateNodeResponse) + + request = DeleteNodeRequest(node=node) + shared_secret = generate_shared_key( + self._client_private_key, self._server_public_key + ) + hmac_value = base64.urlsafe_b64encode( + compute_hmac(shared_secret, request.SerializeToString(True)) + ) + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + response, call = self._delete_node.with_call( + request=request, + metadata=( + (_PUBLIC_KEY_HEADER, public_key_bytes), + (_AUTH_TOKEN_HEADER, hmac_value), + ), + ) + + assert isinstance(response, DeleteNodeResponse) + assert grpc.StatusCode.OK == call.code() + + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + response, call = self._create_node.with_call( + request=CreateNodeRequest(), + metadata=((_PUBLIC_KEY_HEADER, public_key_bytes),), + ) + + expected_metadata = ( + _PUBLIC_KEY_HEADER, + base64.urlsafe_b64encode( + public_key_to_bytes(self._server_public_key) + ).decode(), + ) + + assert call.initial_metadata()[0] == expected_metadata + assert isinstance(response, CreateNodeResponse) + assert response.node.node_id == client_node_id diff --git a/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py index 2e696dde78e1..83b005a4cb8e 100644 --- a/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py +++ b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py @@ -15,6 +15,7 @@ """Fleet API message handlers.""" +import time from typing import List, Optional from uuid import UUID @@ -23,6 +24,8 @@ CreateNodeResponse, DeleteNodeRequest, DeleteNodeResponse, + GetRunRequest, + GetRunResponse, PingRequest, PingResponse, PullTaskInsRequest, @@ -30,6 +33,7 @@ PushTaskResRequest, PushTaskResResponse, Reconnect, + Run, ) from flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: 
disable=E0611 @@ -42,7 +46,7 @@ def create_node( ) -> CreateNodeResponse: """.""" # Create node - node_id = state.create_node() + node_id = state.create_node(ping_interval=request.ping_interval) return CreateNodeResponse(node=Node(node_id=node_id, anonymous=False)) @@ -62,7 +66,8 @@ def ping( state: State, # pylint: disable=unused-argument ) -> PingResponse: """.""" - return PingResponse(success=True) + res = state.acknowledge_ping(request.node.node_id, request.ping_interval) + return PingResponse(success=res) def pull_task_ins(request: PullTaskInsRequest, state: State) -> PullTaskInsResponse: @@ -87,6 +92,9 @@ def push_task_res(request: PushTaskResRequest, state: State) -> PushTaskResRespo task_res: TaskRes = request.task_res_list[0] # pylint: enable=no-member + # Set pushed_at (timestamp in seconds) + task_res.task.pushed_at = time.time() + # Store TaskRes in State task_id: Optional[UUID] = state.store_task_res(task_res=task_res) @@ -96,3 +104,12 @@ def push_task_res(request: PushTaskResRequest, state: State) -> PushTaskResRespo results={str(task_id): 0}, ) return response + + +def get_run( + request: GetRunRequest, state: State # pylint: disable=W0613 +) -> GetRunResponse: + """Get run information.""" + run_id, fab_id, fab_version = state.get_run(request.run_id) + run = Run(run_id=run_id, fab_id=fab_id, fab_version=fab_version) + return GetRunResponse(run=run) diff --git a/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py b/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py index b022b34c68c8..8ac7c6cfc613 100644 --- a/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py +++ b/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py @@ -21,6 +21,8 @@ from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, DeleteNodeRequest, + GetRunRequest, + PingRequest, PullTaskInsRequest, PushTaskResRequest, ) @@ -152,11 +154,67 @@ async def push_task_res(request: Request) -> Response: # Check if token is need ) +async def ping(request: 
Request) -> Response: + """Ping.""" + _check_headers(request.headers) + + # Get the request body as raw bytes + ping_request_bytes: bytes = await request.body() + + # Deserialize ProtoBuf + ping_request_proto = PingRequest() + ping_request_proto.ParseFromString(ping_request_bytes) + + # Get state from app + state: State = app.state.STATE_FACTORY.state() + + # Handle message + ping_response_proto = message_handler.ping(request=ping_request_proto, state=state) + + # Return serialized ProtoBuf + ping_response_bytes = ping_response_proto.SerializeToString() + return Response( + status_code=200, + content=ping_response_bytes, + headers={"Content-Type": "application/protobuf"}, + ) + + +async def get_run(request: Request) -> Response: + """GetRun.""" + _check_headers(request.headers) + + # Get the request body as raw bytes + get_run_request_bytes: bytes = await request.body() + + # Deserialize ProtoBuf + get_run_request_proto = GetRunRequest() + get_run_request_proto.ParseFromString(get_run_request_bytes) + + # Get state from app + state: State = app.state.STATE_FACTORY.state() + + # Handle message + get_run_response_proto = message_handler.get_run( + request=get_run_request_proto, state=state + ) + + # Return serialized ProtoBuf + get_run_response_bytes = get_run_response_proto.SerializeToString() + return Response( + status_code=200, + content=get_run_response_bytes, + headers={"Content-Type": "application/protobuf"}, + ) + + routes = [ Route("/api/v0/fleet/create-node", create_node, methods=["POST"]), Route("/api/v0/fleet/delete-node", delete_node, methods=["POST"]), Route("/api/v0/fleet/pull-task-ins", pull_task_ins, methods=["POST"]), Route("/api/v0/fleet/push-task-res", push_task_res, methods=["POST"]), + Route("/api/v0/fleet/ping", ping, methods=["POST"]), + Route("/api/v0/fleet/get-run", get_run, methods=["POST"]), ] app: Starlette = Starlette( diff --git a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py 
b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py index 8ef0d54622ae..93aca583af9c 100644 --- a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py +++ b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py @@ -15,12 +15,12 @@ """Ray backend for the Fleet API using the Simulation Engine.""" import pathlib -from logging import ERROR, INFO +from logging import DEBUG, ERROR, WARNING from typing import Callable, Dict, List, Tuple, Union import ray -from flwr.client.client_app import ClientApp, LoadClientAppError +from flwr.client.client_app import ClientApp from flwr.common.context import Context from flwr.common.logger import log from flwr.common.message import Message @@ -45,8 +45,8 @@ def __init__( work_dir: str, ) -> None: """Prepare RayBackend by initialising Ray and creating the ActorPool.""" - log(INFO, "Initialising: %s", self.__class__.__name__) - log(INFO, "Backend config: %s", backend_config) + log(DEBUG, "Initialising: %s", self.__class__.__name__) + log(DEBUG, "Backend config: %s", backend_config) if not pathlib.Path(work_dir).exists(): raise ValueError(f"Specified work_dir {work_dir} does not exist.") @@ -55,7 +55,15 @@ def __init__( runtime_env = ( self._configure_runtime_env(work_dir=work_dir) if work_dir else None ) - init_ray(runtime_env=runtime_env) + + if backend_config.get("mute_logging", False): + init_ray( + logging_level=WARNING, log_to_driver=False, runtime_env=runtime_env + ) + elif backend_config.get("silent", False): + init_ray(logging_level=WARNING, log_to_driver=True, runtime_env=runtime_env) + else: + init_ray(runtime_env=runtime_env) # Validate client resources self.client_resources_key = "client_resources" @@ -109,7 +117,7 @@ def _validate_client_resources(self, config: BackendConfig) -> ClientResourcesDi else: client_resources = {"num_cpus": 2, "num_gpus": 0.0} log( - INFO, + DEBUG, "`%s` not specified in backend config. 
Applying default setting: %s", self.client_resources_key, client_resources, @@ -129,7 +137,7 @@ def is_worker_idle(self) -> bool: async def build(self) -> None: """Build pool of Ray actors that this backend will submit jobs to.""" await self.pool.add_actors_to_pool(self.pool.actors_capacity) - log(INFO, "Constructed ActorPool with: %i actors", self.pool.num_actors) + log(DEBUG, "Constructed ActorPool with: %i actors", self.pool.num_actors) async def process_message( self, @@ -151,7 +159,6 @@ async def process_message( ) await future - # Fetch result ( out_mssg, @@ -160,16 +167,18 @@ async def process_message( return out_mssg, updated_context - except LoadClientAppError as load_ex: + except Exception as ex: log( ERROR, "An exception was raised when processing a message by %s", self.__class__.__name__, ) - raise load_ex + # add actor back into pool + await self.pool.add_actor_back_to_pool(future) + raise ex async def terminate(self) -> None: """Terminate all actors in actor pool.""" await self.pool.terminate_all_actors() ray.shutdown() - log(INFO, "Terminated %s", self.__class__.__name__) + log(DEBUG, "Terminated %s", self.__class__.__name__) diff --git a/src/py/flwr/server/superlink/fleet/vce/vce_api.py b/src/py/flwr/server/superlink/fleet/vce/vce_api.py index a693c968d0e8..cc3e85b28097 100644 --- a/src/py/flwr/server/superlink/fleet/vce/vce_api.py +++ b/src/py/flwr/server/superlink/fleet/vce/vce_api.py @@ -14,16 +14,19 @@ # ============================================================================== """Fleet Simulation Engine API.""" - import asyncio import json +import sys +import time import traceback from logging import DEBUG, ERROR, INFO, WARN from typing import Callable, Dict, List, Optional -from flwr.client.client_app import ClientApp, LoadClientAppError +from flwr.client.client_app import ClientApp, ClientAppException, LoadClientAppError from flwr.client.node_state import NodeState +from flwr.common.constant import PING_MAX_INTERVAL, ErrorCode from 
flwr.common.logger import log +from flwr.common.message import Error from flwr.common.object_ref import load_app from flwr.common.serde import message_from_taskins, message_to_taskres from flwr.proto.task_pb2 import TaskIns # pylint: disable=E0611 @@ -41,9 +44,9 @@ def _register_nodes( nodes_mapping: NodeToPartitionMapping = {} state = state_factory.state() for i in range(num_nodes): - node_id = state.create_node() + node_id = state.create_node(ping_interval=PING_MAX_INTERVAL) nodes_mapping[node_id] = i - log(INFO, "Registered %i nodes", len(nodes_mapping)) + log(DEBUG, "Registered %i nodes", len(nodes_mapping)) return nodes_mapping @@ -59,6 +62,7 @@ async def worker( """Get TaskIns from queue and pass it to an actor in the pool to execute it.""" state = state_factory.state() while True: + out_mssg = None try: task_ins: TaskIns = await queue.get() node_id = task_ins.task.consumer.node_id @@ -82,24 +86,34 @@ async def worker( task_ins.run_id, context=updated_context ) - # Convert to TaskRes - task_res = message_to_taskres(out_mssg) - # Store TaskRes in state - state.store_task_res(task_res) - except asyncio.CancelledError as e: - log(DEBUG, "Async worker: %s", e) + log(DEBUG, "Terminating async worker: %s", e) break - except LoadClientAppError as app_ex: - log(ERROR, "Async worker: %s", app_ex) - log(ERROR, traceback.format_exc()) - raise - + # Exceptions aren't raised but reported as an error message except Exception as ex: # pylint: disable=broad-exception-caught log(ERROR, ex) log(ERROR, traceback.format_exc()) - break + + if isinstance(ex, ClientAppException): + e_code = ErrorCode.CLIENT_APP_RAISED_EXCEPTION + elif isinstance(ex, LoadClientAppError): + e_code = ErrorCode.LOAD_CLIENT_APP_EXCEPTION + else: + e_code = ErrorCode.UNKNOWN + + reason = str(type(ex)) + ":<'" + str(ex) + "'>" + out_mssg = message.create_error_reply( + error=Error(code=e_code, reason=reason) + ) + + finally: + if out_mssg: + # Convert to TaskRes + task_res = message_to_taskres(out_mssg) + 
# Store TaskRes in state + task_res.task.pushed_at = time.time() + state.store_task_res(task_res) async def add_taskins_to_queue( @@ -218,7 +232,8 @@ async def run( await backend.terminate() -# pylint: disable=too-many-arguments,unused-argument,too-many-locals +# pylint: disable=too-many-arguments,unused-argument,too-many-locals,too-many-branches +# pylint: disable=too-many-statements def start_vce( backend_name: str, backend_config_json_stream: str, @@ -278,7 +293,7 @@ def start_vce( node_states[node_id] = NodeState() # Load backend config - log(INFO, "Supported backends: %s", list(supported_backends.keys())) + log(DEBUG, "Supported backends: %s", list(supported_backends.keys())) backend_config = json.loads(backend_config_json_stream) try: @@ -300,12 +315,14 @@ def backend_fn() -> Backend: """Instantiate a Backend.""" return backend_type(backend_config, work_dir=app_dir) - log(INFO, "client_app_attr = %s", client_app_attr) - # Load ClientApp if needed def _load() -> ClientApp: if client_app_attr: + + if app_dir is not None: + sys.path.insert(0, app_dir) + app: ClientApp = load_app(client_app_attr, LoadClientAppError) if not isinstance(app, ClientApp): @@ -319,13 +336,30 @@ def _load() -> ClientApp: app_fn = _load - asyncio.run( - run( - app_fn, - backend_fn, - nodes_mapping, - state_factory, - node_states, - f_stop, + try: + # Test if ClientApp can be loaded + _ = app_fn() + + # Run main simulation loop + asyncio.run( + run( + app_fn, + backend_fn, + nodes_mapping, + state_factory, + node_states, + f_stop, + ) ) - ) + except LoadClientAppError as loadapp_ex: + f_stop_delay = 10 + log( + ERROR, + "LoadClientAppError exception encountered. 
Terminating simulation in %is", + f_stop_delay, + ) + time.sleep(f_stop_delay) + f_stop.set() # set termination event + raise loadapp_ex + except Exception as ex: + raise ex diff --git a/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py b/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py index 2c917c3eed27..1da726f88f1e 100644 --- a/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py +++ b/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py @@ -17,6 +17,7 @@ import asyncio import threading +import time from itertools import cycle from json import JSONDecodeError from math import pi @@ -26,6 +27,7 @@ from unittest import IsolatedAsyncioTestCase from uuid import UUID +from flwr.client.client_app import LoadClientAppError from flwr.common import ( DEFAULT_TTL, GetPropertiesIns, @@ -52,7 +54,6 @@ def terminate_simulation(f_stop: asyncio.Event, sleep_duration: int) -> None: def init_state_factory_nodes_mapping( num_nodes: int, num_messages: int, - erroneous_message: Optional[bool] = False, ) -> Tuple[StateFactory, NodeToPartitionMapping, Dict[UUID, float]]: """Instatiate StateFactory, register nodes and pre-insert messages in the state.""" # Register a state and a run_id in it @@ -67,7 +68,6 @@ def init_state_factory_nodes_mapping( nodes_mapping=nodes_mapping, run_id=run_id, num_messages=num_messages, - erroneous_message=erroneous_message, ) return state_factory, nodes_mapping, expected_results @@ -78,11 +78,10 @@ def register_messages_into_state( nodes_mapping: NodeToPartitionMapping, run_id: int, num_messages: int, - erroneous_message: Optional[bool] = False, ) -> Dict[UUID, float]: """Register `num_messages` into the state factory.""" state: InMemoryState = state_factory.state() # type: ignore - state.run_ids.add(run_id) + state.run_ids[run_id] = ("Mock/mock", "v1.0.0") # Artificially add TaskIns to state so they can be processed # by the Simulation Engine logic nodes_cycle = cycle(nodes_mapping.keys()) # we have more messages than supernodes @@ 
-104,15 +103,14 @@ def register_messages_into_state( dst_node_id=dst_node_id, # indicate destination node reply_to_message="", ttl=DEFAULT_TTL, - message_type=( - "a bad message" - if erroneous_message - else MessageTypeLegacy.GET_PROPERTIES - ), + message_type=MessageTypeLegacy.GET_PROPERTIES, ), ) # Convert Message to TaskIns taskins = message_to_taskins(message) + # Normally recorded by the driver servicer + # but since we don't have one in this test, we do this manually + taskins.task.pushed_at = time.time() # Instert in state task_id = state.store_task_ins(taskins) if task_id: @@ -196,32 +194,13 @@ def test_erroneous_client_app_attr(self) -> None: state_factory, nodes_mapping, _ = init_state_factory_nodes_mapping( num_nodes=num_nodes, num_messages=num_messages ) - with self.assertRaises(RuntimeError): + with self.assertRaises(LoadClientAppError): start_and_shutdown( client_app_attr="totally_fictitious_app:client", state_factory=state_factory, nodes_mapping=nodes_mapping, ) - def test_erroneous_messages(self) -> None: - """Test handling of error in async worker (consumer). - - We register messages which will trigger an error when handling, triggering an - error. 
- """ - num_messages = 100 - num_nodes = 59 - - state_factory, nodes_mapping, _ = init_state_factory_nodes_mapping( - num_nodes=num_nodes, num_messages=num_messages, erroneous_message=True - ) - - with self.assertRaises(RuntimeError): - start_and_shutdown( - state_factory=state_factory, - nodes_mapping=nodes_mapping, - ) - def test_erroneous_backend_config(self) -> None: """Backend Config should be a JSON stream.""" with self.assertRaises(JSONDecodeError): diff --git a/src/py/flwr/server/superlink/state/in_memory_state.py b/src/py/flwr/server/superlink/state/in_memory_state.py index 7bff8ab4befc..f86bf79d9dfa 100644 --- a/src/py/flwr/server/superlink/state/in_memory_state.py +++ b/src/py/flwr/server/superlink/state/in_memory_state.py @@ -17,9 +17,9 @@ import os import threading -from datetime import datetime +import time from logging import ERROR -from typing import Dict, List, Optional, Set +from typing import Dict, List, Optional, Set, Tuple from uuid import UUID, uuid4 from flwr.common import log, now @@ -27,15 +27,27 @@ from flwr.server.superlink.state.state import State from flwr.server.utils import validate_task_ins_or_res +from .utils import make_node_unavailable_taskres -class InMemoryState(State): + +class InMemoryState(State): # pylint: disable=R0902,R0904 """In-memory State implementation.""" def __init__(self) -> None: - self.node_ids: Set[int] = set() - self.run_ids: Set[int] = set() + + # Map node_id to (online_until, ping_interval) + self.node_ids: Dict[int, Tuple[float, float]] = {} + self.public_key_to_node_id: Dict[bytes, int] = {} + + # Map run_id to (fab_id, fab_version) + self.run_ids: Dict[int, Tuple[str, str]] = {} self.task_ins_store: Dict[UUID, TaskIns] = {} self.task_res_store: Dict[UUID, TaskRes] = {} + + self.client_public_keys: Set[bytes] = set() + self.server_public_key: Optional[bytes] = None + self.server_private_key: Optional[bytes] = None + self.lock = threading.Lock() def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: 
@@ -50,13 +62,11 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: log(ERROR, "`run_id` is invalid") return None - # Create task_id and created_at + # Create task_id task_id = uuid4() - created_at: datetime = now() # Store TaskIns task_ins.task_id = str(task_id) - task_ins.task.created_at = created_at.isoformat() with self.lock: self.task_ins_store[task_id] = task_ins @@ -111,13 +121,11 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: log(ERROR, "`run_id` is invalid") return None - # Create task_id and created_at + # Create task_id task_id = uuid4() - created_at: datetime = now() # Store TaskRes task_res.task_id = str(task_id) - task_res.task.created_at = created_at.isoformat() with self.lock: self.task_res_store[task_id] = task_res @@ -132,14 +140,31 @@ def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRe with self.lock: # Find TaskRes that were not delivered yet task_res_list: List[TaskRes] = [] + replied_task_ids: Set[UUID] = set() for _, task_res in self.task_res_store.items(): - if ( - UUID(task_res.task.ancestry[0]) in task_ids - and task_res.task.delivered_at == "" - ): + reply_to = UUID(task_res.task.ancestry[0]) + if reply_to in task_ids and task_res.task.delivered_at == "": task_res_list.append(task_res) + replied_task_ids.add(reply_to) + if limit and len(task_res_list) == limit: + break + + # Check if the node is offline + for task_id in task_ids - replied_task_ids: if limit and len(task_res_list) == limit: break + task_ins = self.task_ins_store.get(task_id) + if task_ins is None: + continue + node_id = task_ins.task.consumer.node_id + online_until, _ = self.node_ids[node_id] + # Generate a TaskRes containing an error reply if the node is offline. 
+ if online_until < time.time(): + err_taskres = make_node_unavailable_taskres( + ref_taskins=task_ins, + ) + self.task_res_store[UUID(err_taskres.task_id)] = err_taskres + task_res_list.append(err_taskres) # Mark all of them as delivered delivered_at = now().isoformat() @@ -185,22 +210,47 @@ def num_task_res(self) -> int: """ return len(self.task_res_store) - def create_node(self) -> int: + def create_node( + self, ping_interval: float, public_key: Optional[bytes] = None + ) -> int: """Create, store in state, and return `node_id`.""" # Sample a random int64 as node_id node_id: int = int.from_bytes(os.urandom(8), "little", signed=True) - if node_id not in self.node_ids: - self.node_ids.add(node_id) + with self.lock: + if node_id in self.node_ids: + log(ERROR, "Unexpected node registration failure.") + return 0 + + if public_key is not None: + if ( + public_key in self.public_key_to_node_id + or node_id in self.public_key_to_node_id.values() + ): + log(ERROR, "Unexpected node registration failure.") + return 0 + + self.public_key_to_node_id[public_key] = node_id + + self.node_ids[node_id] = (time.time() + ping_interval, ping_interval) return node_id - log(ERROR, "Unexpected node registration failure.") - return 0 - def delete_node(self, node_id: int) -> None: + def delete_node(self, node_id: int, public_key: Optional[bytes] = None) -> None: """Delete a client node.""" - if node_id not in self.node_ids: - raise ValueError(f"Node {node_id} not found") - self.node_ids.remove(node_id) + with self.lock: + if node_id not in self.node_ids: + raise ValueError(f"Node {node_id} not found") + + if public_key is not None: + if ( + public_key not in self.public_key_to_node_id + or node_id not in self.public_key_to_node_id.values() + ): + raise ValueError("Public key or node_id not found") + + del self.public_key_to_node_id[public_key] + + del self.node_ids[node_id] def get_nodes(self, run_id: int) -> Set[int]: """Return all available client nodes. 
@@ -210,17 +260,77 @@ def get_nodes(self, run_id: int) -> Set[int]: If the provided `run_id` does not exist or has no matching nodes, an empty `Set` MUST be returned. """ - if run_id not in self.run_ids: - return set() - return self.node_ids - - def create_run(self) -> int: - """Create one run.""" + with self.lock: + if run_id not in self.run_ids: + return set() + current_time = time.time() + return { + node_id + for node_id, (online_until, _) in self.node_ids.items() + if online_until > current_time + } + + def get_node_id(self, client_public_key: bytes) -> Optional[int]: + """Retrieve stored `node_id` filtered by `client_public_keys`.""" + return self.public_key_to_node_id.get(client_public_key) + + def create_run(self, fab_id: str, fab_version: str) -> int: + """Create a new run for the specified `fab_id` and `fab_version`.""" # Sample a random int64 as run_id - run_id: int = int.from_bytes(os.urandom(8), "little", signed=True) + with self.lock: + run_id: int = int.from_bytes(os.urandom(8), "little", signed=True) - if run_id not in self.run_ids: - self.run_ids.add(run_id) - return run_id + if run_id not in self.run_ids: + self.run_ids[run_id] = (fab_id, fab_version) + return run_id log(ERROR, "Unexpected run creation failure.") return 0 + + def store_server_private_public_key( + self, private_key: bytes, public_key: bytes + ) -> None: + """Store `server_private_key` and `server_public_key` in state.""" + with self.lock: + if self.server_private_key is None and self.server_public_key is None: + self.server_private_key = private_key + self.server_public_key = public_key + else: + raise RuntimeError("Server private and public key already set") + + def get_server_private_key(self) -> Optional[bytes]: + """Retrieve `server_private_key` in urlsafe bytes.""" + return self.server_private_key + + def get_server_public_key(self) -> Optional[bytes]: + """Retrieve `server_public_key` in urlsafe bytes.""" + return self.server_public_key + + def store_client_public_keys(self, 
public_keys: Set[bytes]) -> None: + """Store a set of `client_public_keys` in state.""" + with self.lock: + self.client_public_keys = public_keys + + def store_client_public_key(self, public_key: bytes) -> None: + """Store a `client_public_key` in state.""" + with self.lock: + self.client_public_keys.add(public_key) + + def get_client_public_keys(self) -> Set[bytes]: + """Retrieve all currently stored `client_public_keys` as a set.""" + return self.client_public_keys + + def get_run(self, run_id: int) -> Tuple[int, str, str]: + """Retrieve information about the run with the specified `run_id`.""" + with self.lock: + if run_id not in self.run_ids: + log(ERROR, "`run_id` is invalid") + return 0, "", "" + return run_id, *self.run_ids[run_id] + + def acknowledge_ping(self, node_id: int, ping_interval: float) -> bool: + """Acknowledge a ping received from a node, serving as a heartbeat.""" + with self.lock: + if node_id in self.node_ids: + self.node_ids[node_id] = (time.time() + ping_interval, ping_interval) + return True + return False diff --git a/src/py/flwr/server/superlink/state/sqlite_state.py b/src/py/flwr/server/superlink/state/sqlite_state.py index 25d138f94203..acf2054f08b6 100644 --- a/src/py/flwr/server/superlink/state/sqlite_state.py +++ b/src/py/flwr/server/superlink/state/sqlite_state.py @@ -18,9 +18,9 @@ import os import re import sqlite3 -from datetime import datetime +import time from logging import DEBUG, ERROR -from typing import Any, Dict, List, Optional, Set, Tuple, Union, cast +from typing import Any, Dict, List, Optional, Sequence, Set, Tuple, Union, cast from uuid import UUID, uuid4 from flwr.common import log, now @@ -30,16 +30,39 @@ from flwr.server.utils.validator import validate_task_ins_or_res from .state import State +from .utils import make_node_unavailable_taskres SQL_CREATE_TABLE_NODE = """ CREATE TABLE IF NOT EXISTS node( - node_id INTEGER UNIQUE + node_id INTEGER UNIQUE, + online_until REAL, + ping_interval REAL, + public_key BLOB ); 
""" +SQL_CREATE_TABLE_CREDENTIAL = """ +CREATE TABLE IF NOT EXISTS credential( + private_key BLOB PRIMARY KEY, + public_key BLOB +); +""" + +SQL_CREATE_TABLE_PUBLIC_KEY = """ +CREATE TABLE IF NOT EXISTS public_key( + public_key BLOB UNIQUE +); +""" + +SQL_CREATE_INDEX_ONLINE_UNTIL = """ +CREATE INDEX IF NOT EXISTS idx_online_until ON node (online_until); +""" + SQL_CREATE_TABLE_RUN = """ CREATE TABLE IF NOT EXISTS run( - run_id INTEGER UNIQUE + run_id INTEGER UNIQUE, + fab_id TEXT, + fab_version TEXT ); """ @@ -52,8 +75,9 @@ producer_node_id INTEGER, consumer_anonymous BOOLEAN, consumer_node_id INTEGER, - created_at TEXT, + created_at REAL, delivered_at TEXT, + pushed_at REAL, ttl REAL, ancestry TEXT, task_type TEXT, @@ -62,7 +86,6 @@ ); """ - SQL_CREATE_TABLE_TASK_RES = """ CREATE TABLE IF NOT EXISTS task_res( task_id TEXT UNIQUE, @@ -72,8 +95,9 @@ producer_node_id INTEGER, consumer_anonymous BOOLEAN, consumer_node_id INTEGER, - created_at TEXT, + created_at REAL, delivered_at TEXT, + pushed_at REAL, ttl REAL, ancestry TEXT, task_type TEXT, @@ -82,10 +106,10 @@ ); """ -DictOrTuple = Union[Tuple[Any], Dict[str, Any]] +DictOrTuple = Union[Tuple[Any, ...], Dict[str, Any]] -class SqliteState(State): +class SqliteState(State): # pylint: disable=R0904 """SQLite-based state implementation.""" def __init__( @@ -123,6 +147,9 @@ def initialize(self, log_queries: bool = False) -> List[Tuple[str]]: cur.execute(SQL_CREATE_TABLE_TASK_INS) cur.execute(SQL_CREATE_TABLE_TASK_RES) cur.execute(SQL_CREATE_TABLE_NODE) + cur.execute(SQL_CREATE_TABLE_CREDENTIAL) + cur.execute(SQL_CREATE_TABLE_PUBLIC_KEY) + cur.execute(SQL_CREATE_INDEX_ONLINE_UNTIL) res = cur.execute("SELECT name FROM sqlite_schema;") return res.fetchall() @@ -130,7 +157,7 @@ def initialize(self, log_queries: bool = False) -> List[Tuple[str]]: def query( self, query: str, - data: Optional[Union[List[DictOrTuple], DictOrTuple]] = None, + data: Optional[Union[Sequence[DictOrTuple], DictOrTuple]] = None, ) -> List[Dict[str, 
Any]]: """Execute a SQL query.""" if self.conn is None: @@ -185,13 +212,11 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: log(ERROR, errors) return None - # Create task_id and created_at + # Create task_id task_id = uuid4() - created_at: datetime = now() # Store TaskIns task_ins.task_id = str(task_id) - task_ins.task.created_at = created_at.isoformat() data = (task_ins_to_dict(task_ins),) columns = ", ".join([f":{key}" for key in data[0]]) query = f"INSERT INTO task_ins VALUES({columns});" @@ -318,13 +343,11 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: log(ERROR, errors) return None - # Create task_id and created_at + # Create task_id task_id = uuid4() - created_at: datetime = now() # Store TaskIns task_res.task_id = str(task_id) - task_res.task.created_at = created_at.isoformat() data = (task_res_to_dict(task_res),) columns = ", ".join([f":{key}" for key in data[0]]) query = f"INSERT INTO task_res VALUES({columns});" @@ -339,6 +362,7 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: return task_id + # pylint: disable-next=R0914 def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRes]: """Get TaskRes for task_ids. @@ -369,7 +393,7 @@ def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRe AND delivered_at = "" """ - data: Dict[str, Union[str, int]] = {} + data: Dict[str, Union[str, float, int]] = {} if limit is not None: query += " LIMIT :limit" @@ -403,6 +427,54 @@ def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRe rows = self.query(query, data) result = [dict_to_task_res(row) for row in rows] + + # 1. 
Query: Fetch consumer_node_id of remaining task_ids + # Assume the ancestry field only contains one element + data.clear() + replied_task_ids: Set[UUID] = {UUID(str(row["ancestry"])) for row in rows} + remaining_task_ids = task_ids - replied_task_ids + placeholders = ",".join([f":id_{i}" for i in range(len(remaining_task_ids))]) + query = f""" + SELECT consumer_node_id + FROM task_ins + WHERE task_id IN ({placeholders}); + """ + for index, task_id in enumerate(remaining_task_ids): + data[f"id_{index}"] = str(task_id) + node_ids = [int(row["consumer_node_id"]) for row in self.query(query, data)] + + # 2. Query: Select offline nodes + placeholders = ",".join([f":id_{i}" for i in range(len(node_ids))]) + query = f""" + SELECT node_id + FROM node + WHERE node_id IN ({placeholders}) + AND online_until < :time; + """ + data = {f"id_{i}": str(node_id) for i, node_id in enumerate(node_ids)} + data["time"] = time.time() + offline_node_ids = [int(row["node_id"]) for row in self.query(query, data)] + + # 3. 
Query: Select TaskIns for offline nodes + placeholders = ",".join([f":id_{i}" for i in range(len(offline_node_ids))]) + query = f""" + SELECT * + FROM task_ins + WHERE consumer_node_id IN ({placeholders}); + """ + data = {f"id_{i}": str(node_id) for i, node_id in enumerate(offline_node_ids)} + task_ins_rows = self.query(query, data) + + # Make TaskRes containing node unavailable error + for row in task_ins_rows: + if limit and len(result) == limit: + break + task_ins = dict_to_task_ins(row) + err_taskres = make_node_unavailable_taskres( + ref_taskins=task_ins, + ) + result.append(err_taskres) + return result def num_task_ins(self) -> int: @@ -463,23 +535,54 @@ def delete_tasks(self, task_ids: Set[UUID]) -> None: return None - def create_node(self) -> int: + def create_node( + self, ping_interval: float, public_key: Optional[bytes] = None + ) -> int: """Create, store in state, and return `node_id`.""" # Sample a random int64 as node_id node_id: int = int.from_bytes(os.urandom(8), "little", signed=True) - query = "INSERT INTO node VALUES(:node_id);" + query = "SELECT node_id FROM node WHERE public_key = :public_key;" + row = self.query(query, {"public_key": public_key}) + + if len(row) > 0: + log(ERROR, "Unexpected node registration failure.") + return 0 + + query = ( + "INSERT INTO node " + "(node_id, online_until, ping_interval, public_key) " + "VALUES (?, ?, ?, ?)" + ) + try: - self.query(query, {"node_id": node_id}) + self.query( + query, (node_id, time.time() + ping_interval, ping_interval, public_key) + ) except sqlite3.IntegrityError: log(ERROR, "Unexpected node registration failure.") return 0 return node_id - def delete_node(self, node_id: int) -> None: + def delete_node(self, node_id: int, public_key: Optional[bytes] = None) -> None: """Delete a client node.""" - query = "DELETE FROM node WHERE node_id = :node_id;" - self.query(query, {"node_id": node_id}) + query = "DELETE FROM node WHERE node_id = ?" 
+ params = (node_id,) + + if public_key is not None: + query += " AND public_key = ?" + params += (public_key,) # type: ignore + + if self.conn is None: + raise AttributeError("State is not initialized.") + + try: + with self.conn: + rows = self.conn.execute(query, params) + if rows.rowcount < 1: + raise ValueError("Public key or node_id not found") + except KeyError as exc: + log(ERROR, {"query": query, "data": params, "exception": exc}) def get_nodes(self, run_id: int) -> Set[int]: """Retrieve all currently stored node IDs as a set. @@ -495,13 +598,22 @@ def get_nodes(self, run_id: int) -> Set[int]: return set() # Get nodes - query = "SELECT * FROM node;" - rows = self.query(query) + query = "SELECT node_id FROM node WHERE online_until > ?;" + rows = self.query(query, (time.time(),)) result: Set[int] = {row["node_id"] for row in rows} return result - def create_run(self) -> int: - """Create one run and store it in state.""" + def get_node_id(self, client_public_key: bytes) -> Optional[int]: + """Retrieve stored `node_id` filtered by `client_public_keys`.""" + query = "SELECT node_id FROM node WHERE public_key = :public_key;" + row = self.query(query, {"public_key": client_public_key}) + if len(row) > 0: + node_id: int = row[0]["node_id"] + return node_id + return None + + def create_run(self, fab_id: str, fab_version: str) -> int: + """Create a new run for the specified `fab_id` and `fab_version`.""" # Sample a random int64 as run_id run_id: int = int.from_bytes(os.urandom(8), "little", signed=True) @@ -509,12 +621,86 @@ def create_run(self) -> int: query = "SELECT COUNT(*) FROM run WHERE run_id = ?;" # If run_id does not exist if self.query(query, (run_id,))[0]["COUNT(*)"] == 0: - query = "INSERT INTO run VALUES(:run_id);" - self.query(query, {"run_id": run_id}) + query = "INSERT INTO run (run_id, fab_id, fab_version) VALUES (?, ?, ?);" + self.query(query, (run_id, fab_id, fab_version)) return run_id log(ERROR, "Unexpected run creation failure.") return 0 + def 
store_server_private_public_key( + self, private_key: bytes, public_key: bytes + ) -> None: + """Store `server_private_key` and `server_public_key` in state.""" + query = "SELECT COUNT(*) FROM credential" + count = self.query(query)[0]["COUNT(*)"] + if count < 1: + query = ( + "INSERT OR REPLACE INTO credential (private_key, public_key) " + "VALUES (:private_key, :public_key)" + ) + self.query(query, {"private_key": private_key, "public_key": public_key}) + else: + raise RuntimeError("Server private and public key already set") + + def get_server_private_key(self) -> Optional[bytes]: + """Retrieve `server_private_key` in urlsafe bytes.""" + query = "SELECT private_key FROM credential" + rows = self.query(query) + try: + private_key: Optional[bytes] = rows[0]["private_key"] + except IndexError: + private_key = None + return private_key + + def get_server_public_key(self) -> Optional[bytes]: + """Retrieve `server_public_key` in urlsafe bytes.""" + query = "SELECT public_key FROM credential" + rows = self.query(query) + try: + public_key: Optional[bytes] = rows[0]["public_key"] + except IndexError: + public_key = None + return public_key + + def store_client_public_keys(self, public_keys: Set[bytes]) -> None: + """Store a set of `client_public_keys` in state.""" + query = "INSERT INTO public_key (public_key) VALUES (?)" + data = [(key,) for key in public_keys] + self.query(query, data) + + def store_client_public_key(self, public_key: bytes) -> None: + """Store a `client_public_key` in state.""" + query = "INSERT INTO public_key (public_key) VALUES (:public_key)" + self.query(query, {"public_key": public_key}) + + def get_client_public_keys(self) -> Set[bytes]: + """Retrieve all currently stored `client_public_keys` as a set.""" + query = "SELECT public_key FROM public_key" + rows = self.query(query) + result: Set[bytes] = {row["public_key"] for row in rows} + return result + + def get_run(self, run_id: int) -> Tuple[int, str, str]: + """Retrieve information about the 
run with the specified `run_id`.""" + query = "SELECT * FROM run WHERE run_id = ?;" + try: + row = self.query(query, (run_id,))[0] + return run_id, row["fab_id"], row["fab_version"] + except sqlite3.IntegrityError: + log(ERROR, "`run_id` does not exist.") + return 0, "", "" + + def acknowledge_ping(self, node_id: int, ping_interval: float) -> bool: + """Acknowledge a ping received from a node, serving as a heartbeat.""" + # Update `online_until` and `ping_interval` for the given `node_id` + query = "UPDATE node SET online_until = ?, ping_interval = ? WHERE node_id = ?;" + try: + self.query(query, (time.time() + ping_interval, ping_interval, node_id)) + return True + except sqlite3.IntegrityError: + log(ERROR, "`node_id` does not exist.") + return False + def dict_factory( cursor: sqlite3.Cursor, @@ -540,6 +726,7 @@ def task_ins_to_dict(task_msg: TaskIns) -> Dict[str, Any]: "consumer_node_id": task_msg.task.consumer.node_id, "created_at": task_msg.task.created_at, "delivered_at": task_msg.task.delivered_at, + "pushed_at": task_msg.task.pushed_at, "ttl": task_msg.task.ttl, "ancestry": ",".join(task_msg.task.ancestry), "task_type": task_msg.task.task_type, @@ -560,6 +747,7 @@ def task_res_to_dict(task_msg: TaskRes) -> Dict[str, Any]: "consumer_node_id": task_msg.task.consumer.node_id, "created_at": task_msg.task.created_at, "delivered_at": task_msg.task.delivered_at, + "pushed_at": task_msg.task.pushed_at, "ttl": task_msg.task.ttl, "ancestry": ",".join(task_msg.task.ancestry), "task_type": task_msg.task.task_type, @@ -588,6 +776,7 @@ def dict_to_task_ins(task_dict: Dict[str, Any]) -> TaskIns: ), created_at=task_dict["created_at"], delivered_at=task_dict["delivered_at"], + pushed_at=task_dict["pushed_at"], ttl=task_dict["ttl"], ancestry=task_dict["ancestry"].split(","), task_type=task_dict["task_type"], @@ -617,6 +806,7 @@ def dict_to_task_res(task_dict: Dict[str, Any]) -> TaskRes: ), created_at=task_dict["created_at"], delivered_at=task_dict["delivered_at"], + 
pushed_at=task_dict["pushed_at"], ttl=task_dict["ttl"], ancestry=task_dict["ancestry"].split(","), task_type=task_dict["task_type"], diff --git a/src/py/flwr/server/superlink/state/sqlite_state_test.py b/src/py/flwr/server/superlink/state/sqlite_state_test.py index 9eef71e396e3..20927df1cf12 100644 --- a/src/py/flwr/server/superlink/state/sqlite_state_test.py +++ b/src/py/flwr/server/superlink/state/sqlite_state_test.py @@ -38,6 +38,7 @@ def test_ins_res_to_dict(self) -> None: "consumer_node_id", "created_at", "delivered_at", + "pushed_at", "ttl", "ancestry", "task_type", diff --git a/src/py/flwr/server/superlink/state/state.py b/src/py/flwr/server/superlink/state/state.py index 9337ae6d8624..a72062f2a938 100644 --- a/src/py/flwr/server/superlink/state/state.py +++ b/src/py/flwr/server/superlink/state/state.py @@ -16,13 +16,13 @@ import abc -from typing import List, Optional, Set +from typing import List, Optional, Set, Tuple from uuid import UUID from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 -class State(abc.ABC): +class State(abc.ABC): # pylint: disable=R0904 """Abstract State.""" @abc.abstractmethod @@ -132,11 +132,13 @@ def delete_tasks(self, task_ids: Set[UUID]) -> None: """Delete all delivered TaskIns/TaskRes pairs.""" @abc.abstractmethod - def create_node(self) -> int: + def create_node( + self, ping_interval: float, public_key: Optional[bytes] = None + ) -> int: """Create, store in state, and return `node_id`.""" @abc.abstractmethod - def delete_node(self, node_id: int) -> None: + def delete_node(self, node_id: int, public_key: Optional[bytes] = None) -> None: """Remove `node_id` from state.""" @abc.abstractmethod @@ -150,5 +152,72 @@ def get_nodes(self, run_id: int) -> Set[int]: """ @abc.abstractmethod - def create_run(self) -> int: - """Create one run.""" + def get_node_id(self, client_public_key: bytes) -> Optional[int]: + """Retrieve stored `node_id` filtered by `client_public_keys`.""" + + @abc.abstractmethod + def 
create_run(self, fab_id: str, fab_version: str) -> int: + """Create a new run for the specified `fab_id` and `fab_version`.""" + + @abc.abstractmethod + def get_run(self, run_id: int) -> Tuple[int, str, str]: + """Retrieve information about the run with the specified `run_id`. + + Parameters + ---------- + run_id : int + The identifier of the run. + + Returns + ------- + Tuple[int, str, str] + A tuple containing three elements: + - `run_id`: The identifier of the run, same as the specified `run_id`. + - `fab_id`: The identifier of the FAB used in the specified run. + - `fab_version`: The version of the FAB used in the specified run. + """ + + @abc.abstractmethod + def store_server_private_public_key( + self, private_key: bytes, public_key: bytes + ) -> None: + """Store `server_private_key` and `server_public_key` in state.""" + + @abc.abstractmethod + def get_server_private_key(self) -> Optional[bytes]: + """Retrieve `server_private_key` in urlsafe bytes.""" + + @abc.abstractmethod + def get_server_public_key(self) -> Optional[bytes]: + """Retrieve `server_public_key` in urlsafe bytes.""" + + @abc.abstractmethod + def store_client_public_keys(self, public_keys: Set[bytes]) -> None: + """Store a set of `client_public_keys` in state.""" + + @abc.abstractmethod + def store_client_public_key(self, public_key: bytes) -> None: + """Store a `client_public_key` in state.""" + + @abc.abstractmethod + def get_client_public_keys(self) -> Set[bytes]: + """Retrieve all currently stored `client_public_keys` as a set.""" + + @abc.abstractmethod + def acknowledge_ping(self, node_id: int, ping_interval: float) -> bool: + """Acknowledge a ping received from a node, serving as a heartbeat. + + Parameters + ---------- + node_id : int + The `node_id` from which the ping was received. + ping_interval : float + The interval (in seconds) from the current timestamp within which the next + ping from this node must be received. 
This acts as a hard deadline to ensure + an accurate assessment of the node's availability. + + Returns + ------- + is_acknowledged : bool + True if the ping is successfully acknowledged; otherwise, False. + """ diff --git a/src/py/flwr/server/superlink/state/state_test.py b/src/py/flwr/server/superlink/state/state_test.py index 01ac64de1380..9b0153ca548a 100644 --- a/src/py/flwr/server/superlink/state/state_test.py +++ b/src/py/flwr/server/superlink/state/state_test.py @@ -16,13 +16,21 @@ # pylint: disable=invalid-name, disable=R0904 import tempfile +import time import unittest from abc import abstractmethod from datetime import datetime, timezone from typing import List +from unittest.mock import patch from uuid import uuid4 from flwr.common import DEFAULT_TTL +from flwr.common.constant import ErrorCode +from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( + generate_key_pairs, + private_key_to_bytes, + public_key_to_bytes, +) from flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.proto.recordset_pb2 import RecordSet # pylint: disable=E0611 from flwr.proto.task_pb2 import Task, TaskIns, TaskRes # pylint: disable=E0611 @@ -40,6 +48,20 @@ def state_factory(self) -> State: """Provide state implementation to test.""" raise NotImplementedError() + def test_create_and_get_run(self) -> None: + """Test if create_run and get_run work correctly.""" + # Prepare + state: State = self.state_factory() + run_id = state.create_run("Mock/mock", "v1.0.0") + + # Execute + actual_run_id, fab_id, fab_version = state.get_run(run_id) + + # Assert + assert actual_run_id == run_id + assert fab_id == "Mock/mock" + assert fab_version == "v1.0.0" + def test_get_task_ins_empty(self) -> None: """Validate that a new state has no TaskIns.""" # Prepare @@ -67,12 +89,12 @@ def test_store_task_ins_one(self) -> None: # Prepare consumer_node_id = 1 state = self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") 
task_ins = create_task_ins( consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id ) - assert task_ins.task.created_at == "" # pylint: disable=no-member + assert task_ins.task.created_at < time.time() # pylint: disable=no-member assert task_ins.task.delivered_at == "" # pylint: disable=no-member # Execute @@ -89,12 +111,9 @@ def test_store_task_ins_one(self) -> None: actual_task = actual_task_ins.task - assert actual_task.created_at != "" assert actual_task.delivered_at != "" - assert datetime.fromisoformat(actual_task.created_at) > datetime( - 2020, 1, 1, tzinfo=timezone.utc - ) + assert actual_task.created_at < actual_task.pushed_at assert datetime.fromisoformat(actual_task.delivered_at) > datetime( 2020, 1, 1, tzinfo=timezone.utc ) @@ -105,7 +124,7 @@ def test_store_and_delete_tasks(self) -> None: # Prepare consumer_node_id = 1 state = self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") task_ins_0 = create_task_ins( consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id ) @@ -179,7 +198,7 @@ def test_task_ins_store_anonymous_and_retrieve_anonymous(self) -> None: """ # Prepare state: State = self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) # Execute @@ -194,7 +213,7 @@ def test_task_ins_store_anonymous_and_fail_retrieving_identitiy(self) -> None: """Store anonymous TaskIns and fail to retrieve it.""" # Prepare state: State = self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) # Execute @@ -208,7 +227,7 @@ def test_task_ins_store_identity_and_fail_retrieving_anonymous(self) -> None: """Store identity TaskIns and fail retrieving it as anonymous.""" # Prepare state: State = self.state_factory() - run_id = state.create_run() + run_id = 
state.create_run("mock/mock", "v1.0.0") task_ins = create_task_ins(consumer_node_id=1, anonymous=False, run_id=run_id) # Execute @@ -222,7 +241,7 @@ def test_task_ins_store_identity_and_retrieve_identity(self) -> None: """Store identity TaskIns and retrieve it.""" # Prepare state: State = self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") task_ins = create_task_ins(consumer_node_id=1, anonymous=False, run_id=run_id) # Execute @@ -239,7 +258,7 @@ def test_task_ins_store_delivered_and_fail_retrieving(self) -> None: """Fail retrieving delivered task.""" # Prepare state: State = self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") task_ins = create_task_ins(consumer_node_id=1, anonymous=False, run_id=run_id) # Execute @@ -282,7 +301,7 @@ def test_task_res_store_and_retrieve_by_task_ins_id(self) -> None: """Store TaskRes retrieve it by task_ins_id.""" # Prepare state: State = self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") task_ins_id = uuid4() task_res = create_task_res( producer_node_id=0, @@ -303,7 +322,7 @@ def test_node_ids_initial_state(self) -> None: """Test retrieving all node_ids and empty initial state.""" # Prepare state: State = self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") # Execute retrieved_node_ids = state.get_nodes(run_id) @@ -315,24 +334,63 @@ def test_create_node_and_get_nodes(self) -> None: """Test creating a client node.""" # Prepare state: State = self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") node_ids = [] # Execute for _ in range(10): - node_ids.append(state.create_node()) + node_ids.append(state.create_node(ping_interval=10)) retrieved_node_ids = state.get_nodes(run_id) # Assert for i in retrieved_node_ids: assert i in node_ids + def test_create_node_public_key(self) -> None: + """Test 
creating a client node with public key.""" + # Prepare + state: State = self.state_factory() + public_key = b"mock" + run_id = state.create_run("mock/mock", "v1.0.0") + + # Execute + node_id = state.create_node(ping_interval=10, public_key=public_key) + retrieved_node_ids = state.get_nodes(run_id) + retrieved_node_id = state.get_node_id(public_key) + + # Assert + assert len(retrieved_node_ids) == 1 + assert retrieved_node_id == node_id + + def test_create_node_public_key_twice(self) -> None: + """Test creating a client node with same public key twice.""" + # Prepare + state: State = self.state_factory() + public_key = b"mock" + run_id = state.create_run("mock/mock", "v1.0.0") + node_id = state.create_node(ping_interval=10, public_key=public_key) + + # Execute + new_node_id = state.create_node(ping_interval=10, public_key=public_key) + retrieved_node_ids = state.get_nodes(run_id) + retrieved_node_id = state.get_node_id(public_key) + + # Assert + assert new_node_id == 0 + assert len(retrieved_node_ids) == 1 + assert retrieved_node_id == node_id + + # Assert node_ids and public_key_to_node_id are synced + if isinstance(state, InMemoryState): + assert len(state.node_ids) == 1 + assert len(state.public_key_to_node_id) == 1 + def test_delete_node(self) -> None: """Test deleting a client node.""" # Prepare state: State = self.state_factory() - run_id = state.create_run() - node_id = state.create_node() + run_id = state.create_run("mock/mock", "v1.0.0") + node_id = state.create_node(ping_interval=10) # Execute state.delete_node(node_id) @@ -341,13 +399,84 @@ def test_delete_node(self) -> None: # Assert assert len(retrieved_node_ids) == 0 + def test_delete_node_public_key(self) -> None: + """Test deleting a client node with public key.""" + # Prepare + state: State = self.state_factory() + public_key = b"mock" + run_id = state.create_run("mock/mock", "v1.0.0") + node_id = state.create_node(ping_interval=10, public_key=public_key) + + # Execute + state.delete_node(node_id, 
public_key=public_key) + retrieved_node_ids = state.get_nodes(run_id) + retrieved_node_id = state.get_node_id(public_key) + + # Assert + assert len(retrieved_node_ids) == 0 + assert retrieved_node_id is None + + def test_delete_node_public_key_none(self) -> None: + """Test deleting a client node with public key.""" + # Prepare + state: State = self.state_factory() + public_key = b"mock" + run_id = state.create_run("mock/mock", "v1.0.0") + node_id = 0 + + # Execute & Assert + with self.assertRaises(ValueError): + state.delete_node(node_id, public_key=public_key) + + retrieved_node_ids = state.get_nodes(run_id) + retrieved_node_id = state.get_node_id(public_key) + + assert len(retrieved_node_ids) == 0 + assert retrieved_node_id is None + + def test_delete_node_wrong_public_key(self) -> None: + """Test deleting a client node with wrong public key.""" + # Prepare + state: State = self.state_factory() + public_key = b"mock" + wrong_public_key = b"mock_mock" + run_id = state.create_run("mock/mock", "v1.0.0") + node_id = state.create_node(ping_interval=10, public_key=public_key) + + # Execute & Assert + with self.assertRaises(ValueError): + state.delete_node(node_id, public_key=wrong_public_key) + + retrieved_node_ids = state.get_nodes(run_id) + retrieved_node_id = state.get_node_id(public_key) + + assert len(retrieved_node_ids) == 1 + assert retrieved_node_id == node_id + + def test_get_node_id_wrong_public_key(self) -> None: + """Test retrieving a client node with wrong public key.""" + # Prepare + state: State = self.state_factory() + public_key = b"mock" + wrong_public_key = b"mock_mock" + run_id = state.create_run("mock/mock", "v1.0.0") + + # Execute + state.create_node(ping_interval=10, public_key=public_key) + retrieved_node_ids = state.get_nodes(run_id) + retrieved_node_id = state.get_node_id(wrong_public_key) + + # Assert + assert len(retrieved_node_ids) == 1 + assert retrieved_node_id is None + def test_get_nodes_invalid_run_id(self) -> None: """Test retrieving 
all node_ids with invalid run_id.""" # Prepare state: State = self.state_factory() - state.create_run() + state.create_run("mock/mock", "v1.0.0") invalid_run_id = 61016 - state.create_node() + state.create_node(ping_interval=10) # Execute retrieved_node_ids = state.get_nodes(invalid_run_id) @@ -359,7 +488,7 @@ def test_num_task_ins(self) -> None: """Test if num_tasks returns correct number of not delivered task_ins.""" # Prepare state: State = self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") task_0 = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) task_1 = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) @@ -377,7 +506,7 @@ def test_num_task_res(self) -> None: """Test if num_tasks returns correct number of not delivered task_res.""" # Prepare state: State = self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") task_0 = create_task_res( producer_node_id=0, anonymous=True, ancestry=["1"], run_id=run_id ) @@ -395,6 +524,146 @@ def test_num_task_res(self) -> None: # Assert assert num == 2 + def test_server_private_public_key(self) -> None: + """Test get server private and public key after inserting.""" + # Prepare + state: State = self.state_factory() + private_key, public_key = generate_key_pairs() + private_key_bytes = private_key_to_bytes(private_key) + public_key_bytes = public_key_to_bytes(public_key) + + # Execute + state.store_server_private_public_key(private_key_bytes, public_key_bytes) + server_private_key = state.get_server_private_key() + server_public_key = state.get_server_public_key() + + # Assert + assert server_private_key == private_key_bytes + assert server_public_key == public_key_bytes + + def test_server_private_public_key_none(self) -> None: + """Test get server private and public key without inserting.""" + # Prepare + state: State = self.state_factory() + + # Execute + server_private_key = 
state.get_server_private_key() + server_public_key = state.get_server_public_key() + + # Assert + assert server_private_key is None + assert server_public_key is None + + def test_store_server_private_public_key_twice(self) -> None: + """Test inserting private and public key twice.""" + # Prepare + state: State = self.state_factory() + private_key, public_key = generate_key_pairs() + private_key_bytes = private_key_to_bytes(private_key) + public_key_bytes = public_key_to_bytes(public_key) + new_private_key, new_public_key = generate_key_pairs() + new_private_key_bytes = private_key_to_bytes(new_private_key) + new_public_key_bytes = public_key_to_bytes(new_public_key) + + # Execute + state.store_server_private_public_key(private_key_bytes, public_key_bytes) + + # Assert + with self.assertRaises(RuntimeError): + state.store_server_private_public_key( + new_private_key_bytes, new_public_key_bytes + ) + + def test_client_public_keys(self) -> None: + """Test store_client_public_keys and get_client_public_keys from state.""" + # Prepare + state: State = self.state_factory() + key_pairs = [generate_key_pairs() for _ in range(3)] + public_keys = {public_key_to_bytes(pair[1]) for pair in key_pairs} + + # Execute + state.store_client_public_keys(public_keys) + client_public_keys = state.get_client_public_keys() + + # Assert + assert client_public_keys == public_keys + + def test_client_public_key(self) -> None: + """Test store_client_public_key and get_client_public_keys from state.""" + # Prepare + state: State = self.state_factory() + key_pairs = [generate_key_pairs() for _ in range(3)] + public_keys = {public_key_to_bytes(pair[1]) for pair in key_pairs} + + # Execute + for public_key in public_keys: + state.store_client_public_key(public_key) + client_public_keys = state.get_client_public_keys() + + # Assert + assert client_public_keys == public_keys + + def test_acknowledge_ping(self) -> None: + """Test if acknowledge_ping works and if get_nodes return online nodes.""" + 
# Prepare + state: State = self.state_factory() + run_id = state.create_run("mock/mock", "v1.0.0") + node_ids = [state.create_node(ping_interval=10) for _ in range(100)] + for node_id in node_ids[:70]: + state.acknowledge_ping(node_id, ping_interval=30) + for node_id in node_ids[70:]: + state.acknowledge_ping(node_id, ping_interval=90) + + # Execute + current_time = time.time() + with patch("time.time", side_effect=lambda: current_time + 50): + actual_node_ids = state.get_nodes(run_id) + + # Assert + self.assertSetEqual(actual_node_ids, set(node_ids[70:])) + + def test_node_unavailable_error(self) -> None: + """Test if get_task_res return TaskRes containing node unavailable error.""" + # Prepare + state: State = self.state_factory() + run_id = state.create_run("mock/mock", "v1.0.0") + node_id_0 = state.create_node(ping_interval=90) + node_id_1 = state.create_node(ping_interval=30) + # Create and store TaskIns + task_ins_0 = create_task_ins( + consumer_node_id=node_id_0, anonymous=False, run_id=run_id + ) + task_ins_1 = create_task_ins( + consumer_node_id=node_id_1, anonymous=False, run_id=run_id + ) + task_id_0 = state.store_task_ins(task_ins=task_ins_0) + task_id_1 = state.store_task_ins(task_ins=task_ins_1) + assert task_id_0 is not None and task_id_1 is not None + + # Get TaskIns to mark them delivered + state.get_task_ins(node_id=node_id_0, limit=None) + + # Create and store TaskRes + task_res_0 = create_task_res( + producer_node_id=100, + anonymous=False, + ancestry=[str(task_id_0)], + run_id=run_id, + ) + state.store_task_res(task_res_0) + + # Execute + current_time = time.time() + task_res_list: List[TaskRes] = [] + with patch("time.time", side_effect=lambda: current_time + 50): + task_res_list = state.get_task_res({task_id_0, task_id_1}, limit=None) + + # Assert + assert len(task_res_list) == 2 + err_taskres = task_res_list[1] + assert err_taskres.task.HasField("error") + assert err_taskres.task.error.code == ErrorCode.NODE_UNAVAILABLE + def 
create_task_ins( consumer_node_id: int, @@ -418,8 +687,10 @@ def create_task_ins( task_type="mock", recordset=RecordSet(parameters={}, metrics={}, configs={}), ttl=DEFAULT_TTL, + created_at=time.time(), ), ) + task.task.pushed_at = time.time() return task @@ -441,8 +712,10 @@ def create_task_res( task_type="mock", recordset=RecordSet(parameters={}, metrics={}, configs={}), ttl=DEFAULT_TTL, + created_at=time.time(), ), ) + task_res.task.pushed_at = time.time() return task_res @@ -476,7 +749,7 @@ def test_initialize(self) -> None: result = state.query("SELECT name FROM sqlite_schema;") # Assert - assert len(result) == 8 + assert len(result) == 13 class SqliteFileBasedTest(StateTest, unittest.TestCase): @@ -501,7 +774,7 @@ def test_initialize(self) -> None: result = state.query("SELECT name FROM sqlite_schema;") # Assert - assert len(result) == 8 + assert len(result) == 13 if __name__ == "__main__": diff --git a/src/py/flwr/server/superlink/state/utils.py b/src/py/flwr/server/superlink/state/utils.py new file mode 100644 index 000000000000..233a90946cc7 --- /dev/null +++ b/src/py/flwr/server/superlink/state/utils.py @@ -0,0 +1,56 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Utility functions for State.""" + + +import time +from logging import ERROR +from uuid import uuid4 + +from flwr.common import log +from flwr.common.constant import ErrorCode +from flwr.proto.error_pb2 import Error # pylint: disable=E0611 +from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.task_pb2 import Task, TaskIns, TaskRes # pylint: disable=E0611 + +NODE_UNAVAILABLE_ERROR_REASON = ( + "Error: Node Unavailable - The destination node is currently unavailable. " + "It exceeds the time limit specified in its last ping." +) + + +def make_node_unavailable_taskres(ref_taskins: TaskIns) -> TaskRes: + """Generate a TaskRes with a node unavailable error from a TaskIns.""" + current_time = time.time() + ttl = ref_taskins.task.ttl - (current_time - ref_taskins.task.created_at) + if ttl < 0: + log(ERROR, "Creating TaskRes for TaskIns that exceeds its TTL.") + ttl = 0 + return TaskRes( + task_id=str(uuid4()), + group_id=ref_taskins.group_id, + run_id=ref_taskins.run_id, + task=Task( + producer=Node(node_id=ref_taskins.task.consumer.node_id, anonymous=False), + consumer=Node(node_id=ref_taskins.task.producer.node_id, anonymous=False), + created_at=current_time, + ttl=ttl, + ancestry=[ref_taskins.task_id], + task_type=ref_taskins.task.task_type, + error=Error( + code=ErrorCode.NODE_UNAVAILABLE, reason=NODE_UNAVAILABLE_ERROR_REASON + ), + ), + ) diff --git a/src/py/flwr/server/utils/validator.py b/src/py/flwr/server/utils/validator.py index 285807d8d0e7..c0b0ec85761c 100644 --- a/src/py/flwr/server/utils/validator.py +++ b/src/py/flwr/server/utils/validator.py @@ -31,13 +31,21 @@ def validate_task_ins_or_res(tasks_ins_res: Union[TaskIns, TaskRes]) -> List[str if not tasks_ins_res.HasField("task"): validation_errors.append("`task` does not set field `task`") - # Created/delivered/TTL - if tasks_ins_res.task.created_at != "": - validation_errors.append("`created_at` 
must be an empty str") + # Created/delivered/TTL/Pushed + if ( + tasks_ins_res.task.created_at < 1711497600.0 + ): # unix timestamp of 27 March 2024 00h:00m:00s UTC + validation_errors.append( + "`created_at` must be a float that records the unix timestamp " + "in seconds when the message was created." + ) if tasks_ins_res.task.delivered_at != "": validation_errors.append("`delivered_at` must be an empty str") if tasks_ins_res.task.ttl <= 0: validation_errors.append("`ttl` must be higher than zero") + if tasks_ins_res.task.pushed_at < 1711497600.0: + # unix timestamp of 27 March 2024 00h:00m:00s UTC + validation_errors.append("`pushed_at` is not a recent timestamp") # TaskIns specific if isinstance(tasks_ins_res, TaskIns): diff --git a/src/py/flwr/server/utils/validator_test.py b/src/py/flwr/server/utils/validator_test.py index 926103c6b09a..61fe094c23d4 100644 --- a/src/py/flwr/server/utils/validator_test.py +++ b/src/py/flwr/server/utils/validator_test.py @@ -15,6 +15,7 @@ """Validator tests.""" +import time import unittest from typing import List, Tuple @@ -98,8 +99,11 @@ def create_task_ins( task_type="mock", recordset=RecordSet(parameters={}, metrics={}, configs={}), ttl=DEFAULT_TTL, + created_at=time.time(), ), ) + + task.task.pushed_at = time.time() return task @@ -120,6 +124,9 @@ def create_task_res( task_type="mock", recordset=RecordSet(parameters={}, metrics={}, configs={}), ttl=DEFAULT_TTL, + created_at=time.time(), ), ) + + task_res.task.pushed_at = time.time() return task_res diff --git a/src/py/flwr/server/workflow/default_workflows.py b/src/py/flwr/server/workflow/default_workflows.py index 42b1151f9835..80759316da84 100644 --- a/src/py/flwr/server/workflow/default_workflows.py +++ b/src/py/flwr/server/workflow/default_workflows.py @@ -17,13 +17,23 @@ import io import timeit -from logging import INFO -from typing import Optional, cast +from logging import INFO, WARN +from typing import List, Optional, Tuple, Union, cast import 
flwr.common.recordset_compat as compat -from flwr.common import DEFAULT_TTL, ConfigsRecord, Context, GetParametersIns, log +from flwr.common import ( + Code, + ConfigsRecord, + Context, + EvaluateRes, + FitRes, + GetParametersIns, + ParametersRecord, + log, +) from flwr.common.constant import MessageType, MessageTypeLegacy +from ..client_proxy import ClientProxy from ..compat.app_utils import start_update_client_manager_thread from ..compat.legacy_context import LegacyContext from ..driver import Driver @@ -88,7 +98,12 @@ def __call__(self, driver: Driver, context: Context) -> None: hist = context.history log(INFO, "") log(INFO, "[SUMMARY]") - log(INFO, "Run finished %s rounds in %.2fs", context.config.num_rounds, elapsed) + log( + INFO, + "Run finished %s round(s) in %.2fs", + context.config.num_rounds, + elapsed, + ) for idx, line in enumerate(io.StringIO(str(hist))): if idx == 0: log(INFO, "%s", line.strip("\n")) @@ -127,13 +142,27 @@ def default_init_params_workflow(driver: Driver, context: Context) -> None: message_type=MessageTypeLegacy.GET_PARAMETERS, dst_node_id=random_client.node_id, group_id="0", - ttl=DEFAULT_TTL, ) ] ) - log(INFO, "Received initial parameters from one random client") msg = list(messages)[0] - paramsrecord = next(iter(msg.content.parameters_records.values())) + + if ( + msg.has_content() + and compat._extract_status_from_recordset( # pylint: disable=W0212 + "getparametersres", msg.content + ).code + == Code.OK + ): + log(INFO, "Received initial parameters from one random client") + paramsrecord = next(iter(msg.content.parameters_records.values())) + else: + log( + WARN, + "Failed to receive initial parameters from the client." 
+ " Empty initial parameters will be used.", + ) + paramsrecord = ParametersRecord() context.state.parameters_records[MAIN_PARAMS_RECORD] = paramsrecord @@ -226,7 +255,6 @@ def default_fit_workflow( # pylint: disable=R0914 message_type=MessageType.TRAIN, dst_node_id=proxy.node_id, group_id=str(current_round), - ttl=DEFAULT_TTL, ) for proxy, fitins in client_instructions ] @@ -246,14 +274,20 @@ def default_fit_workflow( # pylint: disable=R0914 ) # Aggregate training results - results = [ - ( - node_id_to_proxy[msg.metadata.src_node_id], - compat.recordset_to_fitres(msg.content, False), - ) - for msg in messages - ] - aggregated_result = context.strategy.aggregate_fit(current_round, results, []) + results: List[Tuple[ClientProxy, FitRes]] = [] + failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + for msg in messages: + if msg.has_content(): + proxy = node_id_to_proxy[msg.metadata.src_node_id] + fitres = compat.recordset_to_fitres(msg.content, False) + if fitres.status.code == Code.OK: + results.append((proxy, fitres)) + else: + failures.append((proxy, fitres)) + else: + failures.append(Exception(msg.error)) + + aggregated_result = context.strategy.aggregate_fit(current_round, results, failures) parameters_aggregated, metrics_aggregated = aggregated_result # Update the parameters and write history @@ -267,6 +301,7 @@ def default_fit_workflow( # pylint: disable=R0914 ) +# pylint: disable-next=R0914 def default_evaluate_workflow(driver: Driver, context: Context) -> None: """Execute the default workflow for a single evaluate round.""" if not isinstance(context, LegacyContext): @@ -306,7 +341,6 @@ def default_evaluate_workflow(driver: Driver, context: Context) -> None: message_type=MessageType.EVALUATE, dst_node_id=proxy.node_id, group_id=str(current_round), - ttl=DEFAULT_TTL, ) for proxy, evalins in client_instructions ] @@ -326,14 +360,22 @@ def default_evaluate_workflow(driver: Driver, context: Context) -> None: ) # Aggregate the evaluation results 
- results = [ - ( - node_id_to_proxy[msg.metadata.src_node_id], - compat.recordset_to_evaluateres(msg.content), - ) - for msg in messages - ] - aggregated_result = context.strategy.aggregate_evaluate(current_round, results, []) + results: List[Tuple[ClientProxy, EvaluateRes]] = [] + failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]] = [] + for msg in messages: + if msg.has_content(): + proxy = node_id_to_proxy[msg.metadata.src_node_id] + evalres = compat.recordset_to_evaluateres(msg.content) + if evalres.status.code == Code.OK: + results.append((proxy, evalres)) + else: + failures.append((proxy, evalres)) + else: + failures.append(Exception(msg.error)) + + aggregated_result = context.strategy.aggregate_evaluate( + current_round, results, failures + ) loss_aggregated, metrics_aggregated = aggregated_result diff --git a/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py b/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py index 326947b653ff..d6d97c28f313 100644 --- a/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py +++ b/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py @@ -22,7 +22,6 @@ import flwr.common.recordset_compat as compat from flwr.common import ( - DEFAULT_TTL, ConfigsRecord, Context, FitRes, @@ -374,7 +373,6 @@ def make(nid: int) -> Message: message_type=MessageType.TRAIN, dst_node_id=nid, group_id=str(cfg[WorkflowKey.CURRENT_ROUND]), - ttl=DEFAULT_TTL, ) log( @@ -422,7 +420,6 @@ def make(nid: int) -> Message: message_type=MessageType.TRAIN, dst_node_id=nid, group_id=str(cfg[WorkflowKey.CURRENT_ROUND]), - ttl=DEFAULT_TTL, ) # Broadcast public keys to clients and receive secret key shares @@ -493,7 +490,6 @@ def make(nid: int) -> Message: message_type=MessageType.TRAIN, dst_node_id=nid, group_id=str(cfg[WorkflowKey.CURRENT_ROUND]), - ttl=DEFAULT_TTL, ) log( @@ -564,7 +560,6 @@ def make(nid: int) -> Message: message_type=MessageType.TRAIN, dst_node_id=nid, 
group_id=str(current_round), - ttl=DEFAULT_TTL, ) log( diff --git a/src/py/flwr/simulation/__init__.py b/src/py/flwr/simulation/__init__.py index d36d9977d1c5..57b0b01eb319 100644 --- a/src/py/flwr/simulation/__init__.py +++ b/src/py/flwr/simulation/__init__.py @@ -17,7 +17,7 @@ import importlib -from flwr.simulation.run_simulation import run_simulation, run_simulation_from_cli +from flwr.simulation.run_simulation import run_simulation is_ray_installed = importlib.util.find_spec("ray") is not None @@ -36,4 +36,4 @@ def start_simulation(*args, **kwargs): # type: ignore raise ImportError(RAY_IMPORT_ERROR) -__all__ = ["start_simulation", "run_simulation_from_cli", "run_simulation"] +__all__ = ["start_simulation", "run_simulation"] diff --git a/src/py/flwr/simulation/app.py b/src/py/flwr/simulation/app.py index ff18f37664be..4b4b7249ccd3 100644 --- a/src/py/flwr/simulation/app.py +++ b/src/py/flwr/simulation/app.py @@ -15,6 +15,8 @@ """Flower simulation app.""" +import asyncio +import logging import sys import threading import traceback @@ -27,7 +29,7 @@ from flwr.client import ClientFn from flwr.common import EventType, event -from flwr.common.logger import log +from flwr.common.logger import log, set_logger_propagation from flwr.server.client_manager import ClientManager from flwr.server.history import History from flwr.server.server import Server, init_defaults, run_fl @@ -156,6 +158,7 @@ def start_simulation( is an advanced feature. 
For all details, please refer to the Ray documentation: https://docs.ray.io/en/latest/ray-core/scheduling/index.html + Returns ------- hist : flwr.server.history.History @@ -167,6 +170,18 @@ def start_simulation( {"num_clients": len(clients_ids) if clients_ids is not None else num_clients}, ) + # Set logger propagation + loop: Optional[asyncio.AbstractEventLoop] = None + try: + loop = asyncio.get_running_loop() + except RuntimeError: + loop = None + finally: + if loop and loop.is_running(): + # Set logger propagation to False to prevent duplicated log output in Colab. + logger = logging.getLogger("flwr") + _ = set_logger_propagation(logger, False) + # Initialize server and server config initialized_server, initialized_config = init_defaults( server=server, diff --git a/src/py/flwr/simulation/ray_transport/ray_actor.py b/src/py/flwr/simulation/ray_transport/ray_actor.py index 08d0576e39f0..9caf0fc3e6c0 100644 --- a/src/py/flwr/simulation/ray_transport/ray_actor.py +++ b/src/py/flwr/simulation/ray_transport/ray_actor.py @@ -16,7 +16,6 @@ import asyncio import threading -import traceback from abc import ABC from logging import DEBUG, ERROR, WARNING from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union @@ -25,22 +24,13 @@ from ray import ObjectRef from ray.util.actor_pool import ActorPool -from flwr.client.client_app import ClientApp, LoadClientAppError +from flwr.client.client_app import ClientApp, ClientAppException, LoadClientAppError from flwr.common import Context, Message from flwr.common.logger import log ClientAppFn = Callable[[], ClientApp] -class ClientException(Exception): - """Raised when client side logic crashes with an exception.""" - - def __init__(self, message: str): - div = ">" * 7 - self.message = "\n" + div + "A ClientException occurred." 
+ message - super().__init__(self.message) - - class VirtualClientEngineActor(ABC): """Abstract base class for VirtualClientEngine Actors.""" @@ -71,17 +61,7 @@ def run( raise load_ex except Exception as ex: - client_trace = traceback.format_exc() - mssg = ( - "\n\tSomething went wrong when running your client run." - "\n\tClient " - + cid - + " crashed when the " - + self.__class__.__name__ - + " was running its run." - "\n\tException triggered on the client side: " + client_trace, - ) - raise ClientException(str(mssg)) from ex + raise ClientAppException(str(ex)) from ex return cid, out_message, context @@ -493,13 +473,17 @@ async def submit( self._future_to_actor[future] = actor return future + async def add_actor_back_to_pool(self, future: Any) -> None: + """Ad actor assigned to run future back into the pool.""" + actor = self._future_to_actor.pop(future) + await self.pool.put(actor) + async def fetch_result_and_return_actor_to_pool( self, future: Any ) -> Tuple[Message, Context]: """Pull result given a future and add actor back to pool.""" # Get actor that ran job - actor = self._future_to_actor.pop(future) - await self.pool.put(actor) + await self.add_actor_back_to_pool(future) # Retrieve result for object store # Instead of doing ray.get(future) we await it _, out_mssg, updated_context = await future diff --git a/src/py/flwr/simulation/run_simulation.py b/src/py/flwr/simulation/run_simulation.py index 56fce363726a..2dbeef1a261c 100644 --- a/src/py/flwr/simulation/run_simulation.py +++ b/src/py/flwr/simulation/run_simulation.py @@ -24,15 +24,13 @@ from time import sleep from typing import Dict, Optional -import grpc - from flwr.client import ClientApp from flwr.common import EventType, event, log +from flwr.common.logger import set_logger_propagation, update_console_handler from flwr.common.typing import ConfigsRecordValues -from flwr.server.driver.driver import Driver +from flwr.server.driver import Driver, InMemoryDriver from flwr.server.run_serverapp import 
run from flwr.server.server_app import ServerApp -from flwr.server.superlink.driver.driver_grpc import run_driver_api_grpc from flwr.server.superlink.fleet import vce from flwr.server.superlink.state import StateFactory from flwr.simulation.ray_transport.utils import ( @@ -55,7 +53,6 @@ def run_simulation_from_cli() -> None: backend_name=args.backend, backend_config=backend_config_dict, app_dir=args.app_dir, - driver_api_address=args.driver_api_address, enable_tf_gpu_growth=args.enable_tf_gpu_growth, verbose_logging=args.verbose, ) @@ -154,7 +151,7 @@ def server_th_with_start_checks( # type: ignore # Upon completion, trigger stop event if one was passed if stop_event is not None: stop_event.set() - log(WARNING, "Triggered stop event for Simulation Engine.") + log(DEBUG, "Triggered stop event for Simulation Engine.") serverapp_th = threading.Thread( target=server_th_with_start_checks, @@ -176,7 +173,6 @@ def _main_loop( num_supernodes: int, backend_name: str, backend_config_stream: str, - driver_api_address: str, app_dir: str, enable_tf_gpu_growth: bool, client_app: Optional[ClientApp] = None, @@ -193,21 +189,11 @@ def _main_loop( # Initialize StateFactory state_factory = StateFactory(":flwr-in-memory-state:") - # Start Driver API - driver_server: grpc.Server = run_driver_api_grpc( - address=driver_api_address, - state_factory=state_factory, - certificates=None, - ) - f_stop = asyncio.Event() serverapp_th = None try: # Initialize Driver - driver = Driver( - driver_service_address=driver_api_address, - root_certificates=None, - ) + driver = InMemoryDriver(state_factory) # Get and run ServerApp thread serverapp_th = run_serverapp_th( @@ -238,9 +224,6 @@ def _main_loop( raise RuntimeError("An error was encountered. 
Ending simulation.") from ex finally: - # Stop Driver - driver_server.stop(grace=0) - driver.close() # Trigger stop event f_stop.set() @@ -248,7 +231,7 @@ def _main_loop( if serverapp_th: serverapp_th.join() - log(INFO, "Stopping Simulation Engine now.") + log(DEBUG, "Stopping Simulation Engine now.") # pylint: disable=too-many-arguments,too-many-locals @@ -261,7 +244,6 @@ def _run_simulation( client_app_attr: Optional[str] = None, server_app_attr: Optional[str] = None, app_dir: str = "", - driver_api_address: str = "0.0.0.0:9091", enable_tf_gpu_growth: bool = False, verbose_logging: bool = False, ) -> None: @@ -301,9 +283,6 @@ def _run_simulation( Add specified directory to the PYTHONPATH and load `ClientApp` from there. (Default: current working directory.) - driver_api_address : str (default: "0.0.0.0:9091") - Driver API (gRPC) server address (IPv4, IPv6, or a domain name) - enable_tf_gpu_growth : bool (default: False) A boolean to indicate whether to enable GPU growth on the main thread. This is desirable if you make use of a TensorFlow model on your `ServerApp` while @@ -316,14 +295,16 @@ def _run_simulation( When diabled, only INFO, WARNING and ERROR log messages will be shown. If enabled, DEBUG-level logs will be displayed. 
""" - # Set logging level - if not verbose_logging: - logger = logging.getLogger("flwr") - logger.setLevel(INFO) - if backend_config is None: backend_config = {} + # Set logging level + logger = logging.getLogger("flwr") + if verbose_logging: + update_console_handler(level=DEBUG, timestamps=True, colored=True) + else: + backend_config["silent"] = True + if enable_tf_gpu_growth: # Check that Backend config has also enabled using GPU growth use_tf = backend_config.get("tensorflow", False) @@ -339,7 +320,6 @@ def _run_simulation( num_supernodes, backend_name, backend_config_stream, - driver_api_address, app_dir, enable_tf_gpu_growth, client_app, @@ -364,6 +344,8 @@ def _run_simulation( finally: if run_in_thread: + # Set logger propagation to False to prevent duplicated log output in Colab. + logger = set_logger_propagation(logger, False) log(DEBUG, "Starting Simulation Engine on a new thread.") simulation_engine_th = threading.Thread(target=_main_loop, args=args) simulation_engine_th.start() @@ -394,12 +376,6 @@ def _parse_args_run_simulation() -> argparse.ArgumentParser: required=True, help="Number of simulated SuperNodes.", ) - parser.add_argument( - "--driver-api-address", - default="0.0.0.0:9091", - type=str, - help="For example: `server:app` or `project.package.module:wrapper.app`", - ) parser.add_argument( "--backend", default="ray", diff --git a/src/py/flwr_experimental/__init__.py b/src/py/flwr_experimental/__init__.py deleted file mode 100644 index 1e9952588480..000000000000 --- a/src/py/flwr_experimental/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== diff --git a/src/py/flwr_experimental/baseline/__init__.py b/src/py/flwr_experimental/baseline/__init__.py deleted file mode 100644 index b2fefc3f319d..000000000000 --- a/src/py/flwr_experimental/baseline/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower Baselines.""" diff --git a/src/py/flwr_experimental/baseline/command.py b/src/py/flwr_experimental/baseline/command.py deleted file mode 100644 index a776347bacf6..000000000000 --- a/src/py/flwr_experimental/baseline/command.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Provides functions to construct various Flower CLI commands.""" - - -from typing import List, Optional - -from flwr_experimental.ops.instance import Instance - - -def install_wheel( - wheel_remote_path: str, wheel_extras: Optional[List[str]] = None -) -> str: - """Return install command for wheel. - - Remove previous versions if existing. - """ - extras = ["http-logger"] - - if wheel_extras: - extras += wheel_extras - - extras_str = ",".join(extras) - - return ( - "python3.7 -m pip uninstall -y flwr && " - + f"python3.7 -m pip install '{wheel_remote_path}[{extras_str}]'" - ) - - -def start_logserver( - logserver_s3_bucket: Optional[str] = None, logserver_s3_key: Optional[str] = None -) -> str: - """Return command to run logserver.""" - cmd = "screen -d -m python3.7 -m flwr_experimental.logserver" - - if logserver_s3_bucket is not None and logserver_s3_key is not None: - cmd += f" --s3_bucket={logserver_s3_bucket}" + f" --s3_key={logserver_s3_key}" - - return cmd - - -# pylint: disable=too-many-arguments -def start_server(log_host: str, baseline: str, setting: str) -> str: - """Build command to run server.""" - return ( - "screen -d -m" - + f" python3.7 -m flwr_experimental.baseline.{baseline}.server" - + f" --log_host={log_host}" - + f" --setting={setting}" - ) - - -def start_client( - server_address: str, log_host: str, baseline: str, setting: str, cid: str -) -> str: - """Build command to run client.""" - return ( - "screen -d -m" - + f" python3.7 -m 
flwr_experimental.baseline.{baseline}.client" - + f" --server_address={server_address}" - + f" --log_host={log_host}" - + f" --setting={setting}" - + f" --cid={cid}" - ) - - -def download_dataset(baseline: str) -> str: - """Return command which makes dataset locally available.""" - return f"python3.7 -m flwr_experimental.baseline.{baseline}.download" - - -def watch_and_shutdown(keyword: str, adapter: str) -> str: - """Return command which shuts down the instance after no baseline is - running anymore.""" - cmd = ( - f"screen -d -m bash -c 'while [[ $(ps a | grep -v grep | grep {keyword}) ]]; " - + "do sleep 1; done; " - ) - - if adapter == "docker": - cmd += "sleep 180 && kill 1'" - elif adapter == "ec2": - # Shutdown after 2 minutes to allow a logged in user - # to chancel the shutdown manually just in case - cmd += "sudo shutdown -P 3'" - else: - raise Exception("Unknown Adapter") - - return cmd - - -def tail_logfile(adapter: str, private_key: str, logserver: Instance) -> str: - """Return command which can be used to tail the logfile on the - logserver.""" - ssh_key = f"-i {private_key}" - username = "root" if adapter == "docker" else "ubuntu" - - return ( - f"ssh {ssh_key} -o StrictHostKeyChecking=no -p {logserver.ssh_port} " - + f"{username}@{logserver.public_ip}" - + ' "tail -n 1000 -f flower_logs/flower.log"' - ) diff --git a/src/py/flwr_experimental/baseline/common/__init__.py b/src/py/flwr_experimental/baseline/common/__init__.py deleted file mode 100644 index 706d2c41cb69..000000000000 --- a/src/py/flwr_experimental/baseline/common/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Common baseline components.""" - - -from .client import VisionClassificationClient as VisionClassificationClient -from .common import custom_fit as custom_fit -from .common import get_evaluate_fn as get_evaluate_fn -from .common import get_lr_schedule as get_lr_schedule -from .common import keras_evaluate as keras_evaluate -from .common import keras_fit as keras_fit -from .data import build_dataset as build_dataset -from .data import load_partition as load_partition diff --git a/src/py/flwr_experimental/baseline/common/client.py b/src/py/flwr_experimental/baseline/common/client.py deleted file mode 100644 index 18346218d919..000000000000 --- a/src/py/flwr_experimental/baseline/common/client.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Flower client using TensorFlow/Keras for image classification.""" - - -from logging import DEBUG -from typing import Tuple - -import numpy as np -import tensorflow as tf - -import flwr as fl -from flwr.common.logger import log - -from .common import custom_fit, keras_evaluate -from .data import build_dataset - -tf.get_logger().setLevel("ERROR") - - -class VisionClassificationClient(fl.client.Client): - """Flower client implementing image classification using - TensorFlow/Keras.""" - - # pylint: disable=too-many-arguments - def __init__( - self, - cid: str, - model: tf.keras.Model, - xy_train: Tuple[np.ndarray, np.ndarray], - xy_test: Tuple[np.ndarray, np.ndarray], - delay_factor: float, - num_classes: int, - augment: bool = False, - augment_horizontal_flip: bool = False, - augment_offset: int = 0, - normalization_factor: float = 255.0, - ): - self.cid = cid - self.model = model - self.ds_train = build_dataset( - xy_train[0], - xy_train[1], - num_classes=num_classes, - shuffle_buffer_size=len(xy_train[0]), - augment=augment, - augment_horizontal_flip=augment_horizontal_flip, - augment_offset=augment_offset, - normalization_factor=normalization_factor, - ) - self.ds_test = build_dataset( - xy_test[0], - xy_test[1], - num_classes=num_classes, - shuffle_buffer_size=0, - augment=False, - normalization_factor=normalization_factor, - ) - self.num_examples_train = len(xy_train[0]) - self.num_examples_test = len(xy_test[0]) - self.delay_factor = delay_factor - - def get_parameters(self) -> fl.common.ParametersRes: - parameters = fl.common.ndarrays_to_parameters(self.model.get_weights()) - return fl.common.ParametersRes(parameters=parameters) - - def fit(self, ins: fl.common.FitIns) -> fl.common.FitRes: - weights: fl.common.NDArrays = fl.common.parameters_to_ndarrays(ins.parameters) - config = ins.config - log( - DEBUG, - "fit on %s (examples: %s), config %s", - self.cid, - 
self.num_examples_train, - config, - ) - - # Training configuration - # epoch_global = int(config["epoch_global"]) - epochs = int(config["epochs"]) - batch_size = int(config["batch_size"]) - # lr_initial = float(config["lr_initial"]) - # lr_decay = float(config["lr_decay"]) - timeout = int(config["timeout"]) if "timeout" in config else None - partial_updates = bool(int(config["partial_updates"])) - - # Use provided weights to update the local model - self.model.set_weights(weights) - - # Train the local model using the local dataset - completed, fit_duration, num_examples = custom_fit( - model=self.model, - dataset=self.ds_train, - num_epochs=epochs, - batch_size=batch_size, - callbacks=[], - delay_factor=self.delay_factor, - timeout=timeout, - ) - log(DEBUG, "client %s had fit_duration %s", self.cid, fit_duration) - - # Compute the maximum number of examples which could have been processed - num_examples_ceil = self.num_examples_train * epochs - - if not completed and not partial_updates: - # Return empty update if local update could not be completed in time - parameters = fl.common.ndarrays_to_parameters([]) - else: - # Return the refined weights and the number of examples used for training - parameters = fl.common.ndarrays_to_parameters(self.model.get_weights()) - return fl.common.FitRes( - parameters=parameters, - num_examples=num_examples, - num_examples_ceil=num_examples_ceil, - fit_duration=fit_duration, - ) - - def evaluate(self, ins: fl.common.EvaluateIns) -> fl.common.EvaluateRes: - weights = fl.common.parameters_to_ndarrays(ins.parameters) - config = ins.config - log( - DEBUG, - "evaluate on %s (examples: %s), config %s", - self.cid, - self.num_examples_test, - config, - ) - - # Use provided weights to update the local model - self.model.set_weights(weights) - - # Evaluate the updated model on the local dataset - loss, acc = keras_evaluate( - self.model, self.ds_test, batch_size=self.num_examples_test - ) - - # Return the number of evaluation examples 
and the evaluation result (loss) - return fl.common.EvaluateRes( - loss=loss, num_examples=self.num_examples_test, accuracy=acc - ) diff --git a/src/py/flwr_experimental/baseline/common/common.py b/src/py/flwr_experimental/baseline/common/common.py deleted file mode 100644 index 7be848f96d79..000000000000 --- a/src/py/flwr_experimental/baseline/common/common.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Common baseline components.""" - - -import time -import timeit -from logging import INFO -from typing import Callable, List, Optional, Tuple - -import numpy as np -import tensorflow as tf - -import flwr as fl -from flwr.common.logger import log - -from .data import build_dataset - - -# pylint: disable=unused-argument,invalid-name,too-many-arguments,too-many-locals -def custom_fit( - model: tf.keras.Model, - dataset: tf.data.Dataset, - num_epochs: int, - batch_size: int, - callbacks: List[tf.keras.callbacks.Callback], - delay_factor: float = 0.0, - timeout: Optional[int] = None, -) -> Tuple[bool, float, int]: - """Train the model using a custom training loop.""" - ds_train = dataset.batch(batch_size=batch_size, drop_remainder=False) - - # Keep results for plotting - train_loss_results = [] - train_accuracy_results = [] - - # Optimizer - optimizer = tf.keras.optimizers.Adam() - - fit_begin = timeit.default_timer() - num_examples = 0 - for epoch in range(num_epochs): - log(INFO, "Starting epoch %s", epoch) - - epoch_loss_avg = tf.keras.metrics.Mean() - epoch_accuracy = tf.keras.metrics.CategoricalAccuracy() - - # Single loop over the dataset - batch_begin = timeit.default_timer() - num_examples_batch = 0 - for batch, (x, y) in enumerate(ds_train): - num_examples_batch += len(x) - - # Optimize the model - loss_value, grads = grad(model, x, y) - optimizer.apply_gradients(zip(grads, model.trainable_variables)) - - # Track progress - epoch_loss_avg.update_state(loss_value) # Add the current batch loss - epoch_accuracy.update_state(y, model(x, training=True)) - - # Track the number of examples used for training - num_examples += x.shape[0] - - # Delay - batch_duration = timeit.default_timer() - batch_begin - if delay_factor > 0.0: - time.sleep(batch_duration * delay_factor) - - # Progress log - if batch % 100 == 0: - log( - INFO, - "Batch %s: loss %s (%s examples processed, batch 
duration: %s)", - batch, - loss_value, - num_examples_batch, - batch_duration, - ) - - # Timeout - if timeout is not None: - fit_duration = timeit.default_timer() - fit_begin - if fit_duration > timeout: - log(INFO, "client timeout") - return (False, fit_duration, num_examples) - batch_begin = timeit.default_timer() - - # End epoch - train_loss_results.append(epoch_loss_avg.result()) - train_accuracy_results.append(epoch_accuracy.result()) - log( - INFO, - "Epoch {:03d}: Loss: {:.3f}, Accuracy: {:.3%}".format( - epoch, epoch_loss_avg.result(), epoch_accuracy.result() - ), - ) - - fit_duration = timeit.default_timer() - fit_begin - return True, fit_duration, num_examples - - -def loss( - model: tf.keras.Model, x: tf.Tensor, y: tf.Tensor, training: bool -) -> tf.Tensor: - """Calculate categorical crossentropy loss.""" - loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=False) - y_ = model(x, training=training) - return loss_object(y_true=y, y_pred=y_) - - -def grad( - model: tf.keras.Model, x: tf.Tensor, y: tf.Tensor -) -> Tuple[tf.Tensor, List[tf.Tensor]]: - """Calculate gradients.""" - with tf.GradientTape() as tape: - loss_value = loss(model, x, y, training=True) - return loss_value, tape.gradient(loss_value, model.trainable_variables) - - -def keras_evaluate( - model: tf.keras.Model, dataset: tf.data.Dataset, batch_size: int -) -> Tuple[float, float]: - """Evaluate the model using model.evaluate(...).""" - ds_test = dataset.batch(batch_size=batch_size, drop_remainder=False) - test_loss, acc = model.evaluate(x=ds_test) - return float(test_loss), float(acc) - - -def keras_fit( - model: tf.keras.Model, - dataset: tf.data.Dataset, - num_epochs: int, - batch_size: int, - callbacks: List[tf.keras.callbacks.Callback], -) -> None: - """Train the model using model.fit(...).""" - ds_train = dataset.batch(batch_size=batch_size, drop_remainder=False) - model.fit(ds_train, epochs=num_epochs, callbacks=callbacks, verbose=2) - - -def get_lr_schedule( - 
epoch_global: int, lr_initial: float, lr_decay: float -) -> Callable[[int], float]: - """Return a schedule which decays the learning rate after each epoch.""" - - def lr_schedule(epoch: int) -> float: - """Learning rate schedule.""" - epoch += epoch_global - return lr_initial * lr_decay**epoch - - return lr_schedule - - -def get_evaluate_fn( - model: tf.keras.Model, num_classes: int, xy_test: Tuple[np.ndarray, np.ndarray] -) -> Callable[[fl.common.NDArrays], Optional[Tuple[float, float]]]: - """Return an evaluation function for centralized evaluation.""" - - ds_test = build_dataset( - xy_test[0], - xy_test[1], - num_classes=num_classes, - shuffle_buffer_size=0, - augment=False, - ) - - def evaluate(weights: fl.common.NDArrays) -> Optional[Tuple[float, float]]: - """Use entire test set for evaluation.""" - model.set_weights(weights) - lss, acc = keras_evaluate(model, ds_test, batch_size=len(xy_test[0])) - return lss, acc - - return evaluate diff --git a/src/py/flwr_experimental/baseline/common/data.py b/src/py/flwr_experimental/baseline/common/data.py deleted file mode 100644 index 7d939d608310..000000000000 --- a/src/py/flwr_experimental/baseline/common/data.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Baseline utilities for data loading.""" - - -from typing import List, Optional, Tuple, cast - -import numpy as np -import tensorflow as tf - - -# pylint: disable=too-many-arguments -def load_partition( - xy_partitions: List[Tuple[np.ndarray, np.ndarray]], - xy_test: Tuple[np.ndarray, np.ndarray], - partition: int, - num_clients: int, - seed: int, - dry_run: bool = False, -) -> Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]: - """Load, normalize, and sample CIFAR-10/100.""" - - # Take partition - x_train, y_train = xy_partitions[partition] - - # Take a subset of the test set - x_test, y_test = shuffle(xy_test[0], xy_test[1], seed=seed) - x_test, y_test = get_partition(x_test, y_test, partition, num_clients) - - # Adjust x shape for model - if x_train.ndim == 3: - x_train = adjust_x_shape(x_train) - x_test = adjust_x_shape(x_test) - - # Adjust y shape for model - if y_train.ndim == 2: - y_train = adjust_y_shape(y_train) - y_test = adjust_y_shape(y_test) - - # Return a small subset of the data if dry_run is set - if dry_run: - return (x_train[0:100], y_train[0:100]), (x_test[0:50], y_test[0:50]) - return (x_train, y_train), (x_test, y_test) - - -def shuffle( - x_orig: np.ndarray, y_orig: np.ndarray, seed: int -) -> Tuple[np.ndarray, np.ndarray]: - """Shuffle x and y in the same way.""" - np.random.seed(seed) - idx = np.random.permutation(len(x_orig)) - return x_orig[idx], y_orig[idx] - - -def get_partition( - x_orig: np.ndarray, y_orig: np.ndarray, partition: int, num_clients: int -) -> Tuple[np.ndarray, np.ndarray]: - """Return a single partition of an equally partitioned dataset.""" - step_size = len(x_orig) / num_clients - start_index = int(step_size * partition) - end_index = int(start_index + step_size) - return x_orig[start_index:end_index], y_orig[start_index:end_index] - - -def adjust_x_shape(nda: np.ndarray) -> np.ndarray: - """Turn shape (x, y, z) into 
(x, y, z, 1).""" - nda_adjusted = np.reshape(nda, (nda.shape[0], nda.shape[1], nda.shape[2], 1)) - return cast(np.ndarray, nda_adjusted) - - -def adjust_y_shape(nda: np.ndarray) -> np.ndarray: - """Turn shape (x, 1) into (x).""" - nda_adjusted = np.reshape(nda, (nda.shape[0])) - return cast(np.ndarray, nda_adjusted) - - -# pylint: disable=too-many-arguments,invalid-name -def build_dataset( - x: np.ndarray, - y: np.ndarray, - num_classes: int, - shuffle_buffer_size: int = 0, - augment: bool = False, - augment_color: bool = False, - augment_horizontal_flip: bool = False, - augment_offset: int = 0, - seed: Optional[int] = None, - normalization_factor: float = 255.0, -) -> tf.data.Dataset: - """Normalize images, one-hot encode labels, optionally shuffle and - augment.""" - dataset = tf.data.Dataset.from_tensor_slices((x, y)) - dataset = dataset.map( - lambda x, y: ( - tf.cast(x, tf.float32) / normalization_factor, - tf.one_hot( - indices=tf.cast(y, tf.int32), depth=num_classes, on_value=1, off_value=0 - ), - ), - num_parallel_calls=tf.data.experimental.AUTOTUNE, - ) - if shuffle_buffer_size > 0: - dataset = dataset.shuffle( - buffer_size=shuffle_buffer_size, seed=seed, reshuffle_each_iteration=True - ) - if augment: - dataset = dataset.map( - lambda x, y: ( - apply_augmentation( - x, - seed=seed, - color=augment_color, - horizontal_flip=augment_horizontal_flip, - offset=augment_offset, - ), - y, - ), - num_parallel_calls=tf.data.experimental.AUTOTUNE, - ) - return dataset - - -def apply_augmentation( - img: tf.Tensor, - seed: Optional[int], - color: bool, - horizontal_flip: bool, - offset: int, -) -> tf.Tensor: - """Apply different augmentations to a single example.""" - if color: - img = tf.image.random_hue(img, 0.08, seed=seed) - img = tf.image.random_saturation(img, 0.6, 1.6, seed=seed) - img = tf.image.random_brightness(img, 0.05, seed=seed) - img = tf.image.random_contrast(img, 0.7, 1.3, seed=seed) - if horizontal_flip: - img = tf.image.random_flip_left_right(img, 
seed=seed) - # Get image size from tensor - size = img.shape.as_list() # E.g., [28, 28, 1] or [32, 32, 3] - height = size[0] - width = size[1] - img_padded = tf.image.pad_to_bounding_box( - img, offset, offset, height + 2 * offset, width + 2 * offset - ) - return tf.image.random_crop(img_padded, size=size, seed=seed) diff --git a/src/py/flwr_experimental/baseline/config/__init__.py b/src/py/flwr_experimental/baseline/config/__init__.py deleted file mode 100644 index 0221a92188e5..000000000000 --- a/src/py/flwr_experimental/baseline/config/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Common client configuration.""" - - -from .config import configure_client_instances as configure_client_instances -from .config import sample_delay_factors as sample_delay_factors -from .config import sample_real_delay_factors as sample_real_delay_factors diff --git a/src/py/flwr_experimental/baseline/config/config.py b/src/py/flwr_experimental/baseline/config/config.py deleted file mode 100644 index 16c144bb6a2f..000000000000 --- a/src/py/flwr_experimental/baseline/config/config.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Provides a variaty of baseline settings for Fashion-MNIST.""" - - -import random -from typing import List, Optional, Tuple - -import numpy as np - -from flwr_experimental.ops.instance import Instance - -# We assume that devices which are older will have at most -# ~80% of the Samsung Galaxy Note 5 compute performance. -SCORE_MISSING = int(226 * 0.80) - -DEVICE_DISTRIBUTION = [ - ("10.0", "Note 10", 0.1612, 729), - ("Pie 9", "Samsung Galaxy Note 9", 0.374, 607), - ("Oreo 8.0/8.1", "Samsung Galaxy S8", 0.1129 + 0.0737, 359), - ("Nougat 7.0/7.1", "Samsung Galaxy S7", 0.0624 + 0.043, 343), - ("Marshmallow 6.0", "Samsung Galaxy Note 5", 0.0872, 226), - ("Lollipop 5.1", "Samsung Galaxy Note 4", 0.0484, SCORE_MISSING), - ("KitKat 4.4", "Samsung Galaxy Note 4", 0.0187, SCORE_MISSING), - ("Other", "Samsung Galaxy S III", 0.0185, SCORE_MISSING), -] - - -def sample_delay_factors( - num_clients: int, max_delay: float, seed: Optional[int] -) -> List[float]: - """Sample delay factors.""" - np.random.seed(seed) - # pylint: disable=invalid-name - ps = [float(p) for p in np.random.rand(num_clients)] - step_size = max_delay / num_clients - ds = [(i + 1) * step_size for i in range(num_clients)] - return [p * d for p, d in zip(ps, ds)] - - -def sample_real_delay_factors(num_clients: int, seed: int = 2021) -> List[float]: - """Split list of floats into two buckets.""" - random.seed(seed) - - if num_clients % 2 != 0: - raise Exception("num_clients has to be divisible by two") - - factors = 
sorted([get_delay_factor() for _ in range(num_clients)]) - - buckets: Tuple[List[float], List[float]] = ( - factors[: num_clients // 2], # fast, lower factor - factors[num_clients // 2 :], # slow, higher factor - ) - - final_factors: List[float] = [] - - for idx in range(num_clients): - # higher probability to pick bucket 0 with low idx - bucket_idx = random.choices([0, 1], [num_clients - idx, idx])[0] - picked_bucket = buckets[bucket_idx] - other_bucket = buckets[bucket_idx - 1] - - if picked_bucket == other_bucket: - raise Exception("Picked and other bucket can't be same") - - if len(picked_bucket) > 0: - value = picked_bucket.pop(0) - else: - value = other_bucket.pop(0) - - final_factors.append(value) - - return final_factors - - -def get_delay_factor() -> float: - """Return a delay factor.""" - values_prob = [val[2] for val in DEVICE_DISTRIBUTION] - values_perf = [val[3] for val in DEVICE_DISTRIBUTION] - max_perf = max(values_perf) - chosen_score = random.choices(values_perf, values_prob)[0] - return round(max_perf / chosen_score - 1, 4) - - -def configure_client_instances( - num_clients: int, num_cpu: int, num_ram: float, gpu: bool = False -) -> Tuple[List[Instance], List[str]]: - """Return list of client instances and a list of instance names.""" - instance_names = [f"client_{i}" for i in range(num_clients)] - - instances = [ - Instance( - name=instance_name, - group="clients", - num_cpu=num_cpu, - num_ram=num_ram, - gpu=gpu, - ) - for instance_name in instance_names - ] - - return instances, instance_names diff --git a/src/py/flwr_experimental/baseline/config/config_test.py b/src/py/flwr_experimental/baseline/config/config_test.py deleted file mode 100644 index 1e8377b3be1e..000000000000 --- a/src/py/flwr_experimental/baseline/config/config_test.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Implements tests for config module.""" - - -from .config import sample_real_delay_factors - - -def test_sample_real_delay_factors_100() -> None: - """Test delay factors.""" - # Prepare - num_clients = 100 - - # Execute - factors = sample_real_delay_factors(num_clients=num_clients) - - # Assert - assert len(factors) == num_clients - - -def test_sample_real_delay_factors_10() -> None: - """Test delay factors.""" - # Prepare - num_clients = 10 - - # Execute - factors = sample_real_delay_factors(num_clients=num_clients) - - # Assert - assert len(factors) == num_clients - - -def test_sample_real_delay_factors_seed() -> None: - """Test delay factors.""" - # Prepare - num_clients = 100 - - # Execute - factors_a = sample_real_delay_factors(num_clients=num_clients, seed=0) - factors_b = sample_real_delay_factors(num_clients=num_clients, seed=0) - factors_c = sample_real_delay_factors(num_clients=num_clients, seed=1) - - # Assert - assert len(factors_a) == num_clients - assert len(factors_b) == num_clients - assert len(factors_c) == num_clients - - # pylint: disable=invalid-name - all_same_in_a_and_b = True - all_same_in_a_and_c = True - - for a, b, c in zip(factors_a, factors_b, factors_c): - all_same_in_a_and_b = all_same_in_a_and_b and (a == b) - all_same_in_a_and_c = all_same_in_a_and_c and (a == c) - - assert all_same_in_a_and_b - assert not 
all_same_in_a_and_c diff --git a/src/py/flwr_experimental/baseline/dataset/__init__.py b/src/py/flwr_experimental/baseline/dataset/__init__.py deleted file mode 100644 index aa19c2fbfce8..000000000000 --- a/src/py/flwr_experimental/baseline/dataset/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Partitioned versions of popular datasets.""" diff --git a/src/py/flwr_experimental/baseline/dataset/dataset.py b/src/py/flwr_experimental/baseline/dataset/dataset.py deleted file mode 100644 index 8e27ad71821d..000000000000 --- a/src/py/flwr_experimental/baseline/dataset/dataset.py +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Partitioned versions of CIFAR-10/100 datasets.""" -# pylint: disable=invalid-name - -from typing import List, Tuple, cast - -import numpy as np - -XY = Tuple[np.ndarray, np.ndarray] -XYList = List[XY] -PartitionedDataset = Tuple[XYList, XYList] - -np.random.seed(2020) - - -def float_to_int(i: float) -> int: - """Return float as int but raise if decimal is dropped.""" - if not i.is_integer(): - raise Exception("Cast would drop decimals") - - return int(i) - - -def sort_by_label(x: np.ndarray, y: np.ndarray) -> XY: - """Sort by label. - - Assuming two labels and four examples the resulting label order - would be 1,1,2,2 - """ - idx = np.argsort(y, axis=0).reshape((y.shape[0])) - return (x[idx], y[idx]) - - -def sort_by_label_repeating(x: np.ndarray, y: np.ndarray) -> XY: - """Sort by label in repeating groups. Assuming two labels and four examples - the resulting label order would be 1,2,1,2. - - Create sorting index which is applied to by label sorted x, y - - .. 
code-block:: python - - # given: - y = [ - 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9 - ] - - # use: - idx = [ - 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19 - ] - - # so that y[idx] becomes: - y = [ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 - ] - """ - x, y = sort_by_label(x, y) - - num_example = x.shape[0] - num_class = np.unique(y).shape[0] - idx = ( - np.array(range(num_example), np.int64) - .reshape((num_class, num_example // num_class)) - .transpose() - .reshape(num_example) - ) - - return (x[idx], y[idx]) - - -def split_at_fraction(x: np.ndarray, y: np.ndarray, fraction: float) -> Tuple[XY, XY]: - """Split x, y at a certain fraction.""" - splitting_index = float_to_int(x.shape[0] * fraction) - # Take everything BEFORE splitting_index - x_0, y_0 = x[:splitting_index], y[:splitting_index] - # Take everything AFTER splitting_index - x_1, y_1 = x[splitting_index:], y[splitting_index:] - return (x_0, y_0), (x_1, y_1) - - -def shuffle(x: np.ndarray, y: np.ndarray) -> XY: - """Shuffle x and y.""" - idx = np.random.permutation(len(x)) - return x[idx], y[idx] - - -def partition(x: np.ndarray, y: np.ndarray, num_partitions: int) -> List[XY]: - """Return x, y as list of partitions.""" - return list(zip(np.split(x, num_partitions), np.split(y, num_partitions))) - - -def combine_partitions(xy_list_0: XYList, xy_list_1: XYList) -> XYList: - """Combine two lists of ndarray Tuples into one list.""" - return [ - (np.concatenate([x_0, x_1], axis=0), np.concatenate([y_0, y_1], axis=0)) - for (x_0, y_0), (x_1, y_1) in zip(xy_list_0, xy_list_1) - ] - - -def shift(x: np.ndarray, y: np.ndarray) -> XY: - """Shift x_1, y_1 so that the first half contains only labels 0 to 4 and - the second half 5 to 9.""" - x, y = sort_by_label(x, y) - - (x_0, y_0), (x_1, y_1) = split_at_fraction(x, y, fraction=0.5) - (x_0, y_0), (x_1, y_1) = shuffle(x_0, y_0), shuffle(x_1, y_1) - x, y = np.concatenate([x_0, x_1], axis=0), 
np.concatenate([y_0, y_1], axis=0) - return x, y - - -def create_partitions( - unpartitioned_dataset: XY, - iid_fraction: float, - num_partitions: int, -) -> XYList: - """Create partitioned version of a training or test set. - - Currently tested and supported are MNIST, FashionMNIST and - CIFAR-10/100 - """ - x, y = unpartitioned_dataset - - x, y = shuffle(x, y) - x, y = sort_by_label_repeating(x, y) - - (x_0, y_0), (x_1, y_1) = split_at_fraction(x, y, fraction=iid_fraction) - - # Shift in second split of dataset the classes into two groups - x_1, y_1 = shift(x_1, y_1) - - xy_0_partitions = partition(x_0, y_0, num_partitions) - xy_1_partitions = partition(x_1, y_1, num_partitions) - - xy_partitions = combine_partitions(xy_0_partitions, xy_1_partitions) - - # Adjust x and y shape - return [adjust_xy_shape(xy) for xy in xy_partitions] - - -def create_partitioned_dataset( - keras_dataset: Tuple[XY, XY], - iid_fraction: float, - num_partitions: int, -) -> Tuple[PartitionedDataset, XY]: - """Create partitioned version of keras dataset. 
- - Currently tested and supported are MNIST, FashionMNIST and - CIFAR-10/100 - """ - xy_train, xy_test = keras_dataset - - xy_train_partitions = create_partitions( - unpartitioned_dataset=xy_train, - iid_fraction=iid_fraction, - num_partitions=num_partitions, - ) - - xy_test_partitions = create_partitions( - unpartitioned_dataset=xy_test, - iid_fraction=iid_fraction, - num_partitions=num_partitions, - ) - - return (xy_train_partitions, xy_test_partitions), adjust_xy_shape(xy_test) - - -def log_distribution(xy_partitions: XYList) -> None: - """Print label distribution for list of paritions.""" - distro = [np.unique(y, return_counts=True) for _, y in xy_partitions] - for d in distro: - print(d) - - -def adjust_xy_shape(xy: XY) -> XY: - """Adjust shape of both x and y.""" - x, y = xy - if x.ndim == 3: - x = adjust_x_shape(x) - if y.ndim == 2: - y = adjust_y_shape(y) - return (x, y) - - -def adjust_x_shape(nda: np.ndarray) -> np.ndarray: - """Turn shape (x, y, z) into (x, y, z, 1).""" - nda_adjusted = np.reshape(nda, (nda.shape[0], nda.shape[1], nda.shape[2], 1)) - return cast(np.ndarray, nda_adjusted) - - -def adjust_y_shape(nda: np.ndarray) -> np.ndarray: - """Turn shape (x, 1) into (x).""" - nda_adjusted = np.reshape(nda, (nda.shape[0])) - return cast(np.ndarray, nda_adjusted) diff --git a/src/py/flwr_experimental/baseline/dataset/dataset_test.py b/src/py/flwr_experimental/baseline/dataset/dataset_test.py deleted file mode 100644 index 4c484b8836bc..000000000000 --- a/src/py/flwr_experimental/baseline/dataset/dataset_test.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for partitioned CIFAR-10/100 dataset generation.""" -# pylint: disable=no-self-use, invalid-name - -import unittest - -import numpy as np -import tensorflow as tf - -from flwr_experimental.baseline.dataset.dataset import ( - XY, - combine_partitions, - partition, - shuffle, - sort_by_label, - sort_by_label_repeating, - split_at_fraction, -) - - -def hash_xy(xy: XY) -> int: - """Return hash of xy.""" - hashes = set() - for x, y in zip(xy[0], xy[1]): - hashes.add(hash(x.tobytes() + y.tobytes())) - return hash(frozenset(hashes)) - - -def assert_identity(xy_0: XY, xy_1: XY) -> None: - """Assert that both datasets contain the same examples.""" - assert xy_0[0].shape == xy_1[0].shape - assert xy_0[1].shape == xy_1[1].shape - assert hash_xy(xy_0) == hash_xy(xy_1) - - -class CifarPartitionedTestCase(unittest.TestCase): - """Tests for partitioned CIFAR-10/100 dataset generation.""" - - def setUp(self) -> None: - (x, y), (_, _) = tf.keras.datasets.cifar10.load_data() - - np.random.seed(2000) - idx = np.random.permutation(x.shape[0]) - x, y = x[idx], y[idx] - - self.ds = x, y - - # Make sure subsequent shuffle in tests - # produce other permutations - np.random.seed(3000) - - def test_assert_identity(self) -> None: - """Test assert_identity function.""" - assert_identity(self.ds, self.ds) - - def test_sort_by_label(self) -> None: - """Test sort_by_label function.""" - # Prepare - x_org, y_org = self.ds - - # Execute - x, y = sort_by_label(x_org, y_org) - - # Assert - 
assert_identity(self.ds, (x, y)) - for i, _ in enumerate(y): - if i > 0: - assert y[i] >= y[i - 1] - - def test_sort_by_label_repeating(self) -> None: - """Test sort_by_label function.""" - # Prepare - x, y = self.ds - idx = np.random.permutation(x.shape[0]) - x, y = x[idx], y[idx] - - # Execute - x, y = sort_by_label_repeating(x, y) - - # Assert - assert_identity(self.ds, (x, y)) - assert {y[0] for y in y[:10]} == set(range(10)) - - def test_split_at_fraction(self) -> None: - """Test split_at_fraction function.""" - # Prepare - fraction = 0.5 - x, y = self.ds - - # Execute - (x_0, y_0), (x_1, y_1) = split_at_fraction(x, y, fraction) - - # Assert - barrier = int(x.shape[0] * fraction) - np.testing.assert_equal(x_0, x[:barrier]) - np.testing.assert_equal(y_0, y[:barrier]) - np.testing.assert_equal(x_1, x[barrier:]) - np.testing.assert_equal(y_1, y[barrier:]) - - def test_shuffle(self) -> None: - """Test sort_by_label function.""" - # Prepare - x, y = self.ds - - # Execute - x, y = shuffle(x, y) - - # Assert - assert_identity(self.ds, (x, y)) - - def test_partition(self) -> None: - """Test partition function.""" - # Prepare - x, y = self.ds - - # Execute - partitions = partition(x, y, 2) - - # Assert - assert len(partitions) == 2 - assert partitions[0][0].shape == partitions[1][0].shape - assert partitions[0][1].shape == partitions[1][1].shape - - def test_combine_partitions(self) -> None: - """Test combine function.""" - # Prepare - r_0_5 = list(range(0, 5)) - r_5_10 = list(range(5, 10)) - r_0_10 = r_0_5 + r_5_10 - xy_list_0 = [(np.array(r_0_5, np.int64), np.array(r_0_5, np.int64))] - xy_list_1 = [(np.array(r_5_10, np.int64), np.array(r_5_10, np.int64))] - - # Execute - xy_combined = combine_partitions(xy_list_0, xy_list_1) - - # Assert - assert len(xy_combined) == 1 - assert isinstance(xy_combined[0], tuple) - x_01, y_01 = xy_combined[0] - np.testing.assert_equal(x_01, r_0_10) - np.testing.assert_equal(y_01, r_0_10) - - -if __name__ == "__main__": - 
unittest.main(verbosity=2) diff --git a/src/py/flwr_experimental/baseline/dataset/tf_cifar_partitioned.py b/src/py/flwr_experimental/baseline/dataset/tf_cifar_partitioned.py deleted file mode 100644 index 61b6e31c29ab..000000000000 --- a/src/py/flwr_experimental/baseline/dataset/tf_cifar_partitioned.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Partitioned versions of CIFAR-10/100 datasets.""" -# pylint: disable=invalid-name - -from typing import Tuple - -import tensorflow as tf - -from .dataset import ( - XY, - PartitionedDataset, - create_partitioned_dataset, - log_distribution, -) - - -def load_data( - iid_fraction: float, num_partitions: int, cifar100: bool = False -) -> Tuple[PartitionedDataset, XY]: - """Load partitioned version of CIFAR-10/100.""" - cifar = tf.keras.datasets.cifar100 if cifar100 else tf.keras.datasets.cifar10 - (xy_train_partitions, xy_test_partitions), xy_test = create_partitioned_dataset( - cifar.load_data(), iid_fraction, num_partitions - ) - return (xy_train_partitions, xy_test_partitions), xy_test - - -if __name__ == "__main__": - # Load a partitioned dataset and show distribution of examples - for _num_partitions in [10, 100]: - for _fraction in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]: - (xy_train_par, xy_test_par), _ = load_data(_fraction, 
_num_partitions) - print(f"\nfraction: {_fraction}; num_partitions: {_num_partitions}") - log_distribution(xy_train_par) - log_distribution(xy_test_par) diff --git a/src/py/flwr_experimental/baseline/dataset/tf_cifar_partitioned_test.py b/src/py/flwr_experimental/baseline/dataset/tf_cifar_partitioned_test.py deleted file mode 100644 index dc655682adec..000000000000 --- a/src/py/flwr_experimental/baseline/dataset/tf_cifar_partitioned_test.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for partitioned CIFAR-10/100 dataset generation.""" -# pylint: disable=no-self-use - -import unittest - -from flwr_experimental.baseline.dataset.tf_cifar_partitioned import load_data - - -class CifarPartitionedTestCase(unittest.TestCase): - """Tests for partitioned CIFAR-10/100 dataset generation.""" - - def test_load_data_integration(self) -> None: - """Test partition function.""" - # Execute - for num_partitions in [10, 100]: - for fraction in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]: - (_, _), _ = load_data(fraction, num_partitions) - - -if __name__ == "__main__": - unittest.main(verbosity=2) diff --git a/src/py/flwr_experimental/baseline/dataset/tf_fashion_mnist_partitioned.py b/src/py/flwr_experimental/baseline/dataset/tf_fashion_mnist_partitioned.py deleted file mode 100644 index da7d70434bb4..000000000000 --- a/src/py/flwr_experimental/baseline/dataset/tf_fashion_mnist_partitioned.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Partitioned versions of CIFAR-10/100 datasets.""" -# pylint: disable=invalid-name - - -from typing import Tuple - -import tensorflow as tf - -from .dataset import ( - XY, - PartitionedDataset, - create_partitioned_dataset, - log_distribution, -) - - -def load_data( - iid_fraction: float, num_partitions: int -) -> Tuple[PartitionedDataset, XY]: - """Load partitioned version of FashionMNIST.""" - (xy_train_partitions, xy_test_partitions), xy_test = create_partitioned_dataset( - tf.keras.datasets.fashion_mnist.load_data(), iid_fraction, num_partitions - ) - return (xy_train_partitions, xy_test_partitions), xy_test - - -if __name__ == "__main__": - # Load a partitioned dataset and show distribution of examples - for _num_partitions in [10, 100]: - for _fraction in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]: - (xy_train_par, xy_test_par), _ = load_data(_fraction, _num_partitions) - print(f"\nfraction: {_fraction}; num_partitions: {_num_partitions}") - log_distribution(xy_train_par) - log_distribution(xy_test_par) diff --git a/src/py/flwr_experimental/baseline/dataset/tf_fashion_mnist_partitioned_test.py b/src/py/flwr_experimental/baseline/dataset/tf_fashion_mnist_partitioned_test.py deleted file mode 100644 index 2782bcf0353a..000000000000 --- a/src/py/flwr_experimental/baseline/dataset/tf_fashion_mnist_partitioned_test.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for partitioned FashionMNIST dataset generation.""" -# pylint: disable=no-self-use - -import unittest - -from flwr_experimental.baseline.dataset.tf_fashion_mnist_partitioned import load_data - - -class FashionMnistPartitionedTestCase(unittest.TestCase): - """Tests for partitioned FashionMNIST dataset generation.""" - - def test_load_data_integration(self) -> None: - """Test partition function.""" - # Execute - for num_partitions in [10, 100]: - for fraction in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]: - (_, _), _ = load_data(fraction, num_partitions) - - -if __name__ == "__main__": - unittest.main(verbosity=2) diff --git a/src/py/flwr_experimental/baseline/dataset/tf_hotkey_partitioned.py b/src/py/flwr_experimental/baseline/dataset/tf_hotkey_partitioned.py deleted file mode 100644 index 265d67301b64..000000000000 --- a/src/py/flwr_experimental/baseline/dataset/tf_hotkey_partitioned.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Partitioned versions of Spoken Keyword Detection dataset.""" -# pylint: disable=invalid-name - -import os -import pickle -import urllib.request -from typing import Tuple - -from .dataset import ( - XY, - PartitionedDataset, - create_partitioned_dataset, - log_distribution, -) - - -def download(filename: str, path: str) -> None: - """Download hotkey dataset.""" - urls = { - "hotkey_test_x.pkl": "https://www.dropbox.com/s/ve0g1m3wtuecb7r/hotkey_test_x.pkl?dl=1", - "hotkey_test_y.pkl": "https://www.dropbox.com/s/hlihc8qchpo3hhj/hotkey_test_y.pkl?dl=1", - "hotkey_train_x.pkl": "https://www.dropbox.com/s/05ym4jg8n7oi5qh/hotkey_train_x.pkl?dl=1", - "hotkey_train_y.pkl": "https://www.dropbox.com/s/k69lhw5j02gsscq/hotkey_train_y.pkl?dl=1", - } - url = urls[filename] - urllib.request.urlretrieve(url, path) - print("Downloaded ", url) - - -def hotkey_load(dirname: str = "./data/hotkey/") -> Tuple[XY, XY]: - """Load Hotkey dataset from disk.""" - files = [ - "hotkey_train_x.pkl", - "hotkey_train_y.pkl", - "hotkey_test_x.pkl", - "hotkey_test_y.pkl", - ] - paths = [] - - for f in files: - if not os.path.exists(dirname): - os.makedirs(dirname) - path = os.path.join(dirname, f) - if not os.path.exists(path): - download(f, path) - paths.append(path) - - with open(paths[0], "rb") as input_file: - x_train = pickle.load(input_file) - - with open(paths[1], "rb") as input_file: - y_train = pickle.load(input_file) - - with open(paths[2], "rb") as input_file: - x_test = pickle.load(input_file) - - with open(paths[3], "rb") as input_file: - y_test = pickle.load(input_file) - - return ( - (x_train[0:31000, :, :], y_train[0:31000]), - (x_test[0:4000, :, :], y_test[0:4000]), - ) - - -def load_data( - iid_fraction: float, num_partitions: int -) -> Tuple[PartitionedDataset, XY]: - """Load partitioned version of FashionMNIST.""" - (xy_train_partitions, xy_test_partitions), xy_test = create_partitioned_dataset( 
- hotkey_load(), iid_fraction, num_partitions - ) - return (xy_train_partitions, xy_test_partitions), xy_test - - -if __name__ == "__main__": - # Load a partitioned dataset and show distribution of examples - for _num_partitions in [10, 50, 100]: - for _fraction in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]: - (xy_train_par, xy_test_par), _ = load_data(_fraction, _num_partitions) - print(f"\nfraction: {_fraction}; num_partitions: {_num_partitions}") - log_distribution(xy_train_par) - log_distribution(xy_test_par) diff --git a/src/py/flwr_experimental/baseline/model/__init__.py b/src/py/flwr_experimental/baseline/model/__init__.py deleted file mode 100644 index 70c22a137002..000000000000 --- a/src/py/flwr_experimental/baseline/model/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Baseline models.""" - - -from .cnn import keyword_cnn as keyword_cnn -from .cnn import orig_cnn as orig_cnn -from .resnet import resnet50v2 as resnet50v2 diff --git a/src/py/flwr_experimental/baseline/model/cnn.py b/src/py/flwr_experimental/baseline/model/cnn.py deleted file mode 100644 index 0f0d777745b2..000000000000 --- a/src/py/flwr_experimental/baseline/model/cnn.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""CNN.""" - - -from typing import Optional, Tuple - -import tensorflow as tf - -CNN_REG = 1e-5 -DENSE_REG = 1e-3 - - -def orig_cnn( - input_shape: Tuple[int, int, int] = (28, 28, 1), seed: Optional[int] = None -) -> tf.keras.Model: - """Create a CNN instance.""" - # Kernel initializer - kernel_initializer = tf.keras.initializers.glorot_uniform(seed=seed) - - # Architecture - inputs = tf.keras.layers.Input(shape=input_shape) - layers = tf.keras.layers.Conv2D( - 32, - kernel_size=(5, 5), - strides=(1, 1), - kernel_initializer=kernel_initializer, - padding="same", - activation="relu", - kernel_regularizer=tf.keras.regularizers.l2(CNN_REG), - )(inputs) - layers = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))(layers) - layers = tf.keras.layers.Conv2D( - 64, - kernel_size=(5, 5), - strides=(1, 1), - kernel_initializer=kernel_initializer, - padding="same", - activation="relu", - kernel_regularizer=tf.keras.regularizers.l2(CNN_REG), - )(layers) - layers = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))(layers) - layers = tf.keras.layers.Flatten()(layers) - layers = tf.keras.layers.Dense( - 512, - kernel_initializer=kernel_initializer, - activation="relu", - kernel_regularizer=tf.keras.regularizers.l2(DENSE_REG), - bias_regularizer=tf.keras.regularizers.l2(DENSE_REG), - )(layers) - - outputs = tf.keras.layers.Dense( - 10, 
- kernel_initializer=kernel_initializer, - activation="softmax", - kernel_regularizer=tf.keras.regularizers.l2(DENSE_REG), - bias_regularizer=tf.keras.regularizers.l2(DENSE_REG), - )(layers) - - model = tf.keras.Model(inputs=inputs, outputs=outputs) - - # Compile model w/ learning rate schedule - lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( - initial_learning_rate=1e-3, - decay_steps=10000, - decay_rate=0.9, - ) - model.compile( - optimizer=tf.keras.optimizers.SGD(learning_rate=lr_schedule, momentum=0.9), - loss=tf.keras.losses.categorical_crossentropy, - metrics=["accuracy"], - ) - return model - - -def keyword_cnn( - input_shape: Tuple[int, int, int] = (80, 40, 1), seed: Optional[int] = None -) -> tf.keras.Model: - """Create a keyword detection model instance.""" - # Kernel initializer - kernel_initializer = tf.keras.initializers.glorot_uniform(seed=seed) - - # Architecture - inputs = tf.keras.layers.Input(shape=input_shape) - layers = tf.keras.layers.Conv2D( - 32, - kernel_size=(20, 8), - strides=(1, 1), - kernel_initializer=kernel_initializer, - padding="same", - activation="relu", - )(inputs) - layers = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))(layers) - layers = tf.keras.layers.Dropout(0.5)(layers) - layers = tf.keras.layers.Conv2D( - 64, - kernel_size=(10, 4), - strides=(1, 1), - kernel_initializer=kernel_initializer, - padding="same", - activation="relu", - )(layers) - layers = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))(layers) - - layers = tf.keras.layers.Conv2D( - 64, - kernel_size=(2, 2), - strides=(1, 1), - kernel_initializer=kernel_initializer, - padding="same", - activation="relu", - )(layers) - - layers = tf.keras.layers.GlobalAveragePooling2D()(layers) - layers = tf.keras.layers.Dense( - 128, kernel_initializer=kernel_initializer, activation="relu" - )(layers) - - outputs = tf.keras.layers.Dense( - 10, kernel_initializer=kernel_initializer, activation="softmax" - )(layers) - - model = 
tf.keras.Model(inputs=inputs, outputs=outputs) - - # Compile model w/ learning rate schedule - lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( - initial_learning_rate=1e-3, - decay_steps=10000, - decay_rate=0.9, - ) - model.compile( - optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule), - loss=tf.keras.losses.categorical_crossentropy, - metrics=["accuracy"], - ) - - return model diff --git a/src/py/flwr_experimental/baseline/model/cnn_test.py b/src/py/flwr_experimental/baseline/model/cnn_test.py deleted file mode 100644 index 3cf23d96d961..000000000000 --- a/src/py/flwr_experimental/baseline/model/cnn_test.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for CNN models.""" - - -from .cnn import orig_cnn - - -def test_cnn_size_mnist() -> None: - """Test number of parameters with MNIST-sized inputs.""" - # Prepare - model = orig_cnn(input_shape=(28, 28, 1)) - expected = 1_663_370 - - # Execute - actual = model.count_params() - - # Assert - assert actual == expected - - -def test_cnn_size_cifar() -> None: - """Test number of parameters with CIFAR-sized inputs.""" - # Prepare - model = orig_cnn(input_shape=(32, 32, 3)) - expected = 2_156_490 - - # Execute - actual = model.count_params() - - # Assert - assert actual == expected diff --git a/src/py/flwr_experimental/baseline/model/resnet.py b/src/py/flwr_experimental/baseline/model/resnet.py deleted file mode 100644 index 264822e09616..000000000000 --- a/src/py/flwr_experimental/baseline/model/resnet.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""ResNet.""" - - -from typing import Optional, Tuple - -import tensorflow as tf - - -# pylint: disable=unused-argument -def resnet50v2( - input_shape: Tuple[int, int, int], num_classes: int, seed: Optional[int] = None -) -> tf.keras.Model: - """Create a ResNet-50 (v2) instance.""" - - model = tf.keras.applications.ResNet50V2( - weights=None, include_top=True, input_shape=input_shape, classes=num_classes - ) - - # Compile model w/ learning rate schedule - lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( - initial_learning_rate=1e-3, - decay_steps=10000, - decay_rate=0.9, - ) - model.compile( - optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule), - loss="categorical_crossentropy", - metrics=["accuracy"], - ) - return model diff --git a/src/py/flwr_experimental/baseline/plot/__init__.py b/src/py/flwr_experimental/baseline/plot/__init__.py deleted file mode 100644 index bfab50defebf..000000000000 --- a/src/py/flwr_experimental/baseline/plot/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Provides plotting functionality.""" - - -from .plot import bar_chart as bar_chart -from .plot import line_chart as line_chart -from .plot import single_bar_chart as single_bar_chart diff --git a/src/py/flwr_experimental/baseline/plot/plot.py b/src/py/flwr_experimental/baseline/plot/plot.py deleted file mode 100644 index e64b2435f5c7..000000000000 --- a/src/py/flwr_experimental/baseline/plot/plot.py +++ /dev/null @@ -1,247 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Provides plotting functions.""" - - -import math -import os.path -from enum import Enum -from pathlib import Path -from typing import List, Union - -import matplotlib -import matplotlib.pyplot as plt -import numpy as np - -matplotlib.rcParams["ps.useafm"] = True -matplotlib.rcParams["pdf.use14corefonts"] = True -matplotlib.rcParams["axes.axisbelow"] = True -matplotlib.rcParams["hatch.linewidth"] = 1.0 -matplotlib.use("Agg") - - -ROOT_DIR = os.path.realpath(os.path.dirname(__file__) + "/../../..") -PLOT_DIR = ROOT_DIR + "/plot" - -# If it does not exist create the output directory for the plots -Path(PLOT_DIR).mkdir(exist_ok=True) - - -MARKERSIZE = 3 # Size of the symbols on a linecharts - - -class LegendLoc(Enum): - """Enumerates possible legend location in a plot.""" - - UL = "upper left" - UR = "upper right" - LL = "lower left" - LR = "lower right" - UC = "upper center" - LC = "lower center" - CL = "center left" - CR = "center right" - - -# Disable too many arguments for all functions -# pylint: disable=too-many-arguments too-many-locals - - -def roundup_nearest(max_num: Union[int, float], div: int = 10) -> int: - """Roundup to nearst number divideable by n.""" - return int(math.ceil(max_num / float(div))) * div - - -def final_path(dir_name: str, filename: str, suffix: str = "pdf") -> str: - """Join path components and return as string.""" - filename_with_suffix = filename + "." 
+ suffix - - if os.path.isabs(filename_with_suffix): - return filename_with_suffix - - return os.path.join(dir_name, filename_with_suffix) - - -def single_bar_chart( - y_values: np.ndarray, - tick_labels: List[str], - x_label: str, - y_label: str, - filename: str = "single_bar_chart", -) -> str: - """Plot and save a single bar chart.""" - - x_values = np.arange(y_values.size) - fig = plt.figure(figsize=(5, 3)) - ax_subplot = fig.add_subplot(111) - - barwidth = 0.7 - opacity = 1.0 - - plt.bar( - x_values, - y_values, - barwidth / 2, - alpha=opacity, - color=["black"], - linewidth=1, - edgecolor="black", - ) - - ax_subplot.spines["right"].set_visible(False) - ax_subplot.spines["top"].set_visible(False) - ax_subplot.xaxis.set_ticks_position("bottom") - ax_subplot.yaxis.set_ticks_position("left") - - plt.ylabel(y_label, fontsize=16) - plt.xlabel(x_label, fontsize=16) - - plt.xlim((-1, y_values.size)) - plt.ylim((0, 100)) - - plt.grid(linestyle="dotted") - - gca = plt.gca() - gca.set_yticklabels(gca.get_yticks(), fontsize=16) - ax_subplot.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter("%.0f")) - - ax_subplot.set_xticks([0, 1, 2, 3]) - - ax_subplot.set_xticklabels(tick_labels, fontsize=14) - - fig.tight_layout() - path = final_path(PLOT_DIR, filename) - plt.savefig(path, dpi=1000, bbox_inches="tight", transparent=True) - return path - - -def bar_chart( - y_values: List[np.ndarray], - bar_labels: List[str], - x_label: str, - x_tick_labels: List[str], - y_label: str, - legend_location: LegendLoc = LegendLoc.LR, - filename: str = "bar_chart", -) -> str: - """Plot and save a bar chart. - - Note: - Currently only supports len(y_values) == 2 but it should be easy to - support more than 2 bars. Feel free to contribute. 
- """ - - x_values = np.arange(y_values[0].size) - fig = plt.figure(figsize=(5, 3)) - ax_subplot = fig.add_subplot(111) - - barwidth = 0.7 - opacity = 1.0 - - colors = ["r", "b"] - - rects = [ - plt.bar( - x_values - barwidth * 0.25 * pow(-1, i), - val, - barwidth / len(y_values), - alpha=opacity, - color=[colors[i]], - linewidth=1, - edgecolor="black", - ) - for i, val in enumerate(y_values) - ] - - ax_subplot.spines["right"].set_visible(False) - ax_subplot.spines["top"].set_visible(False) - ax_subplot.xaxis.set_ticks_position("bottom") - ax_subplot.yaxis.set_ticks_position("left") - - plt.ylabel(y_label, fontsize=16) - plt.xlabel(x_label, fontsize=16) - - plt.xlim((-1, y_values[0].size)) - plt.ylim((0, roundup_nearest(np.max(y_values), 20))) - - plt.grid(linestyle="dotted") - gca = plt.gca() - gca.set_yticklabels(gca.get_yticks(), fontsize=16) - ax_subplot.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter("%.0f")) - - # xticks - ax_subplot.set_xticks(range(len(x_tick_labels))) - ax_subplot.set_xticklabels(x_tick_labels, fontsize=14) - - lgd = ax_subplot.legend( - tuple([rect[0] for rect in rects]), - tuple(bar_labels), - loc=legend_location.value, - fontsize=14, - ncol=2, - ) - - fig.tight_layout() - path = final_path(PLOT_DIR, filename) - plt.savefig( - path, - dpi=1000, - bbox_inches="tight", - bbox_extra_artists=(lgd,), - transparent=True, - ) - return path - - -def line_chart( - lines: List[np.ndarray], - labels: List[str], - x_label: str, - y_label: str, - legend_location: LegendLoc = LegendLoc.LR, - filename: str = "line_chart", - y_floor: int = 0, - y_ceil: int = 100, -) -> str: - """Plot and save a line chart.""" - - assert len({line.size for line in lines}) == 1, "Each line must be of same size." 
- - x_values = range(0, len(lines[0])) - plt.figure(figsize=(6, 4)) - ax_subplot = plt.subplot(111) - symbols = ["-o", "-s", "-d", "-^", "-x", "-8", "-*", "-P"] - - for i, zipped in enumerate(zip(lines, labels)): - line, label = zipped - ax_subplot.plot(x_values, line, symbols[i], label=label, markersize=MARKERSIZE) - - plt.yticks(np.arange(y_floor, y_ceil, 10.0), fontsize=14) - plt.xticks(np.arange(min(x_values), max(x_values) + 1, 10.0), fontsize=10) - - gca = plt.gca() - gca.set_yticklabels(gca.get_yticks(), fontsize=10) - ax_subplot.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter("%.0f")) - - plt.ylim((y_floor, y_ceil + 1)) - plt.xlim((-1, len(x_values))) - plt.legend(loc=legend_location.value, fontsize=14) - # ax.set_xticklabels(('15s', '30s', '60s', '90s', '120s'), fontsize=15) - plt.xlabel(x_label, fontsize=16) - plt.ylabel(y_label, fontsize=16) - - path = final_path(PLOT_DIR, filename) - plt.savefig(path, dpi=1000, bbox_inches="tight", transparent=True) - return path diff --git a/src/py/flwr_experimental/baseline/run.py b/src/py/flwr_experimental/baseline/run.py deleted file mode 100644 index f21963c69a41..000000000000 --- a/src/py/flwr_experimental/baseline/run.py +++ /dev/null @@ -1,268 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Execute Fashion-MNIST baseline locally in Docker.""" - - -import argparse -import concurrent.futures -import configparser -import sys -from logging import INFO -from os import path -from time import strftime -from typing import List, Optional - -import flwr_experimental.baseline.tf_cifar.settings as tf_cifar_settings -import flwr_experimental.baseline.tf_fashion_mnist.settings as tf_fashion_mnist_settings -import flwr_experimental.baseline.tf_hotkey.settings as tf_hotkey_settings -from flwr.common.logger import configure, log -from flwr_experimental.baseline import command -from flwr_experimental.baseline.setting import Baseline -from flwr_experimental.ops.cluster import Cluster -from flwr_experimental.ops.compute.adapter import Adapter -from flwr_experimental.ops.compute.docker_adapter import DockerAdapter -from flwr_experimental.ops.compute.ec2_adapter import EC2Adapter -from flwr_experimental.ops.instance import Instance - -OPS_INI_PATH = path.normpath( - f"{path.dirname(path.realpath(__file__))}/../../../../.flower_ops" -) - -# Read config file and extract all values which are needed further down. 
-CONFIG = configparser.ConfigParser() -CONFIG.read(OPS_INI_PATH) - -WHEEL_FILENAME = CONFIG.get("paths", "wheel_filename") -WHEEL_LOCAL_PATH = path.expanduser(CONFIG.get("paths", "wheel_dir")) + WHEEL_FILENAME - -DOCKER_PRIVATE_KEY = path.realpath(path.dirname(__file__) + "/../../../docker/ssh_key") - - -def now() -> str: - """Return current date and time as string.""" - return strftime("%Y%m%dT%H%M%S") - - -def configure_cluster( - adapter: str, instances: List[Instance], baseline: str, setting: str -) -> Cluster: - """Return configured compute cluster.""" - adapter_instance: Optional[Adapter] = None - private_key: Optional[str] = None - - if adapter == "docker": - adapter_instance = DockerAdapter() - user = "root" - private_key = DOCKER_PRIVATE_KEY - elif adapter == "ec2": - adapter_instance = EC2Adapter( - image_id=CONFIG.get("aws", "image_id"), - key_name=path.expanduser(CONFIG.get("aws", "key_name")), - subnet_id=CONFIG.get("aws", "subnet_id"), - security_group_ids=CONFIG.get("aws", "security_group_ids").split(","), - tags=[ - ("Purpose", "flwr_experimental.baseline"), - ("Baseline Name", baseline), - ("Baseline Setting", setting), - ], - ) - user = "ubuntu" - private_key = path.expanduser(CONFIG.get("ssh", "private_key")) - else: - raise Exception(f"Adapter of type {adapter} does not exist.") - - cluster = Cluster( - adapter=adapter_instance, - ssh_credentials=(user, private_key), - instances=instances, - timeout=60, - ) - - return cluster - - -def load_baseline_setting(baseline: str, setting: str) -> Baseline: - """Return appropriate baseline setting.""" - if baseline == "tf_cifar": - return tf_cifar_settings.get_setting(setting) - if baseline == "tf_fashion_mnist": - return tf_fashion_mnist_settings.get_setting(setting) - if baseline == "tf_hotkey": - return tf_hotkey_settings.get_setting(setting) - - raise Exception("Setting not found.") - - -# pylint: disable=too-many-arguments, too-many-locals -def run(baseline: str, setting: str, adapter: str) -> None: 
- """Run baseline.""" - print(f"Starting baseline with {setting} settings.") - - wheel_remote_path = ( - f"/root/{WHEEL_FILENAME}" - if adapter == "docker" - else f"/home/ubuntu/{WHEEL_FILENAME}" - ) - - settings = load_baseline_setting(baseline, setting) - - # Get instances and add a logserver to the list - instances = settings.instances - instances.append( - Instance(name="logserver", group="logserver", num_cpu=2, num_ram=2) - ) - - # Configure cluster - log(INFO, "(1/9) Configure cluster.") - cluster = configure_cluster(adapter, instances, baseline, setting) - - # Start the cluster; this takes some time - log(INFO, "(2/9) Start cluster.") - cluster.start() - - # Upload wheel to all instances - log(INFO, "(3/9) Upload wheel to all instances.") - cluster.upload_all(WHEEL_LOCAL_PATH, wheel_remote_path) - - # Install the wheel on all instances - log(INFO, "(4/9) Install wheel on all instances.") - cluster.exec_all(command.install_wheel(wheel_remote_path)) - extras = ["examples-tensorflow"] if "tf_" in baseline else ["examples-pytorch"] - cluster.exec_all( - command.install_wheel(wheel_remote_path=wheel_remote_path, wheel_extras=extras) - ) - - # Download datasets in server and clients - log(INFO, "(5/9) Download dataset on server and clients.") - cluster.exec_all( - command.download_dataset(baseline=baseline), groups=["server", "clients"] - ) - - # Start logserver - log(INFO, "(6/9) Start logserver.") - logserver = cluster.get_instance("logserver") - cluster.exec( - logserver.name, - command.start_logserver( - logserver_s3_bucket=CONFIG.get("aws", "logserver_s3_bucket"), - logserver_s3_key=f"{baseline}_{setting}_{now()}.log", - ), - ) - - # Start Flower server on Flower server instances - log(INFO, "(7/9) Start server.") - cluster.exec( - "server", - command.start_server( - log_host=f"{logserver.private_ip}:8081", - baseline=baseline, - setting=setting, - ), - ) - - # Start Flower clients - log(INFO, "(8/9) Start clients.") - server = cluster.get_instance("server") 
- - with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor: - # Start the load operations and mark each future with its URL - concurrent.futures.wait( - [ - executor.submit( - cluster.exec, - client_setting.instance_name, - command.start_client( - log_host=f"{logserver.private_ip}:8081", - server_address=f"{server.private_ip}:8080", - baseline=baseline, - setting=setting, - cid=client_setting.cid, - ), - ) - for client_setting in settings.clients - ] - ) - - # Shutdown server and client instance after 10min if not at least one Flower - # process is running it - log(INFO, "(9/9) Start shutdown watcher script.") - cluster.exec_all(command.watch_and_shutdown("flwr", adapter)) - - # Give user info how to tail logfile - private_key = ( - DOCKER_PRIVATE_KEY - if adapter == "docker" - else path.expanduser(CONFIG.get("ssh", "private_key")) - ) - - log( - INFO, - "If you would like to tail the central logfile run:\n\n\t%s\n", - command.tail_logfile(adapter, private_key, logserver), - ) - - -def main() -> None: - """Start Flower baseline.""" - parser = argparse.ArgumentParser(description="Flower") - - # When adding a new setting make sure to modify the load_baseline_setting function - possible_baselines = ["tf_cifar", "tf_fashion_mnist", "tf_hotkey"] - possible_settings = [] - all_settings = [ - list(tf_cifar_settings.SETTINGS.keys()), - list(tf_fashion_mnist_settings.SETTINGS.keys()), - list(tf_hotkey_settings.SETTINGS.keys()), - ] - - # Show only relevant settings based on baseline as choices - # for --setting parameter - baseline_arg = [arg for arg in sys.argv if "--baseline" in arg] - if len(baseline_arg) > 0: - selected_baseline = baseline_arg[0].split("=")[1] - idx = possible_baselines.index(selected_baseline) - possible_settings = all_settings[idx] - - parser.add_argument( - "--baseline", - type=str, - required=True, - choices=possible_baselines, - help="Name of baseline name to run.", - ) - parser.add_argument( - "--setting", - type=str, - 
required=True, - choices=possible_settings, - help="Name of setting to run.", - ) - parser.add_argument( - "--adapter", - type=str, - required=True, - choices=["docker", "ec2"], - help="Set adapter to be used.", - ) - args = parser.parse_args() - - # Configure logger - configure(f"flower_{args.baseline}_{args.setting}") - - run(baseline=args.baseline, setting=args.setting, adapter=args.adapter) - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_experimental/baseline/setting.py b/src/py/flwr_experimental/baseline/setting.py deleted file mode 100644 index c47d202539ab..000000000000 --- a/src/py/flwr_experimental/baseline/setting.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Provides a variaty of baseline settings base classes.""" - - -from dataclasses import dataclass -from typing import List, Optional - -from flwr_experimental.ops.instance import Instance - - -@dataclass -class BaseSetting: - """Base class for all settings.""" - - instance_name: str - - -# pylint: disable=too-many-instance-attributes -@dataclass -class ServerSetting(BaseSetting): - """Settings for the server.""" - - strategy: str - rounds: int - min_num_clients: int - sample_fraction: float - min_sample_size: int - training_round_timeout: Optional[int] - lr_initial: float - partial_updates: bool - importance_sampling: bool - dynamic_timeout: bool - alternating_timeout: bool = False - dry_run: bool = False - training_round_timeout_short: Optional[int] = None - - -@dataclass -class ClientSetting(BaseSetting): - """Settings for the client.""" - - # Individual per client - cid: str - partition: int - delay_factor: float - - # Same across all clients - iid_fraction: float - num_clients: int - dry_run: bool - - -@dataclass -class Baseline: - """One specific training setting.""" - - instances: List[Instance] - server: ServerSetting - clients: List[ClientSetting] diff --git a/src/py/flwr_experimental/baseline/tf_cifar/README.md b/src/py/flwr_experimental/baseline/tf_cifar/README.md deleted file mode 100644 index aa1de0074e8d..000000000000 --- a/src/py/flwr_experimental/baseline/tf_cifar/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# CIFAR-10/100 - -## Ops -To execute the `run_aws.py` script you will have to create a `.flower_ops` file in the -git root of this project. 
The file needs to contain the following fields - -``` -[paths] -wheel_dir = ~/development/adap/flower/dist/ -wheel_filename = WHEEL_FILENAME - -[aws] -image_id = ami-0370b0294d7241341 -key_name = AWS_KEY_NAME -subnet_id = YOUR_AWS_SUBNET_ID -security_group_ids = YOUR_AWS_SECURITY_GROUP_ID - -[ssh] -private_key = PATH_TO_YOU_PRIVATE_KEY_TO_SSH_INTO_THE_MACHINES -``` - -### Remarks - -#### Wheel directory -Adjust the wheel directory according to the localation of the repo on your machine. - -#### Security Group -The security group needs to have port 8080 open so that the clients can connect to the server. - -#### Subnet Id -We are starting all instances in the same subnet to be more cost efficent (traffic between EC2 -instances in the same subnet over their private IP does not incure any cost). - -#### AMI -The provided AMI is a bare Ubuntu 18.04 image which was modified with the -`dev/aws-ami-bootstrap.sh` script. - -### Execution -To execute the script simply do: -```bash -python -m flwr_experimental.baseline.tf_cifar.run_aws -``` diff --git a/src/py/flwr_experimental/baseline/tf_cifar/client.py b/src/py/flwr_experimental/baseline/tf_cifar/client.py deleted file mode 100644 index 5233ee368ac8..000000000000 --- a/src/py/flwr_experimental/baseline/tf_cifar/client.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Flower client using TensorFlow for CIFAR-10/100.""" - - -import argparse -from logging import ERROR, INFO - -import tensorflow as tf - -import flwr as fl -from flwr.common.logger import configure, log -from flwr_experimental.baseline.common import VisionClassificationClient -from flwr_experimental.baseline.dataset import tf_cifar_partitioned -from flwr_experimental.baseline.model import resnet50v2 -from flwr_experimental.baseline.setting import ClientSetting -from flwr_experimental.baseline.tf_cifar.settings import SETTINGS, get_setting - -from . import DEFAULT_SERVER_ADDRESS, NUM_CLASSES, SEED - -tf.get_logger().setLevel("ERROR") - - -class ClientSettingNotFound(Exception): - """Raise when client setting could not be found.""" - - -def parse_args() -> argparse.Namespace: - """Parse and return commandline arguments.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--server_address", - type=str, - default=DEFAULT_SERVER_ADDRESS, - help=f"gRPC server address (IPv6, default: {DEFAULT_SERVER_ADDRESS})", - ) - parser.add_argument( - "--log_host", - type=str, - help="HTTP log handler host (no default)", - ) - parser.add_argument( - "--setting", - type=str, - choices=SETTINGS.keys(), - help="Setting to run.", - ) - parser.add_argument("--cid", type=str, required=True, help="Client cid.") - return parser.parse_args() - - -def get_client_setting(setting: str, cid: str) -> ClientSetting: - """Return client setting based on setting name and cid.""" - for client_setting in get_setting(setting).clients: - if client_setting.cid == cid: - return client_setting - - raise ClientSettingNotFound() - - -def main() -> None: - """Load data, create and start CIFAR-10/100 client.""" - args = parse_args() - - client_setting = get_client_setting(args.setting, args.cid) - - # Configure logger - configure(identifier=f"client:{client_setting.cid}", host=args.log_host) - 
log(INFO, "Starting client, settings: %s", client_setting) - - # Load model - model = resnet50v2(input_shape=(32, 32, 3), num_classes=NUM_CLASSES, seed=SEED) - - # Load local data partition - (xy_train_partitions, xy_test_partitions), _ = tf_cifar_partitioned.load_data( - iid_fraction=client_setting.iid_fraction, - num_partitions=client_setting.num_clients, - cifar100=False, - ) - x_train, y_train = xy_train_partitions[client_setting.partition] - x_test, y_test = xy_test_partitions[client_setting.partition] - if client_setting.dry_run: - x_train = x_train[0:100] - y_train = y_train[0:100] - x_test = x_test[0:50] - y_test = y_test[0:50] - - # Start client - client = VisionClassificationClient( - client_setting.cid, - model, - (x_train, y_train), - (x_test, y_test), - client_setting.delay_factor, - NUM_CLASSES, - augment=True, - augment_horizontal_flip=True, - augment_offset=2, - ) - fl.client.start_client(args.server_address, client) - - -if __name__ == "__main__": - # pylint: disable=broad-except - try: - main() - except Exception as err: - log(ERROR, "Fatal error in main") - log(ERROR, err, exc_info=True, stack_info=True) - - # Raise the error again so the exit code is correct - raise err diff --git a/src/py/flwr_experimental/baseline/tf_cifar/download.py b/src/py/flwr_experimental/baseline/tf_cifar/download.py deleted file mode 100644 index 4beb3fb4a0f2..000000000000 --- a/src/py/flwr_experimental/baseline/tf_cifar/download.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Helper script to download CIFAR-10/100.""" - - -import argparse -from logging import INFO - -import tensorflow as tf - -from flwr.common.logger import log - -tf.get_logger().setLevel("ERROR") - - -def main() -> None: - """Download data.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--cifar", - type=int, - choices=[10, 100], - default=10, - help="CIFAR version, allowed values: 10 or 100 (default: 10)", - ) - args = parser.parse_args() - log(INFO, "Download CIFAR-%s", args.cifar) - - # Load model and data - download_data(num_classes=args.cifar) - - -def download_data(num_classes: int) -> None: - """Download CIFAR-10/100.""" - cifar = ( - tf.keras.datasets.cifar10 if num_classes == 10 else tf.keras.datasets.cifar100 - ) - (_, _), (_, _) = cifar.load_data() - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_experimental/baseline/tf_cifar/server.py b/src/py/flwr_experimental/baseline/tf_cifar/server.py deleted file mode 100644 index cb7a99b9d360..000000000000 --- a/src/py/flwr_experimental/baseline/tf_cifar/server.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Flower server for CIFAR-10/100 image classification.""" - - -import argparse -import math -from logging import ERROR, INFO -from typing import Callable, Dict, Optional - -import flwr as fl -from flwr.common.logger import configure, log -from flwr_experimental.baseline.common import get_evaluate_fn -from flwr_experimental.baseline.dataset import tf_cifar_partitioned -from flwr_experimental.baseline.model import resnet50v2 -from flwr_experimental.baseline.tf_cifar.settings import SETTINGS, get_setting - -from . import DEFAULT_SERVER_ADDRESS, NUM_CLASSES, SEED - - -def parse_args() -> argparse.Namespace: - """Parse and return commandline arguments.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--log_host", - type=str, - help="HTTP log handler host (no default)", - ) - parser.add_argument( - "--setting", - type=str, - choices=SETTINGS.keys(), - help="Setting to run.", - ) - - return parser.parse_args() - - -def main() -> None: - """Start server and train a number of rounds.""" - args = parse_args() - - # Configure logger - configure(identifier="server", host=args.log_host) - - server_setting = get_setting(args.setting).server - log(INFO, "server_setting: %s", server_setting) - - # Load evaluation data - (_, _), (x_test, y_test) = tf_cifar_partitioned.load_data( - iid_fraction=0.0, num_partitions=1, cifar100=NUM_CLASSES == 100 - ) - if server_setting.dry_run: - x_test = x_test[0:50] - y_test = y_test[0:50] - - # Load model (for centralized evaluation) - model = resnet50v2(input_shape=(32, 32, 3), num_classes=NUM_CLASSES, seed=SEED) - - # Strategy - evaluate_fn = get_evaluate_fn( - model=model, num_classes=NUM_CLASSES, xy_test=(x_test, y_test) - ) - fit_config_fn = get_on_fit_config_fn( - lr_initial=server_setting.lr_initial, - timeout=server_setting.training_round_timeout, - partial_updates=server_setting.partial_updates, - ) - - if 
server_setting.strategy == "fedavg": - strategy = fl.server.strategy.FedAvg( - fraction_fit=server_setting.sample_fraction, - min_fit_clients=server_setting.min_sample_size, - min_available_clients=server_setting.min_num_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=fit_config_fn, - ) - - if server_setting.strategy == "fast-and-slow": - if server_setting.training_round_timeout is None: - raise ValueError( - "No `training_round_timeout` set for `fast-and-slow` strategy" - ) - strategy = fl.server.strategy.FastAndSlow( - fraction_fit=server_setting.sample_fraction, - min_fit_clients=server_setting.min_sample_size, - min_available_clients=server_setting.min_num_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=fit_config_fn, - importance_sampling=server_setting.importance_sampling, - dynamic_timeout=server_setting.dynamic_timeout, - dynamic_timeout_percentile=0.8, - alternating_timeout=server_setting.alternating_timeout, - r_fast=1, - r_slow=1, - t_fast=math.ceil(0.5 * server_setting.training_round_timeout), - t_slow=server_setting.training_round_timeout, - ) - - # Run server - fl.server.start_server( - DEFAULT_SERVER_ADDRESS, - config={"num_rounds": server_setting.rounds}, - strategy=strategy, - ) - - -def get_on_fit_config_fn( - lr_initial: float, timeout: Optional[int], partial_updates: bool -) -> Callable[[int], Dict[str, fl.common.Scalar]]: - """Return a function which returns training configurations.""" - - def fit_config(server_round: int) -> Dict[str, fl.common.Scalar]: - """Return a configuration with static batch size and (local) epochs.""" - config: Dict[str, fl.common.Scalar] = { - "epoch_global": str(server_round), - "epochs": str(1), - "batch_size": str(32), - "lr_initial": str(lr_initial), - "lr_decay": str(0.99), - "partial_updates": "1" if partial_updates else "0", - } - if timeout is not None: - config["timeout"] = str(timeout) - - return config - - return fit_config - - -if __name__ == "__main__": - # pylint: disable=broad-except - 
try: - main() - except Exception as err: - log(ERROR, "Fatal error in main") - log(ERROR, err, exc_info=True, stack_info=True) - - # Raise the error again so the exit code is correct - raise err diff --git a/src/py/flwr_experimental/baseline/tf_cifar/settings.py b/src/py/flwr_experimental/baseline/tf_cifar/settings.py deleted file mode 100644 index ed1a72cafac9..000000000000 --- a/src/py/flwr_experimental/baseline/tf_cifar/settings.py +++ /dev/null @@ -1,355 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Provides a variaty of baseline settings for CIFAR.""" - - -from typing import List - -from flwr_experimental.baseline.config import ( - configure_client_instances, - sample_delay_factors, -) -from flwr_experimental.baseline.setting import Baseline, ClientSetting, ServerSetting -from flwr_experimental.ops.instance import Instance - -ROUNDS = 20 -MIN_NUM_CLIENTS = 80 -SAMPLE_FRACTION = 0.1 -MIN_SAMPLE_SIZE = 10 - -LR_INITIAL = 0.01 - -IID_FRACTION = 0.1 -MAX_DELAY_FACTOR = 4.0 # Equals a 5x slowdown - - -def get_setting(name: str) -> Baseline: - """Return appropriate setting.""" - if name not in SETTINGS: - raise Exception( - f"Baseline {name} does not exist. 
Valid settings are: {list(SETTINGS.keys())}" - ) - return SETTINGS[name] - - -def get_instance_name( - instance_names: List[str], num_clients: int, client_index: int -) -> str: - """Return instance_name.""" - idx = client_index // (num_clients // len(instance_names)) - idx = min([idx, len(instance_names) - 1]) - return instance_names[min(idx, len(instance_names))] - - -def configure_uniform_clients( - iid_fraction: float, - instance_names: List[str], - num_clients: int, - dry_run: bool, -) -> List[ClientSetting]: - """Configure `num_clients`, all using the same delay factor.""" - clients = [] - for i in range(num_clients): - client = ClientSetting( - # Set instance on which to run - instance_name=get_instance_name(instance_names, num_clients, i), - # Individual - cid=str(i), - partition=i, - delay_factor=0.0, - # Shared - iid_fraction=iid_fraction, - num_clients=num_clients, - dry_run=dry_run, - ) - clients.append(client) - - return clients - - -# pylint: disable=too-many-arguments -def configure_clients( - iid_fraction: float, - instance_names: List[str], - num_clients: int, - dry_run: bool, - delay_factor_fast: float, - delay_factor_slow: float, - sample_delays: bool = True, -) -> List[ClientSetting]: - """Configure `num_clients` with different delay factors.""" - if sample_delays: - # Configure clients with sampled delay factors - delay_factors = sample_delay_factors( - num_clients=num_clients, max_delay=delay_factor_slow, seed=2020 - ) - return [ - ClientSetting( - # Set instance on which to run - instance_name=get_instance_name(instance_names, num_clients, i), - # Individual - cid=str(i), - partition=i, - delay_factor=delay_factors[i], - # Shared - iid_fraction=iid_fraction, - num_clients=num_clients, - dry_run=dry_run, - ) - for i in range(num_clients) - ] - # Configure clients with fixed delay factors - clients = [] - for i in range(num_clients): - client = ClientSetting( - # Set instance on which to run - instance_name=get_instance_name(instance_names, 
num_clients, i), - # Individual - cid=str(i), - partition=i, - # Indices 0 to 49 fast, 50 to 99 slow - delay_factor=( - delay_factor_fast if i < int(num_clients / 2) else delay_factor_slow - ), - # Shared - iid_fraction=iid_fraction, - num_clients=num_clients, - dry_run=dry_run, - ) - clients.append(client) - - return clients - - -client_instances_100, client_names_100 = configure_client_instances( - num_clients=100, num_cpu=2, num_ram=8 -) - -client_instances_10, client_names_10 = configure_client_instances( - num_clients=10, num_cpu=2, num_ram=8 -) - -client_instances_4, client_names_4 = configure_client_instances( - num_clients=4, num_cpu=2, num_ram=8 -) - -client_instances_2, client_names_2 = configure_client_instances( - num_clients=2, num_cpu=16, num_ram=64 -) - -SETTINGS = { - "fedavg-sync-min": Baseline( - instances=[Instance(name="server", group="server", num_cpu=2, num_ram=8)] - + client_instances_2, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=10, - min_num_clients=2, - sample_fraction=1.0, - min_sample_size=2, - training_round_timeout=None, - lr_initial=0.001, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_uniform_clients( - iid_fraction=1.0, - instance_names=client_names_2, - num_clients=2, - dry_run=False, - ), - ), - "fedavg-sync-10-10": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_10, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=10, - sample_fraction=1.0, - min_sample_size=10, - training_round_timeout=None, - lr_initial=0.001, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_uniform_clients( - iid_fraction=1.0, - instance_names=client_names_10, - num_clients=10, - dry_run=False, - ), - ), - "fedavg-sync-100-10": Baseline( - 
instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=100, - sample_fraction=0.1, - min_sample_size=10, - training_round_timeout=None, - lr_initial=0.001, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_uniform_clients( - iid_fraction=1.0, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - ), - ), - ######################################## - ### PREVIOUS ### - ######################################## - "dry-run": Baseline( - instances=[ - Instance(name="server", group="server", num_cpu=2, num_ram=8), - Instance(name="client", group="clients", num_cpu=2, num_ram=8), - ], - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=1, - min_num_clients=1, - sample_fraction=1.0, - min_sample_size=1, - training_round_timeout=600, - lr_initial=0.01, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=True, - ), - clients=configure_uniform_clients( - iid_fraction=0.1, instance_names=["client"], num_clients=4, dry_run=True - ), - ), - "minimal": Baseline( - instances=[Instance(name="server", group="server", num_cpu=2, num_ram=8)] - + client_instances_4, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=2, - min_num_clients=4, - sample_fraction=0.75, - min_sample_size=3, - training_round_timeout=3600, - lr_initial=0.01, - partial_updates=True, - importance_sampling=True, - dynamic_timeout=True, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_4, - num_clients=4, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), - "fedavg-sync": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, 
- server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=None, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), - "fedavg-async": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=20, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), - "fedfs": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=20, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=True, - dynamic_timeout=True, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), -} diff --git a/src/py/flwr_experimental/baseline/tf_fashion_mnist/README.md 
b/src/py/flwr_experimental/baseline/tf_fashion_mnist/README.md deleted file mode 100644 index 43c25065344b..000000000000 --- a/src/py/flwr_experimental/baseline/tf_fashion_mnist/README.md +++ /dev/null @@ -1,95 +0,0 @@ -# Fashion-MNIST Baselines - -## Prepare - -To execute the `run.py` script you need to create a `.flower_ops` file in the -git root of this project. The file needs to contain the following fields: - -``` -[paths] -wheel_dir = ~/development/adap/flower/dist/ -wheel_filename = flwr-0.0.1-py3-none-any.whl - -[aws] -image_id = ami-0370b0294d7241341 -key_name = AWS_KEY_NAME -subnet_id = YOUR_AWS_SUBNET_ID -security_group_ids = YOUR_AWS_SECURITY_GROUP_ID -logserver_s3_bucket = YOUR_S3_BUCKET - -[ssh] -private_key = PATH_TO_YOU_PRIVATE_KEY_TO_SSH_INTO_THE_MACHINES -``` - -### Remarks - -#### Wheel directory - -Adjust the wheel directory according to the localation of the repo on your -machine. - -#### Security Group - -The security group needs to have port 8080 open so that the clients can connect -to the server. - -#### Subnet Id - -We are starting all instances in the same subnet to be more cost efficent -(traffic between EC2 instances in the same subnet over their private IP does -not incure any cost). - -#### AMI - -The provided AMI is a bare Ubuntu 18.04 image which was modified using the -`dev/aws-ami-bootstrap.sh` script. - -## Build Docker Container - -```bash -./src/docker/build.sh -``` - -## Build Python Wheel - -To execute the latest version of your baselines during development, please -ensure that the `.whl` build in `dist/` reflects your changes. 
Re-build -if necessary: - -```bash -./dev/build.sh -``` - -## Execute - -To execute a baseline setting locally using docker: - -```bash -python -m flwr_experimental.baseline.tf_fashion_mnist.run --adapter="docker" --setting="minimal" -``` - -To execute a baseline setting remotely on AWS: - -```bash -python -m flwr_experimental.baseline.tf_fashion_mnist.run --adapter="ec2" --setting="minimal" -``` - -Or alternatively, customize the wrapper script `run.sh` and run it using your AWS profile: - -```bash -AWS_PROFILE=your-aws-profile src/py/flwr_experimental/baseline/run.sh -``` - -## Get Results - -See all current and past results on the S3 website of your S3 bucket: - -``` -http://[your-flower-log-s3-bucket].s3-website.eu-central-1.amazonaws.com/ -``` - -Download and filter invididual logs using `cURL` and `jq`: - -```bash -curl http://[your-flower-log-s3-bucket].s3-eu-central-1.amazonaws.com/[your-experiment].log | jq '.identifier + " => " + .message' -``` diff --git a/src/py/flwr_experimental/baseline/tf_fashion_mnist/__init__.py b/src/py/flwr_experimental/baseline/tf_fashion_mnist/__init__.py deleted file mode 100644 index 62c3d33eee7f..000000000000 --- a/src/py/flwr_experimental/baseline/tf_fashion_mnist/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Flower baseline using TensorFlow for Fashion-MNIST image classification.""" - - -DEFAULT_SERVER_ADDRESS = "[::]:8080" - -SEED = 2020 diff --git a/src/py/flwr_experimental/baseline/tf_fashion_mnist/client.py b/src/py/flwr_experimental/baseline/tf_fashion_mnist/client.py deleted file mode 100644 index 6177336446f1..000000000000 --- a/src/py/flwr_experimental/baseline/tf_fashion_mnist/client.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower client using TensorFlow for Fashion-MNIST image classification.""" - - -import argparse -from logging import ERROR, INFO - -import tensorflow as tf - -import flwr as fl -from flwr.common.logger import configure, log -from flwr_experimental.baseline.common import VisionClassificationClient -from flwr_experimental.baseline.dataset import tf_fashion_mnist_partitioned -from flwr_experimental.baseline.model import orig_cnn -from flwr_experimental.baseline.setting import ClientSetting -from flwr_experimental.baseline.tf_fashion_mnist.settings import SETTINGS, get_setting - -from . 
import DEFAULT_SERVER_ADDRESS, SEED - -tf.get_logger().setLevel("ERROR") - - -class ClientSettingNotFound(Exception): - """Raise when client setting could not be found.""" - - -def parse_args() -> argparse.Namespace: - """Parse and return commandline arguments.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--server_address", - type=str, - default=DEFAULT_SERVER_ADDRESS, - help=f"gRPC server address (IPv6, default: {DEFAULT_SERVER_ADDRESS})", - ) - parser.add_argument( - "--log_host", - type=str, - help="HTTP log handler host (no default)", - ) - parser.add_argument( - "--setting", - type=str, - choices=SETTINGS.keys(), - help="Setting to run.", - ) - parser.add_argument("--cid", type=str, required=True, help="Client cid.") - return parser.parse_args() - - -def get_client_setting(setting: str, cid: str) -> ClientSetting: - """Return client setting based on setting name and cid.""" - for client_setting in get_setting(setting).clients: - if client_setting.cid == cid: - return client_setting - - raise ClientSettingNotFound() - - -def main() -> None: - """Load data, create and start Fashion-MNIST client.""" - args = parse_args() - - client_setting = get_client_setting(args.setting, args.cid) - - # Configure logger - configure(identifier=f"client:{client_setting.cid}", host=args.log_host) - log(INFO, "Starting client, settings: %s", client_setting) - - # Load model - model = orig_cnn(input_shape=(28, 28, 1), seed=SEED) - - # Load local data partition - ( - (xy_train_partitions, xy_test_partitions), - _, - ) = tf_fashion_mnist_partitioned.load_data( - iid_fraction=client_setting.iid_fraction, - num_partitions=client_setting.num_clients, - ) - x_train, y_train = xy_train_partitions[client_setting.partition] - x_test, y_test = xy_test_partitions[client_setting.partition] - if client_setting.dry_run: - x_train = x_train[0:100] - y_train = y_train[0:100] - x_test = x_test[0:50] - y_test = y_test[0:50] - - # Start client - client = 
VisionClassificationClient( - client_setting.cid, - model, - (x_train, y_train), - (x_test, y_test), - client_setting.delay_factor, - 10, - augment=True, - augment_horizontal_flip=False, - augment_offset=1, - ) - fl.client.start_client(args.server_address, client) - - -if __name__ == "__main__": - # pylint: disable=broad-except - try: - main() - except Exception as err: - log(ERROR, "Fatal error in main") - log(ERROR, err, exc_info=True, stack_info=True) - - # Raise the error again so the exit code is correct - raise err diff --git a/src/py/flwr_experimental/baseline/tf_fashion_mnist/download.py b/src/py/flwr_experimental/baseline/tf_fashion_mnist/download.py deleted file mode 100644 index c2f0eb580cf0..000000000000 --- a/src/py/flwr_experimental/baseline/tf_fashion_mnist/download.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Helper script to download CIFAR-10/100.""" - - -from logging import INFO - -import tensorflow as tf - -from flwr.common.logger import log - -tf.get_logger().setLevel("ERROR") - - -def main() -> None: - """Download data.""" - log(INFO, "Download Fashion-MNIST") - tf.keras.datasets.fashion_mnist.load_data() - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_experimental/baseline/tf_fashion_mnist/fn_plots.py b/src/py/flwr_experimental/baseline/tf_fashion_mnist/fn_plots.py deleted file mode 100644 index e28d98808d8a..000000000000 --- a/src/py/flwr_experimental/baseline/tf_fashion_mnist/fn_plots.py +++ /dev/null @@ -1,416 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Generate plots for Fashion-MNIST results.""" - - -from typing import List, Tuple - -import numpy as np - -from flwr_experimental.baseline.plot import line_chart - -RESULTS = { - "fn-c10-r40-fedfs-v1-16": [ - (0, 0.03759999945759773), - (1, 0.7357000112533569), - (2, 0.7964000105857849), - (3, 0.8057000041007996), - (4, 0.8197000026702881), - (5, 0.8321999907493591), - (6, 0.8583999872207642), - (7, 0.8324999809265137), - (8, 0.864300012588501), - (9, 0.8565000295639038), - (10, 0.8743000030517578), - (11, 0.8575000166893005), - (12, 0.8496999740600586), - (13, 0.8644999861717224), - (14, 0.8758999705314636), - (15, 0.8762999773025513), - (16, 0.8198999762535095), - (17, 0.8725000023841858), - (18, 0.882099986076355), - (19, 0.8758999705314636), - (20, 0.8791000247001648), - (21, 0.8792999982833862), - (22, 0.885699987411499), - (23, 0.8748000264167786), - (24, 0.8561000227928162), - (25, 0.8564000129699707), - (26, 0.8363999724388123), - (27, 0.876800000667572), - (28, 0.8805999755859375), - (29, 0.8569999933242798), - (30, 0.8654000163078308), - (31, 0.8705999851226807), - (32, 0.8468999862670898), - (33, 0.887499988079071), - (34, 0.8823000192642212), - (35, 0.8806999921798706), - (36, 0.8823000192642212), - (37, 0.8889999985694885), - (38, 0.8101000189781189), - (39, 0.8652999997138977), - (40, 0.8766000270843506), - ], - "fn-c10-r40-fedfs-v0-16-16": [ - (0, 0.03759999945759773), - (1, 0.7462000250816345), - (2, 0.7843000292778015), - (3, 0.7990999817848206), - (4, 0.8149999976158142), - (5, 0.8291000127792358), - (6, 0.8413000106811523), - (7, 0.8600999712944031), - (8, 0.8511999845504761), - (9, 0.8668000102043152), - (10, 0.857699990272522), - (11, 0.8673999905586243), - (12, 0.8765000104904175), - (13, 0.8773999810218811), - (14, 0.8773999810218811), - (15, 0.8562999963760376), - (16, 0.8758999705314636), - (17, 0.8729000091552734), - (18, 0.8722000122070312), - (19, 
0.8356999754905701), - (20, 0.8776999711990356), - (21, 0.8845000267028809), - (22, 0.8700000047683716), - (23, 0.8766999840736389), - (24, 0.8870999813079834), - (25, 0.7976999878883362), - (26, 0.876800000667572), - (27, 0.8084999918937683), - (28, 0.8737999796867371), - (29, 0.8867999911308289), - (30, 0.8797000050544739), - (31, 0.8866999745368958), - (32, 0.8795999884605408), - (33, 0.8743000030517578), - (34, 0.8881000280380249), - (35, 0.8858000040054321), - (36, 0.8881000280380249), - (37, 0.8851000070571899), - (38, 0.8403000235557556), - (39, 0.8751000165939331), - (40, 0.8812000155448914), - ], - "fn-c10-r40-fedfs-v0-16-08": [ - (0, 0.03759999945759773), - (1, 0.644599974155426), - (2, 0.7526000142097473), - (3, 0.7882999777793884), - (4, 0.8141000270843506), - (5, 0.8335000276565552), - (6, 0.8378999829292297), - (7, 0.8572999835014343), - (8, 0.86080002784729), - (9, 0.84170001745224), - (10, 0.8429999947547913), - (11, 0.8489000201225281), - (12, 0.858299970626831), - (13, 0.8694999814033508), - (14, 0.8694000244140625), - (15, 0.8751999735832214), - (16, 0.8722000122070312), - (17, 0.8736000061035156), - (18, 0.8744000196456909), - (19, 0.8763999938964844), - (20, 0.8431000113487244), - (21, 0.8564000129699707), - (22, 0.869700014591217), - (23, 0.873199999332428), - (24, 0.8788999915122986), - (25, 0.8726000189781189), - (26, 0.8784999847412109), - (27, 0.8777999877929688), - (28, 0.8776000142097473), - (29, 0.8830000162124634), - (30, 0.8838000297546387), - (31, 0.873199999332428), - (32, 0.8822000026702881), - (33, 0.8835999965667725), - (34, 0.8826000094413757), - (35, 0.8847000002861023), - (36, 0.8835999965667725), - (37, 0.7781000137329102), - (38, 0.8820000290870667), - (39, 0.8762000203132629), - (40, 0.8736000061035156), - ], - "fn-c10-r40-fedavg-16": [ - (0, 0.03759999945759773), - (1, 0.6743000149726868), - (2, 0.7746000289916992), - (3, 0.7752000093460083), - (4, 0.7994999885559082), - (5, 0.8137000203132629), - (6, 0.8341000080108643), 
- (7, 0.822700023651123), - (8, 0.822700023651123), - (9, 0.8327999711036682), - (10, 0.8264999985694885), - (11, 0.8608999848365784), - (12, 0.8526999950408936), - (13, 0.859000027179718), - (14, 0.8611000180244446), - (15, 0.8482999801635742), - (16, 0.8560000061988831), - (17, 0.8414000272750854), - (18, 0.8305000066757202), - (19, 0.8445000052452087), - (20, 0.8525999784469604), - (21, 0.8528000116348267), - (22, 0.8544999957084656), - (23, 0.8572999835014343), - (24, 0.8547000288963318), - (25, 0.8582000136375427), - (26, 0.8501999974250793), - (27, 0.8741999864578247), - (28, 0.8605999946594238), - (29, 0.8578000068664551), - (30, 0.8578000068664551), - (31, 0.8598999977111816), - (32, 0.8450999855995178), - (33, 0.85589998960495), - (34, 0.8565999865531921), - (35, 0.8582000136375427), - (36, 0.8547999858856201), - (37, 0.8608999848365784), - (38, 0.8503000140190125), - (39, 0.8677999973297119), - (40, 0.8535000085830688), - ], - "fn-c50-r40-fedfs-v1-16": [ - (0, 0.03759999945759773), - (1, 0.7195000052452087), - (2, 0.7919999957084656), - (3, 0.8069000244140625), - (4, 0.8201000094413757), - (5, 0.8353000283241272), - (6, 0.8583999872207642), - (7, 0.8440999984741211), - (8, 0.8585000038146973), - (9, 0.8571000099182129), - (10, 0.840499997138977), - (11, 0.8586000204086304), - (12, 0.853600025177002), - (13, 0.8680999875068665), - (14, 0.8540999889373779), - (15, 0.8722000122070312), - (16, 0.8702999949455261), - (17, 0.8741999864578247), - (18, 0.8626000285148621), - (19, 0.8730999827384949), - (20, 0.8611999750137329), - (21, 0.8758999705314636), - (22, 0.8833000063896179), - (23, 0.8773000240325928), - (24, 0.8705000281333923), - (25, 0.8709999918937683), - (26, 0.8791999816894531), - (27, 0.8755999803543091), - (28, 0.8640000224113464), - (29, 0.8776000142097473), - (30, 0.8615000247955322), - (31, 0.8776999711990356), - (32, 0.8809999823570251), - (33, 0.8824999928474426), - (34, 0.8783000111579895), - (35, 0.8817999958992004), - (36, 
0.8858000040054321), - (37, 0.8791999816894531), - (38, 0.8888999819755554), - (39, 0.8822000026702881), - (40, 0.8755999803543091), - ], - "fn-c50-r40-fedfs-v0-16-16": [ - (0, 0.03759999945759773), - (1, 0.7275999784469604), - (2, 0.7993999719619751), - (3, 0.8122000098228455), - (4, 0.8399999737739563), - (5, 0.8474000096321106), - (6, 0.8608999848365784), - (7, 0.8666999936103821), - (8, 0.8718000054359436), - (9, 0.8705000281333923), - (10, 0.8758999705314636), - (11, 0.8726999759674072), - (12, 0.8804000020027161), - (13, 0.8805999755859375), - (14, 0.8823000192642212), - (15, 0.8834999799728394), - (16, 0.8777999877929688), - (17, 0.883400022983551), - (18, 0.8848999738693237), - (19, 0.8844000101089478), - (20, 0.8852999806404114), - (21, 0.8855999708175659), - (22, 0.8845000267028809), - (23, 0.8885999917984009), - (24, 0.8859000205993652), - (25, 0.8862000107765198), - (26, 0.8885999917984009), - (27, 0.8881999850273132), - (28, 0.8901000022888184), - (29, 0.885699987411499), - (30, 0.885200023651123), - (31, 0.8899000287055969), - (32, 0.8924000263214111), - (33, 0.890500009059906), - (34, 0.8894000053405762), - (35, 0.8916000127792358), - (36, 0.8934000134468079), - (37, 0.8913999795913696), - (38, 0.8902000188827515), - (39, 0.8916000127792358), - (40, 0.8913999795913696), - ], - "fn-c50-r40-fedfs-v0-16-08": [ - (0, 0.03759999945759773), - (1, 0.6811000108718872), - (2, 0.7753999829292297), - (3, 0.8039000034332275), - (4, 0.8253999948501587), - (5, 0.8299000263214111), - (6, 0.8508999943733215), - (7, 0.8583999872207642), - (8, 0.8583999872207642), - (9, 0.8593000173568726), - (10, 0.8654000163078308), - (11, 0.8607000112533569), - (12, 0.8736000061035156), - (13, 0.8740000128746033), - (14, 0.8770999908447266), - (15, 0.8766000270843506), - (16, 0.8762000203132629), - (17, 0.8787999749183655), - (18, 0.8787999749183655), - (19, 0.8801000118255615), - (20, 0.879800021648407), - (21, 0.8812999725341797), - (22, 0.8828999996185303), - (23, 
0.8848000168800354), - (24, 0.8794999718666077), - (25, 0.8830000162124634), - (26, 0.8841000199317932), - (27, 0.8841000199317932), - (28, 0.8816999793052673), - (29, 0.8845000267028809), - (30, 0.8884999752044678), - (31, 0.8881999850273132), - (32, 0.8885999917984009), - (33, 0.8899000287055969), - (34, 0.8883000016212463), - (35, 0.8884000182151794), - (36, 0.8914999961853027), - (37, 0.8913999795913696), - (38, 0.8920999765396118), - (39, 0.8902999758720398), - (40, 0.8909000158309937), - ], - "fn-c50-r40-fedavg-16": [ - (0, 0.03759999945759773), - (1, 0.6868000030517578), - (2, 0.7861999869346619), - (3, 0.8012999892234802), - (4, 0.8083000183105469), - (5, 0.8226000070571899), - (6, 0.823199987411499), - (7, 0.84170001745224), - (8, 0.8342000246047974), - (9, 0.8363000154495239), - (10, 0.8543000221252441), - (11, 0.8504999876022339), - (12, 0.8500999808311462), - (13, 0.8579999804496765), - (14, 0.8633999824523926), - (15, 0.852400004863739), - (16, 0.8640000224113464), - (17, 0.8540999889373779), - (18, 0.8550000190734863), - (19, 0.8555999994277954), - (20, 0.8589000105857849), - (21, 0.8683000206947327), - (22, 0.8655999898910522), - (23, 0.8604999780654907), - (24, 0.859000027179718), - (25, 0.8605999946594238), - (26, 0.8716999888420105), - (27, 0.8683000206947327), - (28, 0.867900013923645), - (29, 0.8668000102043152), - (30, 0.859000027179718), - (31, 0.8586999773979187), - (32, 0.8657000064849854), - (33, 0.8700000047683716), - (34, 0.8619999885559082), - (35, 0.8705000281333923), - (36, 0.8709999918937683), - (37, 0.8708999752998352), - (38, 0.8719000220298767), - (39, 0.8698999881744385), - (40, 0.8705999851226807), - ], -} - - -def accuracy_fn_c10() -> None: - """Generate plots.""" - lines = [ - ("FedFSv1, c=10, t_max=16", RESULTS["fn-c10-r40-fedfs-v1-16"]), - ("FedFSv0, c=10, t=16/16", RESULTS["fn-c10-r40-fedfs-v0-16-16"]), - ("FedFSv0, c=10, t=16/08", RESULTS["fn-c10-r40-fedfs-v0-16-08"]), - ("FedAvg, c=10, t=16", 
RESULTS["fn-c10-r40-fedavg-16"]), - ] - plot(lines, "fmnist-fn-progress-c10") - - -def accuracy_fn_c50() -> None: - """Generate plots.""" - lines = [ - ("FedFSv1, c=10, t_max=16", RESULTS["fn-c50-r40-fedfs-v1-16"]), - ("FedFSv0, c=10, t=16/16", RESULTS["fn-c50-r40-fedfs-v0-16-16"]), - ("FedFSv0, c=10, t=16/08", RESULTS["fn-c50-r40-fedfs-v0-16-08"]), - ("FedAvg, c=10, t=16", RESULTS["fn-c50-r40-fedavg-16"]), - ] - plot(lines, "fmnist-fn-progress-c50") - - -def plot(lines: List[Tuple[str, List[Tuple[int, float]]]], filename: str) -> None: - """Plot a single line chart.""" - values = [np.array([x * 100 for _, x in val]) for _, val in lines] - labels = [label for label, _ in lines] - line_chart( - values, - labels, - "Round", - "Accuracy", - filename=filename, - y_floor=60, - y_ceil=100, - ) - - -def main() -> None: - """Call all plot functions.""" - accuracy_fn_c10() - accuracy_fn_c50() - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_experimental/baseline/tf_fashion_mnist/gen_plots.py b/src/py/flwr_experimental/baseline/tf_fashion_mnist/gen_plots.py deleted file mode 100644 index f6c0383367fd..000000000000 --- a/src/py/flwr_experimental/baseline/tf_fashion_mnist/gen_plots.py +++ /dev/null @@ -1,334 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Generate plots for Fashion-MNIST results.""" - - -from typing import List, Tuple - -import numpy as np - -from flwr_experimental.baseline.plot import bar_chart, line_chart - -RESULTS = { - "fedavg-t10": [ - (0, 0.03759999945759773), - (1, 0.03759999945759773), - (2, 0.03759999945759773), - (3, 0.03759999945759773), - (4, 0.03759999945759773), - (5, 0.03759999945759773), - (6, 0.03759999945759773), - (7, 0.03759999945759773), - (8, 0.03759999945759773), - (9, 0.03759999945759773), - (10, 0.03759999945759773), - (11, 0.03759999945759773), - (12, 0.03759999945759773), - (13, 0.03759999945759773), - (14, 0.03759999945759773), - (15, 0.03759999945759773), - (16, 0.03759999945759773), - (17, 0.03759999945759773), - (18, 0.03759999945759773), - (19, 0.03759999945759773), - (20, 0.03759999945759773), - ], - "fedavg-t12": [ - (0, 0.03759999945759773), - (1, 0.03759999945759773), - (2, 0.03759999945759773), - (3, 0.03759999945759773), - (4, 0.03759999945759773), - (5, 0.03759999945759773), - (6, 0.03759999945759773), - (7, 0.03759999945759773), - (8, 0.03759999945759773), - (9, 0.03759999945759773), - (10, 0.03759999945759773), - (11, 0.03759999945759773), - (12, 0.03759999945759773), - (13, 0.03759999945759773), - (14, 0.03759999945759773), - (15, 0.03759999945759773), - (16, 0.03759999945759773), - (17, 0.03759999945759773), - (18, 0.03759999945759773), - (19, 0.03759999945759773), - (20, 0.03759999945759773), - ], - "fedavg-t14": [ - (0, 0.03759999945759773), - (1, 0.03759999945759773), - (2, 0.6743999719619751), - (3, 0.6802999973297119), - (4, 0.6802999973297119), - (5, 0.6802999973297119), - (6, 0.6802999973297119), - (7, 0.7853999733924866), - (8, 0.7853999733924866), - (9, 0.7876999974250793), - (10, 0.7642999887466431), - (11, 0.8054999709129333), - (12, 0.8181999921798706), - (13, 0.8108999729156494), - (14, 0.7907000184059143), - (15, 0.763700008392334), - (16, 
0.8091999888420105), - (17, 0.8296999931335449), - (18, 0.8123999834060669), - (19, 0.8123999834060669), - (20, 0.8101999759674072), - ], - "fedavg-t16": [ - (0, 0.03759999945759773), - (1, 0.7197999954223633), - (2, 0.7720999717712402), - (3, 0.7900999784469604), - (4, 0.7811999917030334), - (5, 0.7724000215530396), - (6, 0.8023999929428101), - (7, 0.8043000102043152), - (8, 0.8230999708175659), - (9, 0.8327999711036682), - (10, 0.8299000263214111), - (11, 0.8402000069618225), - (12, 0.853600025177002), - (13, 0.8370000123977661), - (14, 0.83160001039505), - (15, 0.8424000144004822), - (16, 0.830299973487854), - (17, 0.8476999998092651), - (18, 0.8632000088691711), - (19, 0.8636999726295471), - (20, 0.8657000064849854), - ], - "fedfs-t10": [ - (0, 0.03759999945759773), - (1, 0.7343000173568726), - (2, 0.7664999961853027), - (3, 0.7900000214576721), - (4, 0.805899977684021), - (5, 0.8237000107765198), - (6, 0.8406999707221985), - (7, 0.8263000249862671), - (8, 0.8442999720573425), - (9, 0.8564000129699707), - (10, 0.8651999831199646), - (11, 0.8375999927520752), - (12, 0.8646000027656555), - (13, 0.8669999837875366), - (14, 0.861299991607666), - (15, 0.8773999810218811), - (16, 0.800599992275238), - (17, 0.8676999807357788), - (18, 0.8763999938964844), - (19, 0.8695999979972839), - (20, 0.873199999332428), - ], - "fedfs-t12": [ - (0, 0.03759999945759773), - (1, 0.7153000235557556), - (2, 0.7835999727249146), - (3, 0.8083999752998352), - (4, 0.816100001335144), - (5, 0.8215000033378601), - (6, 0.8429999947547913), - (7, 0.8464000225067139), - (8, 0.8603000044822693), - (9, 0.8482999801635742), - (10, 0.8450000286102295), - (11, 0.866599977016449), - (12, 0.863099992275238), - (13, 0.8709999918937683), - (14, 0.873199999332428), - (15, 0.8701000213623047), - (16, 0.8600000143051147), - (17, 0.8766999840736389), - (18, 0.8697999715805054), - (19, 0.8795999884605408), - (20, 0.8830999732017517), - ], - "fedfs-t14": [ - (0, 0.03759999945759773), - (1, 
0.7245000004768372), - (2, 0.7972000241279602), - (3, 0.8059999942779541), - (4, 0.8252999782562256), - (5, 0.8334000110626221), - (6, 0.8560000061988831), - (7, 0.8510000109672546), - (8, 0.8650000095367432), - (9, 0.8621000051498413), - (10, 0.866599977016449), - (11, 0.8615999817848206), - (12, 0.8636999726295471), - (13, 0.8740000128746033), - (14, 0.866100013256073), - (15, 0.867900013923645), - (16, 0.83160001039505), - (17, 0.8741999864578247), - (18, 0.8736000061035156), - (19, 0.8810999989509583), - (20, 0.8762000203132629), - ], - "fedfs-t16": [ - (0, 0.03759999945759773), - (1, 0.7476999759674072), - (2, 0.7982000112533569), - (3, 0.8276000022888184), - (4, 0.8256999850273132), - (5, 0.8312000036239624), - (6, 0.8536999821662903), - (7, 0.8483999967575073), - (8, 0.85589998960495), - (9, 0.8687000274658203), - (10, 0.8664000034332275), - (11, 0.8586999773979187), - (12, 0.8662999868392944), - (13, 0.8754000067710876), - (14, 0.878600001335144), - (15, 0.8763999938964844), - (16, 0.748199999332428), - (17, 0.8806999921798706), - (18, 0.8794000148773193), - (19, 0.8813999891281128), - (20, 0.8708000183105469), - ], -} - -RESULTS_WALL_CLOCK_TIME = { - "fedavg-14": 218.49, - "fedfs-14": 61.16, - "fedavg-16": 153.56, - "fedfs-16": 66.84, -} - - -def accuracy_t10() -> None: - """Generate plots.""" - lines = [ - ("FedAvg, t=10", RESULTS["fedavg-t10"]), - ("FedFS, t=10", RESULTS["fedfs-t10"]), - ] - plot(lines, "fmnist-progress-t10") - - -def accuracy_t12() -> None: - """Generate plots.""" - lines = [ - ("FedAvg, t=12", RESULTS["fedavg-t12"]), - ("FedFS, t=12", RESULTS["fedfs-t12"]), - ] - plot(lines, "fmnist-progress-t12") - - -def accuracy_t14() -> None: - """Generate plots.""" - lines = [ - ("FedAvg, t=14", RESULTS["fedavg-t14"]), - ("FedFS, t=14", RESULTS["fedfs-t14"]), - ] - plot(lines, "fmnist-progress-t14") - - -def accuracy_t16() -> None: - """Generate plots.""" - lines = [ - ("FedAvg, t=16", RESULTS["fedavg-t16"]), - ("FedFS, t=16", 
RESULTS["fedfs-t16"]), - ] - plot(lines, "fmnist-progress-t16") - - -def accuracy_fedavg_vs_fedfs() -> None: - """Comparision of FedAvg vs FedFS.""" - fedavg = [ - RESULTS["fedavg-t10"][-1][1], - RESULTS["fedavg-t12"][-1][1], - RESULTS["fedavg-t14"][-1][1], - RESULTS["fedavg-t16"][-1][1], - ] - fedfs = [ - RESULTS["fedfs-t10"][-1][1], - RESULTS["fedfs-t12"][-1][1], - RESULTS["fedfs-t14"][-1][1], - RESULTS["fedfs-t16"][-1][1], - ] - bar_chart( - y_values=[ - np.array([x * 100 for x in fedavg]), - np.array([x * 100 for x in fedfs]), - ], - bar_labels=["FedAvg", "FedFS"], - x_label="Timeout", - x_tick_labels=["T=10", "T=12", "T=14", "T=16"], - y_label="Accuracy", - filename="fmnist-accuracy_fedavg_vs_fedfs", - ) - - -def wall_clock_time_fedavg_vs_fedfs() -> None: - """Comparision of FedAvg vs FedFS.""" - - bar_chart( - y_values=[ - np.array( - [ - RESULTS_WALL_CLOCK_TIME["fedavg-14"], - RESULTS_WALL_CLOCK_TIME["fedavg-16"], - ] - ), - np.array( - [ - RESULTS_WALL_CLOCK_TIME["fedfs-t14"], - RESULTS_WALL_CLOCK_TIME["fedfs-16"], - ] - ), - ], - bar_labels=["FedAvg", "FedFS"], - x_label="Timeout", - x_tick_labels=["T=14", "T=16"], - y_label="Completion time", - filename="fmnist-time_fedavg_vs_fedfs", - ) - - -def plot(lines: List[Tuple[str, List[Tuple[int, float]]]], filename: str) -> None: - """Plot a single line chart.""" - values = [np.array([x * 100 for _, x in val]) for _, val in lines] - labels = [label for label, _ in lines] - line_chart( - values, - labels, - "Round", - "Accuracy", - filename=filename, - y_floor=0, - y_ceil=100, - ) - - -def main() -> None: - """Call all plot functions.""" - accuracy_t10() - accuracy_t12() - accuracy_t14() - accuracy_t16() - accuracy_fedavg_vs_fedfs() - wall_clock_time_fedavg_vs_fedfs() - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_experimental/baseline/tf_fashion_mnist/server.py b/src/py/flwr_experimental/baseline/tf_fashion_mnist/server.py deleted file mode 100644 index fbeb0683df28..000000000000 --- 
a/src/py/flwr_experimental/baseline/tf_fashion_mnist/server.py +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower server for Fashion-MNIST image classification.""" - - -import argparse -import math -from logging import ERROR, INFO -from typing import Callable, Dict, Optional - -import flwr as fl -from flwr.common.logger import configure, log -from flwr_experimental.baseline.common import get_evaluate_fn -from flwr_experimental.baseline.dataset import tf_fashion_mnist_partitioned -from flwr_experimental.baseline.model import orig_cnn -from flwr_experimental.baseline.tf_fashion_mnist.settings import SETTINGS, get_setting - -from . 
import DEFAULT_SERVER_ADDRESS, SEED - - -def parse_args() -> argparse.Namespace: - """Parse and return commandline arguments.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--log_host", - type=str, - help="HTTP log handler host (no default)", - ) - parser.add_argument( - "--setting", - type=str, - choices=SETTINGS.keys(), - help="Setting to run.", - ) - - return parser.parse_args() - - -def main() -> None: - """Start server and train a number of rounds.""" - args = parse_args() - - # Configure logger - configure(identifier="server", host=args.log_host) - - server_setting = get_setting(args.setting).server - log(INFO, "server_setting: %s", server_setting) - - # Load evaluation data - (_, _), (x_test, y_test) = tf_fashion_mnist_partitioned.load_data( - iid_fraction=0.0, num_partitions=1 - ) - if server_setting.dry_run: - x_test = x_test[0:50] - y_test = y_test[0:50] - - # Load model (for centralized evaluation) - model = orig_cnn(input_shape=(28, 28, 1), seed=SEED) - - # Strategy - evaluate_fn = get_evaluate_fn(model=model, num_classes=10, xy_test=(x_test, y_test)) - on_fit_config_fn = get_on_fit_config_fn( - lr_initial=server_setting.lr_initial, - timeout=server_setting.training_round_timeout, - partial_updates=server_setting.partial_updates, - ) - - if server_setting.strategy == "fedavg": - strategy = fl.server.strategy.FedAvg( - fraction_fit=server_setting.sample_fraction, - min_fit_clients=server_setting.min_sample_size, - min_available_clients=server_setting.min_num_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=on_fit_config_fn, - ) - - if server_setting.strategy == "fast-and-slow": - if server_setting.training_round_timeout is None: - raise ValueError( - "No `training_round_timeout` set for `fast-and-slow` strategy" - ) - t_fast = ( - math.ceil(0.5 * server_setting.training_round_timeout) - if server_setting.training_round_timeout_short is None - else server_setting.training_round_timeout_short - ) - strategy = 
fl.server.strategy.FastAndSlow( - fraction_fit=server_setting.sample_fraction, - min_fit_clients=server_setting.min_sample_size, - min_available_clients=server_setting.min_num_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=on_fit_config_fn, - importance_sampling=server_setting.importance_sampling, - dynamic_timeout=server_setting.dynamic_timeout, - dynamic_timeout_percentile=0.8, - alternating_timeout=server_setting.alternating_timeout, - r_fast=1, - r_slow=1, - t_fast=t_fast, - t_slow=server_setting.training_round_timeout, - ) - - if server_setting.strategy == "fedfs-v0": - if server_setting.training_round_timeout is None: - raise ValueError("No `training_round_timeout` set for `fedfs-v0` strategy") - t_fast = ( - math.ceil(0.5 * server_setting.training_round_timeout) - if server_setting.training_round_timeout_short is None - else server_setting.training_round_timeout_short - ) - strategy = fl.server.strategy.FedFSv0( - fraction_fit=server_setting.sample_fraction, - min_fit_clients=server_setting.min_sample_size, - min_available_clients=server_setting.min_num_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=on_fit_config_fn, - r_fast=1, - r_slow=1, - t_fast=t_fast, - t_slow=server_setting.training_round_timeout, - ) - - if server_setting.strategy == "fedfs-v1": - if server_setting.training_round_timeout is None: - raise ValueError("No `training_round_timeout` set for `fedfs-v1` strategy") - strategy = fl.server.strategy.FedFSv1( - fraction_fit=server_setting.sample_fraction, - min_fit_clients=server_setting.min_sample_size, - min_available_clients=server_setting.min_num_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=on_fit_config_fn, - dynamic_timeout_percentile=0.8, - r_fast=1, - r_slow=1, - t_max=server_setting.training_round_timeout, - use_past_contributions=True, - ) - - if server_setting.strategy == "qffedavg": - strategy = fl.server.strategy.QFedAvg( - q_param=0.2, - qffl_learning_rate=0.1, - fraction_fit=server_setting.sample_fraction, 
- min_fit_clients=server_setting.min_sample_size, - min_available_clients=server_setting.min_num_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=on_fit_config_fn, - ) - - # Run server - log(INFO, "Instantiating server, strategy: %s", str(strategy)) - fl.server.start_server( - DEFAULT_SERVER_ADDRESS, - config={"num_rounds": server_setting.rounds}, - strategy=strategy, - ) - - -def get_on_fit_config_fn( - lr_initial: float, timeout: Optional[int], partial_updates: bool -) -> Callable[[int], Dict[str, fl.common.Scalar]]: - """Return a function which returns training configurations.""" - - def fit_config(server_round: int) -> Dict[str, fl.common.Scalar]: - """Return a configuration with static batch size and (local) epochs.""" - config: Dict[str, fl.common.Scalar] = { - "epoch_global": str(server_round), - "epochs": str(5), - "batch_size": str(10), - "lr_initial": str(lr_initial), - "lr_decay": str(0.99), - "partial_updates": "1" if partial_updates else "0", - } - if timeout is not None: - config["timeout"] = str(timeout) - - return config - - return fit_config - - -if __name__ == "__main__": - # pylint: disable=broad-except - try: - main() - except Exception as err: - log(ERROR, "Fatal error in main") - log(ERROR, err, exc_info=True, stack_info=True) - - # Raise the error again so the exit code is correct - raise err diff --git a/src/py/flwr_experimental/baseline/tf_fashion_mnist/settings.py b/src/py/flwr_experimental/baseline/tf_fashion_mnist/settings.py deleted file mode 100644 index 72adc1f0be04..000000000000 --- a/src/py/flwr_experimental/baseline/tf_fashion_mnist/settings.py +++ /dev/null @@ -1,998 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Provides a variaty of baseline settings for Fashion-MNIST.""" - - -from typing import List - -from flwr_experimental.baseline.config import ( - configure_client_instances, - sample_delay_factors, - sample_real_delay_factors, -) -from flwr_experimental.baseline.setting import Baseline, ClientSetting, ServerSetting -from flwr_experimental.ops.instance import Instance - -N20_ROUNDS = 50 -ROUNDS = 20 -MIN_NUM_CLIENTS = 90 -SAMPLE_FRACTION = 0.1 -MIN_SAMPLE_SIZE = 10 - -LR_INITIAL = 0.01 - -IID_FRACTION = 0.1 -MAX_DELAY_FACTOR = 4.0 # Equals a 5x slowdown - - -FN_ROUNDS = 40 -FN_MIN_NUM_CLIENTS = 90 -FN_LR_INITIAL = 0.001 -FN_IID_FRACTION = 0.1 -FN_MAX_DELAY_FACTOR = 4.0 - -FN_SAMPLE_FRACTION_50 = 0.5 -FN_MIN_SAMPLE_SIZE_50 = 50 - -FN_SAMPLE_FRACTION_10 = 0.1 -FN_MIN_SAMPLE_SIZE_10 = 10 - - -def get_setting(name: str) -> Baseline: - """Return appropriate setting.""" - if name not in SETTINGS: - raise Exception( - f"Baseline {name} does not exist. 
Valid settings are: {list(SETTINGS.keys())}" - ) - return SETTINGS[name] - - -def get_instance_name( - instance_names: List[str], num_clients: int, client_index: int -) -> str: - """Return instance_name.""" - idx = client_index // (num_clients // len(instance_names)) - idx = min([idx, len(instance_names) - 1]) - return instance_names[min(idx, len(instance_names))] - - -def configure_uniform_clients( - iid_fraction: float, - instance_names: List[str], - num_clients: int, - dry_run: bool, -) -> List[ClientSetting]: - """Configure `num_clients`, all using the same delay factor.""" - clients = [] - for i in range(num_clients): - client = ClientSetting( - # Set instance on which to run - instance_name=get_instance_name(instance_names, num_clients, i), - # Individual - cid=str(i), - partition=i, - delay_factor=0.0, - # Shared - iid_fraction=iid_fraction, - num_clients=num_clients, - dry_run=dry_run, - ) - clients.append(client) - - return clients - - -# pylint: disable=too-many-arguments -def configure_clients( - iid_fraction: float, - instance_names: List[str], - num_clients: int, - dry_run: bool, - delay_factor_fast: float, - delay_factor_slow: float, - sample_delays: bool = True, - real_delays: bool = False, -) -> List[ClientSetting]: - """Configure `num_clients` with different delay factors.""" - if sample_delays: - # Configure clients with sampled delay factors - if real_delays: - delay_factors = sample_real_delay_factors( - num_clients=num_clients, seed=2020 - ) - else: - delay_factors = sample_delay_factors( - num_clients=num_clients, max_delay=delay_factor_slow, seed=2020 - ) - return [ - ClientSetting( - # Set instance on which to run - instance_name=get_instance_name(instance_names, num_clients, i), - # Individual - cid=str(i), - partition=i, - delay_factor=delay_factors[i], - # Shared - iid_fraction=iid_fraction, - num_clients=num_clients, - dry_run=dry_run, - ) - for i in range(num_clients) - ] - # Configure clients with fixed delay factors - clients = [] - 
for i in range(num_clients): - client = ClientSetting( - # Set instance on which to run - instance_name=get_instance_name(instance_names, num_clients, i), - # Individual - cid=str(i), - partition=i, - # Indices 0 to 49 fast, 50 to 99 slow - delay_factor=( - delay_factor_fast if i < int(num_clients / 2) else delay_factor_slow - ), - # Shared - iid_fraction=iid_fraction, - num_clients=num_clients, - dry_run=dry_run, - ) - clients.append(client) - - return clients - - -client_instances_100, client_names_100 = configure_client_instances( - num_clients=100, num_cpu=2, num_ram=4 -) - -client_instances_10, client_names_10 = configure_client_instances( - num_clients=10, num_cpu=2, num_ram=4 -) - -SETTINGS = { - ### - ### FedFS vs FedAvg - ### - "fn-c50-r40-fedavg-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_50, - min_sample_size=FN_MIN_SAMPLE_SIZE_50, - training_round_timeout=16, - lr_initial=FN_LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c50-r40-fedfs-v0-16-08": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedfs-v0", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_50, - min_sample_size=FN_MIN_SAMPLE_SIZE_50, - training_round_timeout=16, - lr_initial=FN_LR_INITIAL, - partial_updates=True, - importance_sampling=False, - dynamic_timeout=False, - training_round_timeout_short=8, - ), - 
clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c50-r40-fedfs-v0-16-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedfs-v0", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_50, - min_sample_size=FN_MIN_SAMPLE_SIZE_50, - training_round_timeout=16, - lr_initial=FN_LR_INITIAL, - partial_updates=True, - importance_sampling=False, - dynamic_timeout=False, - training_round_timeout_short=16, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c50-r40-fedfs-v1-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedfs-v1", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_50, - min_sample_size=FN_MIN_SAMPLE_SIZE_50, - training_round_timeout=16, - lr_initial=FN_LR_INITIAL, - partial_updates=True, - importance_sampling=False, - dynamic_timeout=False, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c50-r40-qffedavg-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="qffedavg", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - 
sample_fraction=FN_SAMPLE_FRACTION_50, - min_sample_size=FN_MIN_SAMPLE_SIZE_50, - training_round_timeout=16, - lr_initial=FN_LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c10-r40-fedavg-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_10, - min_sample_size=FN_MIN_SAMPLE_SIZE_10, - training_round_timeout=16, - lr_initial=FN_LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c10-r40-fedfs-v0-16-08": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedfs-v0", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_10, - min_sample_size=FN_MIN_SAMPLE_SIZE_10, - training_round_timeout=16, - lr_initial=FN_LR_INITIAL, - partial_updates=True, - importance_sampling=False, - dynamic_timeout=False, - training_round_timeout_short=8, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c10-r40-fedfs-v0-16-16": Baseline( - 
instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedfs-v0", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_10, - min_sample_size=FN_MIN_SAMPLE_SIZE_10, - training_round_timeout=16, - lr_initial=FN_LR_INITIAL, - partial_updates=True, - importance_sampling=False, - dynamic_timeout=False, - training_round_timeout_short=16, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c10-r40-fedfs-v1-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedfs-v1", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_10, - min_sample_size=FN_MIN_SAMPLE_SIZE_10, - training_round_timeout=16, - lr_initial=FN_LR_INITIAL, - partial_updates=True, - importance_sampling=False, - dynamic_timeout=False, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c10-r40-qffedavg-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="qffedavg", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_10, - min_sample_size=FN_MIN_SAMPLE_SIZE_10, - training_round_timeout=16, - lr_initial=FN_LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - 
instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - ### - ### FedFS - ### - "n20-fedfs-v0-16-08": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedfs-v0", - rounds=N20_ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=16, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=False, - dynamic_timeout=False, - training_round_timeout_short=8, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "n20-fedfs-v0-16-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedfs-v0", - rounds=N20_ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=16, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=False, - dynamic_timeout=False, - training_round_timeout_short=16, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "n20-fedfs-v1-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedfs-v1", - rounds=N20_ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - 
training_round_timeout=16, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=False, - dynamic_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "n20-fedavg-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=N20_ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=16, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - ### - ### FastAndSlow - ### - "n2020-fedfs-10": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=10, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=True, - dynamic_timeout=True, - alternating_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "n2020-fedfs-12": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - 
strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=12, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=True, - dynamic_timeout=True, - alternating_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "n2020-fedfs-14": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=14, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=True, - dynamic_timeout=True, - alternating_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "n2020-fedfs-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=16, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=True, - dynamic_timeout=True, - alternating_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "n2020-fedavg-10": 
Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=10, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - alternating_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "n2020-fedavg-12": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=12, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - alternating_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "n2020-fedavg-14": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=14, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - alternating_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - 
num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "n2020-fedavg-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=16, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - alternating_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - ######################################## - ### PREVIOUS ### - ######################################## - "dry-run": Baseline( - instances=[ - Instance(name="server", group="server", num_cpu=2, num_ram=8), - Instance(name="client", group="clients", num_cpu=2, num_ram=4), - ], - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=1, - min_num_clients=1, - sample_fraction=1.0, - min_sample_size=1, - training_round_timeout=600, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=True, - ), - clients=configure_uniform_clients( - iid_fraction=IID_FRACTION, - instance_names=["client"], - num_clients=4, - dry_run=True, - ), - ), - "minimal": Baseline( - instances=[Instance(name="server", group="server", num_cpu=2, num_ram=8)] - + client_instances_10, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=3, - min_num_clients=8, - sample_fraction=0.5, - min_sample_size=5, - training_round_timeout=3600, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=True, - dynamic_timeout=True, - dry_run=False, - ), - 
clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_10, - num_clients=10, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), - "fedavg-sync": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=None, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), - "fedavg-async": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=20, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), - "fast-and-slow-only-partial-updates": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=20, - lr_initial=LR_INITIAL, - partial_updates=True, - 
importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), - "fast-and-slow-only-dynamic-timeouts": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=20, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=True, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), - "fast-and-slow-only-importance-sampling": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=20, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=True, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), - "fast-and-slow": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - 
min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=20, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=True, - dynamic_timeout=True, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), - "qffedavg": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="qffedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=None, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), -} diff --git a/src/py/flwr_experimental/baseline/tf_hotkey/client.py b/src/py/flwr_experimental/baseline/tf_hotkey/client.py deleted file mode 100644 index f59ad5f676ba..000000000000 --- a/src/py/flwr_experimental/baseline/tf_hotkey/client.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Flower client using TensorFlow for Spoken Keyword classification.""" - - -import argparse -from logging import ERROR - -import tensorflow as tf - -import flwr as fl -from flwr.common.logger import configure, log -from flwr_experimental.baseline.common import VisionClassificationClient -from flwr_experimental.baseline.dataset import tf_hotkey_partitioned -from flwr_experimental.baseline.model import keyword_cnn -from flwr_experimental.baseline.setting import ClientSetting -from flwr_experimental.baseline.tf_hotkey.settings import SETTINGS, get_setting - -from . import DEFAULT_SERVER_ADDRESS, SEED - -tf.get_logger().setLevel("ERROR") - - -class ClientSettingNotFound(Exception): - """Raise when client setting could not be found.""" - - -def parse_args() -> argparse.Namespace: - """Parse and return commandline arguments.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--server_address", - type=str, - default=DEFAULT_SERVER_ADDRESS, - help=f"Server address (IPv6, default: {DEFAULT_SERVER_ADDRESS})", - ) - parser.add_argument( - "--log_host", - type=str, - help="HTTP log handler host (no default)", - ) - parser.add_argument( - "--setting", - type=str, - choices=SETTINGS.keys(), - help="Setting to run.", - ) - parser.add_argument("--cid", type=str, required=True, help="Client cid.") - return parser.parse_args() - - -def get_client_setting(setting: str, cid: str) -> ClientSetting: - """Return client setting based on setting name and cid.""" - for client_setting in get_setting(setting).clients: - if client_setting.cid == cid: - return client_setting - - raise ClientSettingNotFound() - - -def main() -> None: - """Load data, create and start client.""" - args = parse_args() - - client_setting = get_client_setting(args.setting, args.cid) - - # Configure logger - configure(identifier=f"client:{client_setting.cid}", host=args.log_host) - - # Load model - 
model = keyword_cnn(input_shape=(80, 40, 1), seed=SEED) - - # Load local data partition - ( - (xy_train_partitions, xy_test_partitions), - _, - ) = tf_hotkey_partitioned.load_data( - iid_fraction=client_setting.iid_fraction, - num_partitions=client_setting.num_clients, - ) - (x_train, y_train) = xy_train_partitions[client_setting.partition] - (x_test, y_test) = xy_test_partitions[client_setting.partition] - if client_setting.dry_run: - x_train = x_train[0:100] - y_train = y_train[0:100] - x_test = x_test[0:50] - y_test = y_test[0:50] - - # Start client - client = VisionClassificationClient( - client_setting.cid, - model, - (x_train, y_train), - (x_test, y_test), - client_setting.delay_factor, - 10, - normalization_factor=100.0, - ) - fl.client.start_client(args.server_address, client) - - -if __name__ == "__main__": - # pylint: disable=broad-except - try: - main() - except Exception as err: - log(ERROR, "Fatal error in main") - log(ERROR, err, exc_info=True, stack_info=True) - - # Raise the error again so the exit code is correct - raise err diff --git a/src/py/flwr_experimental/baseline/tf_hotkey/download.py b/src/py/flwr_experimental/baseline/tf_hotkey/download.py deleted file mode 100644 index d9f3c53bdb39..000000000000 --- a/src/py/flwr_experimental/baseline/tf_hotkey/download.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Helper script to download Spoken Keyword dataset.""" - - -from logging import INFO - -from flwr.common.logger import log -from flwr_experimental.baseline.dataset import tf_hotkey_partitioned - - -def main() -> None: - """Download data.""" - log(INFO, "Download Keyword Detection") - tf_hotkey_partitioned.hotkey_load() - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_experimental/baseline/tf_hotkey/server.py b/src/py/flwr_experimental/baseline/tf_hotkey/server.py deleted file mode 100644 index beba81f58396..000000000000 --- a/src/py/flwr_experimental/baseline/tf_hotkey/server.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower server for Spoken Keyword classification.""" - - -import argparse -import math -from logging import ERROR, INFO -from typing import Callable, Dict, Optional - -import flwr as fl -from flwr.common.logger import configure, log -from flwr_experimental.baseline.common import get_evaluate_fn -from flwr_experimental.baseline.dataset import tf_hotkey_partitioned -from flwr_experimental.baseline.model import keyword_cnn -from flwr_experimental.baseline.tf_hotkey.settings import SETTINGS, get_setting - -from . 
import DEFAULT_SERVER_ADDRESS, SEED - - -def parse_args() -> argparse.Namespace: - """Parse and return commandline arguments.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--log_host", - type=str, - help="HTTP log handler host (no default)", - ) - parser.add_argument( - "--setting", - type=str, - choices=SETTINGS.keys(), - help="Setting to run.", - ) - - return parser.parse_args() - - -def main() -> None: - """Start server and train a number of rounds.""" - args = parse_args() - - # Configure logger - configure(identifier="server", host=args.log_host) - - server_setting = get_setting(args.setting).server - log(INFO, "server_setting: %s", server_setting) - - # Load evaluation data - (_, _), (x_test, y_test) = tf_hotkey_partitioned.load_data( - iid_fraction=0.0, num_partitions=1 - ) - if server_setting.dry_run: - x_test = x_test[0:50] - y_test = y_test[0:50] - - # Load model (for centralized evaluation) - model = keyword_cnn(input_shape=(80, 40, 1), seed=SEED) - - # Strategy - evaluate_fn = get_evaluate_fn(model=model, num_classes=10, xy_test=(x_test, y_test)) - on_fit_config_fn = get_on_fit_config_fn( - lr_initial=server_setting.lr_initial, - timeout=server_setting.training_round_timeout, - partial_updates=server_setting.partial_updates, - ) - - if server_setting.strategy == "fedavg": - strategy = fl.server.strategy.FedAvg( - fraction_fit=server_setting.sample_fraction, - min_fit_clients=server_setting.min_sample_size, - min_available_clients=server_setting.min_num_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=on_fit_config_fn, - ) - - if server_setting.strategy == "fast-and-slow": - if server_setting.training_round_timeout is None: - raise ValueError( - "No `training_round_timeout` set for `fast-and-slow` strategy" - ) - strategy = fl.server.strategy.FastAndSlow( - fraction_fit=server_setting.sample_fraction, - min_fit_clients=server_setting.min_sample_size, - min_available_clients=server_setting.min_num_clients, - 
evaluate_fn=evaluate_fn, - on_fit_config_fn=on_fit_config_fn, - importance_sampling=server_setting.importance_sampling, - dynamic_timeout=server_setting.dynamic_timeout, - dynamic_timeout_percentile=0.9, - alternating_timeout=server_setting.alternating_timeout, - r_fast=1, - r_slow=1, - t_fast=math.ceil(0.5 * server_setting.training_round_timeout), - t_slow=server_setting.training_round_timeout, - ) - - if server_setting.strategy == "fedfs-v0": - if server_setting.training_round_timeout is None: - raise ValueError("No `training_round_timeout` set for `fedfs-v0` strategy") - t_fast = ( - math.ceil(0.5 * server_setting.training_round_timeout) - if server_setting.training_round_timeout_short is None - else server_setting.training_round_timeout_short - ) - strategy = fl.server.strategy.FedFSv0( - fraction_fit=server_setting.sample_fraction, - min_fit_clients=server_setting.min_sample_size, - min_available_clients=server_setting.min_num_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=on_fit_config_fn, - r_fast=1, - r_slow=1, - t_fast=t_fast, - t_slow=server_setting.training_round_timeout, - ) - - if server_setting.strategy == "qffedavg": - strategy = fl.server.strategy.QFedAvg( - q_param=0.2, - qffl_learning_rate=0.1, - fraction_fit=server_setting.sample_fraction, - min_fit_clients=server_setting.min_sample_size, - min_available_clients=server_setting.min_num_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=on_fit_config_fn, - ) - - # Run server - fl.server.start_server( - DEFAULT_SERVER_ADDRESS, - config={"num_rounds": server_setting.rounds}, - strategy=strategy, - ) - - -def get_on_fit_config_fn( - lr_initial: float, timeout: Optional[int], partial_updates: bool -) -> Callable[[int], Dict[str, fl.common.Scalar]]: - """Return a function which returns training configurations.""" - - def fit_config(server_round: int) -> Dict[str, fl.common.Scalar]: - """Return a configuration with static batch size and (local) epochs.""" - config: Dict[str, fl.common.Scalar] 
= { - "epoch_global": str(server_round), - "epochs": str(5), - "batch_size": str(32), - "lr_initial": str(lr_initial), - "lr_decay": str(0.99), - "partial_updates": "1" if partial_updates else "0", - } - if timeout is not None: - config["timeout"] = str(timeout) - - return config - - return fit_config - - -if __name__ == "__main__": - # pylint: disable=broad-except - try: - main() - except Exception as err: - log(ERROR, "Fatal error in main") - log(ERROR, err, exc_info=True, stack_info=True) - - # Raise the error again so the exit code is correct - raise err diff --git a/src/py/flwr_experimental/baseline/tf_hotkey/settings.py b/src/py/flwr_experimental/baseline/tf_hotkey/settings.py deleted file mode 100644 index 5bfb7b1e42ad..000000000000 --- a/src/py/flwr_experimental/baseline/tf_hotkey/settings.py +++ /dev/null @@ -1,577 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Provides a variaty of baseline settings for Spoken Keyword -classification.""" - - -from typing import List - -from flwr_experimental.baseline.config import ( - configure_client_instances, - sample_delay_factors, - sample_real_delay_factors, -) -from flwr_experimental.baseline.setting import Baseline, ClientSetting, ServerSetting -from flwr_experimental.ops.instance import Instance - -ROUNDS = 50 -MIN_NUM_CLIENTS = 45 -SAMPLE_FRACTION = 0.2 -MIN_SAMPLE_SIZE = 10 - -LR_INITIAL = 0.01 - -IID_FRACTION = 0.1 -MAX_DELAY_FACTOR = 4.0 # Equals a 5x slowdown - - -FN_NUM_CLIENTS = 50 -FN_ROUNDS = 50 -FN_MIN_NUM_CLIENTS = 45 -FN_LR_INITIAL = 0.001 -FN_IID_FRACTION = 0.1 -FN_MAX_DELAY_FACTOR = 4.0 - -FN_SAMPLE_FRACTION_25 = 0.5 -FN_MIN_SAMPLE_SIZE_25 = 25 - -FN_SAMPLE_FRACTION_10 = 0.2 -FN_MIN_SAMPLE_SIZE_10 = 10 - -FN_TRAINING_ROUND_TIMEOUT = 230 - - -def get_setting(name: str) -> Baseline: - """Return appropriate setting.""" - if name not in SETTINGS: - raise Exception( - f"Baseline {name} does not exist. 
Valid settings are: {list(SETTINGS.keys())}" - ) - return SETTINGS[name] - - -def get_instance_name( - instance_names: List[str], num_clients: int, client_index: int -) -> str: - """Return instance_name.""" - idx = client_index // (num_clients // len(instance_names)) - idx = min([idx, len(instance_names) - 1]) - return instance_names[min(idx, len(instance_names))] - - -def configure_uniform_clients( - iid_fraction: float, - instance_names: List[str], - num_clients: int, - dry_run: bool, -) -> List[ClientSetting]: - """Configure `num_clients`, all using the same delay factor.""" - clients = [] - for i in range(num_clients): - client = ClientSetting( - # Set instance on which to run - instance_name=get_instance_name(instance_names, num_clients, i), - # Individual - cid=str(i), - partition=i, - delay_factor=0.0, - # Shared - iid_fraction=iid_fraction, - num_clients=num_clients, - dry_run=dry_run, - ) - clients.append(client) - - return clients - - -# pylint: disable=too-many-arguments -def configure_clients( - iid_fraction: float, - instance_names: List[str], - num_clients: int, - dry_run: bool, - delay_factor_fast: float, - delay_factor_slow: float, - sample_delays: bool = True, - real_delays: bool = False, -) -> List[ClientSetting]: - """Configure `num_clients` with different delay factors.""" - if sample_delays: - # Configure clients with sampled delay factors - if real_delays: - delay_factors = sample_real_delay_factors( - num_clients=num_clients, seed=2020 - ) - else: - delay_factors = sample_delay_factors( - num_clients=num_clients, max_delay=delay_factor_slow, seed=2020 - ) - return [ - ClientSetting( - # Set instance on which to run - instance_name=get_instance_name(instance_names, num_clients, i), - # Individual - cid=str(i), - partition=i, - delay_factor=delay_factors[i], - # Shared - iid_fraction=iid_fraction, - num_clients=num_clients, - dry_run=dry_run, - ) - for i in range(num_clients) - ] - # Configure clients with fixed delay factors - clients = [] - 
for i in range(num_clients): - client = ClientSetting( - # Set instance on which to run - instance_name=get_instance_name(instance_names, num_clients, i), - # Individual - cid=str(i), - partition=i, - # Indices 0 to 49 fast, 50 to 99 slow - delay_factor=( - delay_factor_fast if i < int(num_clients / 2) else delay_factor_slow - ), - # Shared - iid_fraction=iid_fraction, - num_clients=num_clients, - dry_run=dry_run, - ) - clients.append(client) - - return clients - - -client_instances_50, client_names_50 = configure_client_instances( - num_clients=50, num_cpu=2, num_ram=8 -) - -client_instances_10, client_names_10 = configure_client_instances( - num_clients=10, num_cpu=2, num_ram=8 -) - -SETTINGS = { - ### - ### FedFS vs FedAvg - ### - "fn-c25-r50-fedavg-230": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_25, - min_sample_size=FN_MIN_SAMPLE_SIZE_25, - training_round_timeout=FN_TRAINING_ROUND_TIMEOUT, - lr_initial=FN_LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_50, - num_clients=FN_NUM_CLIENTS, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c25-r50-fedfs-v0-230-230": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fedfs-v0", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_25, - min_sample_size=FN_MIN_SAMPLE_SIZE_25, - training_round_timeout=FN_TRAINING_ROUND_TIMEOUT, - lr_initial=FN_LR_INITIAL, - partial_updates=True, - importance_sampling=False, - 
dynamic_timeout=False, - training_round_timeout_short=FN_TRAINING_ROUND_TIMEOUT, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_50, - num_clients=FN_NUM_CLIENTS, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c10-r50-fedavg-230": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_10, - min_sample_size=FN_MIN_SAMPLE_SIZE_10, - training_round_timeout=FN_TRAINING_ROUND_TIMEOUT, - lr_initial=FN_LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_50, - num_clients=FN_NUM_CLIENTS, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c10-r50-fedfs-v0-230-230": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fedfs-v0", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_10, - min_sample_size=FN_MIN_SAMPLE_SIZE_10, - training_round_timeout=FN_TRAINING_ROUND_TIMEOUT, - lr_initial=FN_LR_INITIAL, - partial_updates=True, - importance_sampling=False, - dynamic_timeout=False, - training_round_timeout_short=FN_TRAINING_ROUND_TIMEOUT, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_50, - num_clients=FN_NUM_CLIENTS, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - ### - ### - ### - "n2020-fedfs": Baseline( - instances=[Instance(name="server", group="server", 
num_cpu=4, num_ram=16)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=200, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=True, - dynamic_timeout=True, - alternating_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_50, - num_clients=50, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "n2020-fedavg": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=200, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - alternating_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_50, - num_clients=50, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "dry-run": Baseline( - instances=[ - Instance(name="server", group="server", num_cpu=4, num_ram=16), - Instance(name="client", group="clients", num_cpu=4, num_ram=16), - ], - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=1, - min_num_clients=1, - sample_fraction=1.0, - min_sample_size=1, - training_round_timeout=600, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=True, - ), - clients=configure_uniform_clients( - iid_fraction=IID_FRACTION, - instance_names=["client"], - num_clients=4, - dry_run=True, - ), - ), - "minimal": Baseline( - 
instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_10, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=2, - min_num_clients=4, - sample_fraction=1.0, - min_sample_size=3, - training_round_timeout=3600, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_10, - num_clients=10, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fedavg-sync": Baseline( - instances=[Instance(name="server", group="server", num_cpu=8, num_ram=32)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=None, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_50, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fedavg-async": Baseline( - instances=[Instance(name="server", group="server", num_cpu=8, num_ram=32)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=20, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_50, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - 
real_delays=True, - ), - ), - "fast-and-slow-only-partial-updates": Baseline( - instances=[Instance(name="server", group="server", num_cpu=8, num_ram=32)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=20, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_50, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fast-and-slow-only-dynamic-timeouts": Baseline( - instances=[Instance(name="server", group="server", num_cpu=8, num_ram=32)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=20, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=True, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_50, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fast-and-slow-only-importance-sampling": Baseline( - instances=[Instance(name="server", group="server", num_cpu=8, num_ram=32)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=20, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=True, - dynamic_timeout=False, - dry_run=False, - ), - 
clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_50, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fast-and-slow": Baseline( - instances=[Instance(name="server", group="server", num_cpu=8, num_ram=32)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=60, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=True, - dynamic_timeout=True, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_50, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "qffedavg": Baseline( - instances=[Instance(name="server", group="server", num_cpu=8, num_ram=32)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="qffedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=None, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_50, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), -} diff --git a/src/py/flwr_experimental/logserver/README.md b/src/py/flwr_experimental/logserver/README.md deleted file mode 100644 index f7784c56a20a..000000000000 --- a/src/py/flwr_experimental/logserver/README.md +++ /dev/null @@ -1,34 +0,0 @@ -# Flower LogServer - -A simple server which receives logs from the python standard library `logging.handlers.HTTPHandler` and 
prints them to the console. - -## Quickstart - -A minimal example showing how centralized logging works. - -Run these commands in 3 different terminals. -Start the log server. - -```bash -python -m flwr_experimental.logserver -``` - -Start the FL server and client. - -```bash -python -m flwr_experimental.baseline.tf_fashion_mnist.server --log_host=localhost:8081 -``` - -```bash -python -m flwr_experimental.baseline.tf_fashion_mnist.client \ - --cid=0 --partition=0 --clients=1 --server_address=localhost:8080 \ - --log_host=localhost:8081 -``` - -## Persist logs to S3 - -If you would like to upload your logs regularly to S3 you can pass the following command line arguments on start. - -```bash -python -m flwr_experimental.logserver --s3_bucket=MY_BUCKET --s3_key=MY_S3_KEY -``` diff --git a/src/py/flwr_experimental/logserver/__init__.py b/src/py/flwr_experimental/logserver/__init__.py deleted file mode 100644 index 352112d5e933..000000000000 --- a/src/py/flwr_experimental/logserver/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Provides simple server to receive logs from python http logger.""" diff --git a/src/py/flwr_experimental/logserver/__main__.py b/src/py/flwr_experimental/logserver/__main__.py deleted file mode 100644 index a3ac56e405f0..000000000000 --- a/src/py/flwr_experimental/logserver/__main__.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Start log server.""" - - -from flwr_experimental.logserver.server import main - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_experimental/logserver/server.py b/src/py/flwr_experimental/logserver/server.py deleted file mode 100644 index 683b12b6db6c..000000000000 --- a/src/py/flwr_experimental/logserver/server.py +++ /dev/null @@ -1,232 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Provides a logserver.""" - - -import argparse -import ast -import json -import logging -import re -import time -import urllib.parse -from http.server import BaseHTTPRequestHandler, HTTPServer -from pathlib import Path -from threading import Event, Thread -from typing import Dict, List, Optional, Tuple, Union - -import boto3 -import numpy as np - -from flwr_experimental.baseline.plot import plot - -LOGDIR = "flower_logs" -LOGFILE = "{logdir}/flower.log".format(logdir=LOGDIR) -LOGFILE_UPLOAD_INTERVAL = 60 -SERVER_TIMEOUT = 1200 - -CONFIG: Dict[str, Optional[str]] = {"s3_bucket": None, "s3_key": None} - -Accuracies = List[Tuple[int, float]] - - -def write_to_logfile(line: str) -> None: - """Write line to logfile.""" - with open(f"{LOGFILE}", "a+") as lfd: - lfd.write(line + "\n") - - -def is_credentials_available() -> bool: - """Return True is credentials are available in CONFIG.""" - return all([v is not None for v in CONFIG.values()]) - - -def upload_file(local_filepath: str, s3_key: Optional[str]) -> None: - """Upload logfile to S3.""" - if not is_credentials_available(): - logging.info( - "Skipping S3 logfile upload as s3_bucket or s3_key was not provided." 
- ) - elif not Path(LOGFILE).is_file(): - logging.info("No logfile found.") - elif s3_key is not None: - try: - logging.info("Uploading logfile to S3.") - boto3.resource("s3").meta.client.upload_file( - Filename=local_filepath, - Bucket=CONFIG["s3_bucket"], - Key=s3_key, - ExtraArgs={ - "ContentType": ( - "application/pdf" if s3_key.endswith(".pdf") else "text/plain" - ) - }, - ) - # pylint: disable=broad-except - except Exception as err: - logging.error(err) - - -def continuous_logfile_upload(stop_condition: Event, interval: int) -> None: - """Call upload_logfile function regularly until stop_condition Event is - set.""" - while True: - upload_file(LOGFILE, CONFIG["s3_key"]) - - if stop_condition.is_set(): - break - - time.sleep(interval) - - -def on_record(record: Dict[str, str]) -> None: - """Call on each new line.""" - - # Print record as JSON and write it to a logfile - line = str(json.dumps(record)) - print(line) - write_to_logfile(line) - - # Analyze record and if possible extract a plot_type and data from it - plot_type, data = parse_plot_message(record["message"]) - - if plot_type == "accuracies" and data is not None: - plot_accuracies(data) - - -def parse_plot_message( - message: str, -) -> Tuple[Optional[str], Optional[Union[Accuracies]]]: - """Parse message and return its type and the data if possible. - - If the message does not contain plotable data return None. 
- """ - accuracies_str = "app_fit: accuracies_centralized " - - if accuracies_str in message: - values_str = re.sub(accuracies_str, "", message) - values: Accuracies = ast.literal_eval(values_str) - return "accuracies", values - - return None, None - - -def plot_accuracies(values: Accuracies) -> str: - """Plot accuracies.""" - filename = f'{CONFIG["s3_key"]}.accuracies' - - line = [val * 100 for _, val in values] - - local_path = plot.line_chart( - lines=[np.array(line)], - labels=["Train"], - x_label="Rounds", - y_label="Accuracy", - filename=filename, - ) - upload_file(local_path, filename + ".pdf") - return local_path - - -class RequestHandler(BaseHTTPRequestHandler): - """Provide custom POST handler.""" - - def _set_response(self) -> None: - self.send_response(200) - self.send_header("Content-type", "text/html") - self.end_headers() - - def do_POST(self) -> None: # pylint: disable=invalid-name - """Handle POST request.""" - content_length = int(self.headers["Content-Length"]) - post_qs = self.rfile.read(content_length).decode("utf-8") - record: Dict[str, str] = { - "client_address": f"{self.client_address[0]}:{self.client_address[1]}" - } - - for key, val in urllib.parse.parse_qs(post_qs).items(): - record[key] = str(val[0]) if len(val) == 1 else str(val) - - self._set_response() - self.wfile.write("POST request for {}".format(self.path).encode("utf-8")) - - thread = Thread(target=on_record, args=(record,)) - thread.start() - - -class LogServer(HTTPServer): - """Log server with timeout.""" - - timeout = SERVER_TIMEOUT - - def handle_timeout(self) -> None: - """Cleanup and upload logfile to S3.""" - self.server_close() - raise TimeoutError() - - -def main() -> None: - """Start log server.""" - # Create a flower_logs directory to store the logfiles. 
- Path(LOGDIR).mkdir(exist_ok=True) - Path(LOGFILE).touch() - - logging.basicConfig(level=logging.INFO) - - parser = argparse.ArgumentParser(description="Flower LogServer") - parser.add_argument( - "--s3_bucket", - type=str, - help="S3 bucket where the logfile should be uploaded to.", - ) - parser.add_argument( - "--s3_key", - type=str, - help="S3 key under which the logfile should be uploaded.", - ) - args = parser.parse_args() - - CONFIG["s3_bucket"] = args.s3_bucket - CONFIG["s3_key"] = args.s3_key - - server = LogServer(("", 8081), RequestHandler) - logging.info("Starting logging server...\n") - - # Start file upload loop - sync_loop_stop_condition = Event() - sync_loop = Thread( - target=continuous_logfile_upload, - args=(sync_loop_stop_condition, LOGFILE_UPLOAD_INTERVAL), - ) - sync_loop.start() - - try: - while True: - server.handle_request() - except TimeoutError: - print( - f"TimeoutError raised as no request was received for {SERVER_TIMEOUT} seconds." - ) - sync_loop_stop_condition.set() - sync_loop.join() - - # Final upload - upload_file(LOGFILE, CONFIG["s3_key"]) - - logging.info("Stopping logging server...\n") - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_experimental/logserver/server_test.py b/src/py/flwr_experimental/logserver/server_test.py deleted file mode 100644 index becead625a5d..000000000000 --- a/src/py/flwr_experimental/logserver/server_test.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Test for server.""" - - -import os.path -import tempfile -import unittest - -from flwr_experimental.logserver.server import ( - CONFIG, - parse_plot_message, - plot_accuracies, -) - - -# pylint: disable=no-self-use -class LogserverTest(unittest.TestCase): - """Tests for functions in the server module.""" - - def setUp(self) -> None: - # Create a temporary directory - self.test_dir = tempfile.TemporaryDirectory() - - def tearDown(self) -> None: - # Cleanup the directory after the test - # self.test_dir.cleanup() - pass - - def test_parse_plot_message(self) -> None: - """Test parse_plot_message function.""" - # Prepare - message = "app_fit: accuracies_centralized [(0, 0.019), (1, 0.460)]" - expected_plot_type = "accuracies" - expected_values = [(0, 0.019), (1, 0.460)] - - # Execute - plot_type, values = parse_plot_message(message) - - # Assert - assert plot_type == expected_plot_type - assert values == expected_values - - def test_plot_accuracies(self) -> None: - """Test plot accuracies function.""" - # Prepare - values = [(0, 0.019), (1, 0.460), (2, 0.665), (3, 0.845)] - CONFIG["s3_key"] = os.path.join(self.test_dir.name, "foo.log") - - expected_filepath = os.path.join( - self.test_dir.name, f'{CONFIG["s3_key"]}.accuracies.pdf' - ) - - # Execute - plot_accuracies(values) - - # Assert - assert os.path.isfile(expected_filepath) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/py/flwr_experimental/ops/README.md b/src/py/flwr_experimental/ops/README.md deleted file mode 100644 index 855f86821c5f..000000000000 --- a/src/py/flwr_experimental/ops/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# Flower Ops -## Compute -### EC2 Adapter -For permission management an IAM instance profile named `FlowerInstanceProfile` is expected. 
-The instances will use that profile for all nessecary permissions. In case of logfile upload -the profile must include the permission to upload the logfile from the machine to the respective -S3 bucket. - -An example policy attached to the profile for the logfiles might look like: -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "FlowerInstanceProfileS3Policy", - "Effect": "Allow", - "Action": [ - "s3:PutObject", - "s3:PutObjectRetention", - "s3:PutObjectVersionAcl", - "s3:PutObjectAcl" - ], - "Resource": "arn:aws:s3:::mylogfilebucket/*" - } - ] -} -``` diff --git a/src/py/flwr_experimental/ops/__init__.py b/src/py/flwr_experimental/ops/__init__.py deleted file mode 100644 index bad31028e68c..000000000000 --- a/src/py/flwr_experimental/ops/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower ops provides an opinionated way to provision necessary compute -infrastructure for running Flower runs.""" diff --git a/src/py/flwr_experimental/ops/cluster.py b/src/py/flwr_experimental/ops/cluster.py deleted file mode 100644 index 53a4e9617427..000000000000 --- a/src/py/flwr_experimental/ops/cluster.py +++ /dev/null @@ -1,309 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Implements compute classes for EC2.""" - - -import concurrent.futures -from contextlib import contextmanager -from itertools import groupby -from logging import DEBUG, ERROR -from typing import Dict, Iterator, List, Optional, Tuple, Type, Union, cast - -from paramiko.client import MissingHostKeyPolicy, SSHClient -from paramiko.sftp_attr import SFTPAttributes - -from flwr.common.logger import log - -from .compute.adapter import Adapter -from .instance import Instance - -ExecInfo = Tuple[List[str], List[str]] - - -class StartFailed(Exception): - """Raised when cluster could not start.""" - - -class InstanceIdNotFound(Exception): - """Raised when there was no instance with given id.""" - - -class InstanceMismatch(Exception): - """Raised when instances passed to create_instances do not have the same - values for RAM or CPU.""" - - -class IgnoreHostKeyPolicy: - """Policy for accepting any unknown host key. - - This is used by `paramiko.client.SSHClient`. - """ - - # pylint: disable=no-self-use, unused-argument - def missing_host_key(self, client: SSHClient, hostname: str, key: str) -> None: - """Simply return to ignore the host key. - - As we create and destroy machines quite regularly and don't - reuse them we will not store the host key in the local system to - avoid pollution the local known_hosts file. 
- """ - return None - - -SSHCredentials = Tuple[str, str] # username, key_filename - - -@contextmanager -def ssh_connection( - instance: Instance, ssh_credentials: SSHCredentials -) -> Iterator[SSHClient]: - """Connect to server and yield SSH client.""" - username, key_filename = ssh_credentials - - instance_ssh_port: int = cast(int, instance.ssh_port) - ignore_host_key_policy: Union[Type[MissingHostKeyPolicy], MissingHostKeyPolicy] = ( - cast( - Union[Type[MissingHostKeyPolicy], MissingHostKeyPolicy], IgnoreHostKeyPolicy - ) - ) - - client = SSHClient() - client.set_missing_host_key_policy(ignore_host_key_policy) - client.connect( - hostname=str(instance.public_ip), - port=instance_ssh_port, - username=username, - key_filename=key_filename, - ) - - yield client - - client.close() - - -def create_instances(adapter: Adapter, instances: List[Instance], timeout: int) -> None: - """Start instances and set props of each instance. - - Fails if CPU and RAM of instances are not all the same. - """ - if not all( - [ - ins.num_cpu == instances[0].num_cpu and ins.num_ram == instances[0].num_ram - for ins in instances - ] - ): - raise InstanceMismatch( - "Values of num_cpu and num_ram have to be equal for all instances." 
- ) - - # As checked before that each instance has the same num_cpu and num_ram - # we can just take the values from the first => instances[0] - adapter_instances = adapter.create_instances( - num_cpu=instances[0].num_cpu, - num_ram=instances[0].num_ram, - num_instance=len(instances), - gpu=instances[0].gpu, - timeout=timeout, - ) - - for i, adp_ins in enumerate(adapter_instances): - instance_id, private_ip, public_ip, ssh_port, state = adp_ins - - instances[i].instance_id = instance_id - instances[i].private_ip = private_ip - instances[i].public_ip = public_ip - instances[i].ssh_port = ssh_port - instances[i].state = state - - -def group_instances_by_specs(instances: List[Instance]) -> List[List[Instance]]: - """Group instances by num_cpu and num_ram.""" - groups: List[List[Instance]] = [] - keyfunc = lambda ins: f"{ins.num_cpu}-{ins.num_ram}" - instances = sorted(instances, key=keyfunc) - for _, group in groupby(instances, keyfunc): - groups.append(list(group)) - return groups - - -class Cluster: - """Compute environment independend compute cluster.""" - - def __init__( - self, - adapter: Adapter, - ssh_credentials: SSHCredentials, - instances: List[Instance], - timeout: int, - ): - """Create cluster. - - Args: - timeout (int): Minutes after which the machine will shutdown and terminate. - This is a safety mechanism to avoid run aways cost. The user should still - make sure to monitor the progress in case this mechanism fails. - - Example: - To start two groups of instances where the first one has one instance and the - second one has two instances you might define the following list of instances: - - instances = [ - Instance(name='server', group='server', num_cpu=2, num_ram=1.0), - Instance(name='client_0', group='clients', num_cpu=4, num_ram=16.0), - Instance(name='client_1', group='clients', num_cpu=4, num_ram=16.0), - ] - - Depending on the adapter used not every combination of vCPU and RAM might be available. 
- """ - instance_names = {ins.name for ins in instances} - assert len(instance_names) == len(instances), "Instance names must be unique." - - self.adapter = adapter - self.ssh_credentials = ssh_credentials - self.instances = instances - self.timeout = timeout - - def get_instance(self, instance_name: str) -> Instance: - """Return instance by instance_name.""" - for ins in self.instances: - if ins.name == instance_name: - return ins - - raise InstanceIdNotFound() - - def get_instance_names(self, groups: Optional[List[str]] = None) -> List[str]: - """Return a list of all instance names.""" - return [ - ins.name for ins in self.instances if groups is None or ins.group in groups - ] - - def start(self) -> None: - """Start the instance.""" - instance_groups = group_instances_by_specs(self.instances) - - with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor: - futures = [ - executor.submit( - create_instances, self.adapter, instance_group, self.timeout - ) - for instance_group in instance_groups - ] - concurrent.futures.wait(futures) - - try: - for future in futures: - future.result() - # pylint: disable=broad-except - except Exception as exc: - log( - ERROR, - "Failed to start the cluster completely. 
Shutting down...", - ) - log(ERROR, exc) - - for future in futures: - future.cancel() - - self.terminate() - raise StartFailed() from exc - - for ins in self.instances: - log(DEBUG, ins) - - def terminate(self) -> None: - """Terminate all instances and shutdown cluster.""" - self.adapter.terminate_all_instances() - - def upload( - self, instance_name: str, local_path: str, remote_path: str - ) -> SFTPAttributes: - """Upload local file to remote instance.""" - instance = self.get_instance(instance_name) - - with ssh_connection(instance, self.ssh_credentials) as client: - sftp = client.open_sftp() - - if sftp is not None: - sftp_file_attributes = sftp.put(local_path, remote_path) - - return sftp_file_attributes - - def upload_all( - self, local_path: str, remote_path: str - ) -> Dict[str, SFTPAttributes]: - """Upload file to all instances.""" - results: Dict[str, SFTPAttributes] = {} - - with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor: - # Start the load operations and mark each future with its URL - future_to_result = { - executor.submit( - self.upload, instance_name, local_path, remote_path - ): instance_name - for instance_name in self.get_instance_names() - } - - for future in concurrent.futures.as_completed(future_to_result): - instance_name = future_to_result[future] - try: - results[instance_name] = future.result() - # pylint: disable=broad-except - except Exception as exc: - log(ERROR, (instance_name, exc)) - - return results - - def exec(self, instance_name: str, command: str) -> ExecInfo: - """Run command on instance and return stdout.""" - log(DEBUG, "Exec on %s: %s", instance_name, command) - - instance = self.get_instance(instance_name) - - with ssh_connection(instance, self.ssh_credentials) as client: - _, stdout, stderr = client.exec_command(command) - lines_stdout = stdout.readlines() - lines_stderr = stderr.readlines() - - print(lines_stdout, lines_stderr) - - return lines_stdout, lines_stderr - - def exec_all( - self, command: 
str, groups: Optional[List[str]] = None - ) -> Dict[str, ExecInfo]: - """Run command on all instances. - - If provided filter by group. - """ - instance_names = self.get_instance_names(groups) - - results: Dict[str, ExecInfo] = {} - - with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor: - # Start the load operations and mark each future with its URL - future_to_result = { - executor.submit(self.exec, instance_name, command): instance_name - for instance_name in instance_names - } - - for future in concurrent.futures.as_completed(future_to_result): - instance_name = future_to_result[future] - try: - results[instance_name] = future.result() - # pylint: disable=broad-except - except Exception as exc: - log(ERROR, (instance_name, exc)) - - return results diff --git a/src/py/flwr_experimental/ops/cluster_test.py b/src/py/flwr_experimental/ops/cluster_test.py deleted file mode 100644 index 6b00182c4d2f..000000000000 --- a/src/py/flwr_experimental/ops/cluster_test.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Implments compute classes for EC2.""" - - -import os -import unittest -import warnings -from typing import Sized, cast -from unittest.mock import MagicMock - -from .cluster import ( - Cluster, - InstanceMismatch, - create_instances, - group_instances_by_specs, -) -from .compute.ec2_adapter import EC2Adapter -from .instance import Instance - -IMAGE_ID = "ami-0370b0294d7241341" -KEY_NAME = "flower" -SSH_CREDENTIALS = ("ubuntu", "/Users/tanto/.ssh/flower.pem") -SUBNET_ID = "subnet-23da286f" -SECURITY_GROUP_IDS = ["sg-0dd0f0080bcf86400"] - - -class CreateInstancesTestCase(unittest.TestCase): - """Test cases for create_instances.""" - - def setUp(self) -> None: - """Prepare tests.""" - self.mock_adapter = MagicMock() - self.mock_adapter.create_instances.return_value = [ - (1, "1.1.1.1", "2.2.2.1", 22, "running"), - (2, "1.1.1.2", "2.2.2.2", 22, "running"), - ] - self.timeout = 10 - - def test_create_instances(self) -> None: - """Test if create_instances works correctly.""" - # Prepare - instances = [ - Instance(name="client_0", group="clients", num_cpu=2, num_ram=8), - Instance(name="client_1", group="clients", num_cpu=2, num_ram=8), - ] - - # Execute - create_instances( - adapter=self.mock_adapter, instances=instances, timeout=self.timeout - ) - - # Assert - self.mock_adapter.create_instances.assert_called_once_with( - num_cpu=instances[0].num_cpu, - num_ram=instances[0].num_ram, - num_instance=len(instances), - timeout=10, - gpu=False, - ) - for ins in instances: - assert ins.instance_id is not None - assert ins.private_ip is not None - assert ins.public_ip is not None - assert ins.ssh_port is not None - assert ins.state is not None - - def test_create_instances_fail(self) -> None: - """Test if create_instances fails when instances list is invalid.""" - # Prepare - instances = [ - Instance(name="client_0", group="clients", num_cpu=2, num_ram=8), - Instance(name="client_1", 
group="clients", num_cpu=1, num_ram=4), - ] - - # Execute - with self.assertRaises(InstanceMismatch): - create_instances( - adapter=self.mock_adapter, instances=instances, timeout=self.timeout - ) - - -def test_group_instances_by_specs() -> None: - """Test that function works correctly.""" - # Prepare - instances = [ - Instance(name="server", group="server", num_cpu=2, num_ram=4), - Instance(name="client_0", group="clients", num_cpu=2, num_ram=8), - Instance(name="logserver", group="logserver", num_cpu=2, num_ram=4), - Instance(name="client_1", group="clients", num_cpu=2, num_ram=8), - ] - expected_groups = [[instances[0], instances[2]], [instances[1], instances[3]]] - - # Execute - groups = group_instances_by_specs(instances) - - # Assert - assert len(groups) == 2 - assert groups == expected_groups - - -if os.getenv("FLOWER_INTEGRATION"): - - class ClusterIntegrationTestCase(unittest.TestCase): - """Integration tests class Cluster. - - This TestCase will not mock anythin and use a live EC2Adapter - which will be used to provision a single machine and execute a - single command on it. Afterwards the machines will be shut down. 
- """ - - # pylint: disable=too-many-instance-attributes - def setUp(self) -> None: - """Create an instance.""" - # Filter false positive warning - warnings.filterwarnings( - "ignore", - category=ResourceWarning, - message="unclosed.*", - ) - - adapter = EC2Adapter( - image_id=IMAGE_ID, - key_name=KEY_NAME, - subnet_id=SUBNET_ID, - security_group_ids=SECURITY_GROUP_IDS, - tags=[ - ("Purpose", "integration_test"), - ("Test Name", "ClusterIntegrationTestCase"), - ], - ) - self.cluster = Cluster( - adapter=adapter, - ssh_credentials=SSH_CREDENTIALS, - instances=[ - Instance(name="server", group="server", num_cpu=2, num_ram=2) - ], - # In case the tearDown fails for some reason the machines - # should automatically terminate after 10 minutes - timeout=10, - ) - self.cluster.start() - - def tearDown(self) -> None: - self.cluster.terminate() - - def test_exec(self) -> None: - """Execute on all clients.""" - # Prepare - command = "nproc" - expected_result = "2\n" - - # Execute - stdout, stderr = self.cluster.exec("server", command) - - casted_stderr: Sized = cast(Sized, stderr) - casted_stdout: Sized = cast(Sized, stdout) - - # Assert - assert len(casted_stderr) == 0 - assert len(casted_stdout) == 1 - assert "".join(stdout) == expected_result - - -if __name__ == "__main__": - unittest.main(verbosity=2) diff --git a/src/py/flwr_experimental/ops/compute/__init__.py b/src/py/flwr_experimental/ops/compute/__init__.py deleted file mode 100644 index f6ad468f484d..000000000000 --- a/src/py/flwr_experimental/ops/compute/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Provides various adapters with standard interface to make compute resources -available.""" diff --git a/src/py/flwr_experimental/ops/compute/adapter.py b/src/py/flwr_experimental/ops/compute/adapter.py deleted file mode 100644 index 51c67a226a6e..000000000000 --- a/src/py/flwr_experimental/ops/compute/adapter.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Provides a standardised interface for provisioning compute resources.""" - - -from abc import ABC, abstractmethod -from typing import List, Optional, Tuple - -AdapterInstance = Tuple[ - str, str, Optional[str], int, str -] # (InstanceId, PrivateIpAddress, PublicIpAddress, SSHPort, State) - - -class Adapter(ABC): - """Base class for different Adapter implementations, for example, AWS - EC2.""" - - # pylint: disable=too-many-arguments - @abstractmethod - def create_instances( - self, - num_cpu: int, - num_ram: float, - timeout: int, - num_instance: int = 1, - gpu: bool = False, - ) -> List[AdapterInstance]: - """Create one or more instance(s) of the same type. - - Args: - num_cpu (int): Number of instance CPU - num_ram (int): RAM in GB - num_instance (int): Number of instances to start if currently available - timeout (int): Timeout in minutes - commands (:obj:`str`, optional): List of bash commands which will be joined into a - single string with newline as a seperator - gpu (bool): If true will only consider instances with GPU - """ - - @abstractmethod - def list_instances( - self, instance_ids: Optional[List[str]] = None - ) -> List[AdapterInstance]: - """List all instances with tags belonging to this adapter. - - Args: - instance_ids (:obj:`list` of :obj:`str`, optional): If provided, filter by instance_ids - """ - - @abstractmethod - def terminate_instances(self, instance_ids: List[str]) -> None: - """Terminate instances. - - Should raise an error if something goes wrong. - """ - - @abstractmethod - def terminate_all_instances(self) -> None: - """Terminate all instances. - - Will raise an error if something goes wrong. 
- """ diff --git a/src/py/flwr_experimental/ops/compute/docker_adapter.py b/src/py/flwr_experimental/ops/compute/docker_adapter.py deleted file mode 100644 index acb7c0c5a4e0..000000000000 --- a/src/py/flwr_experimental/ops/compute/docker_adapter.py +++ /dev/null @@ -1,166 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Provides an Adapter implementation for Docker.""" - - -import socket -import time -from contextlib import closing -from typing import List, Optional -from uuid import uuid4 - -import docker - -from .adapter import Adapter, AdapterInstance - - -class NoPublicFacingPortFound(Exception): - """Raise if public-facing port of container was not bound to private port - of host.""" - - -def get_free_port() -> int: - """Returns a free port.""" - with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as socks: - socks.bind(("", 0)) - socks.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - return int(socks.getsockname()[1]) - - -def _get_container_port(container_id: str) -> int: - """Return container port on host machine.""" - client = docker.APIClient(base_url="unix://var/run/docker.sock") - result = client.port(container_id, 22) - client.close() - if len(result) == 0: - raise NoPublicFacingPortFound - return int(result[0]["HostPort"]) - - -class DockerAdapter(Adapter): - """Adapter for Docker.""" - - 
def __init__(self, name: str = "flower", network: str = "flower"): - self.name = name - self.network = network - self._create_network() - - def _create_network(self) -> None: - """Create Docker network if it does not exist.""" - client = docker.from_env() - try: - client.networks.get(self.network) - except docker.errors.NotFound: - client.networks.create(self.network, driver="bridge") - client.close() - - # pylint: disable=too-many-arguments - def create_instances( - self, - num_cpu: int, - num_ram: float, - timeout: int, - num_instance: int = 1, - gpu: bool = False, - ) -> List[AdapterInstance]: - """Create one or more docker container instance(s) of the same type. - - Args: - num_cpu (int): Number of instance CPU cores (currently ignored) - num_ram (int): RAM in GB (currently ignored) - timeout (int): Timeout in minutes - num_instance (int): Number of instances to start - """ - instances: List[AdapterInstance] = [] - - client = docker.from_env() - for _ in range(num_instance): - port = get_free_port() - container = client.containers.run( - "flower-sshd:latest", - auto_remove=True, - detach=True, - ports={"22/tcp": port}, - network=self.network, - labels={"adapter_name": self.name}, - # We have to assign a name as the default random name will not work - # as hostname so the containers can reach each other - name=str(uuid4().hex[:8]), - ) - - # Docker needs a little time to start the container - time.sleep(1) - - port = _get_container_port(container.short_id) - instances.append( - (container.short_id, container.name, "127.0.0.1", port, "started") - ) - - client.close() - - return instances - - def list_instances( - self, instance_ids: Optional[List[str]] = None - ) -> List[AdapterInstance]: - """List all container instances with tags belonging to this adapter. 
- - Args: - instance_ids ([str[]]): If provided, filter by instance_ids - """ - instances: List[AdapterInstance] = [] - - client = docker.from_env() - containers = client.containers.list( - filters={"label": f"adapter_name={self.name}"} - ) - for container in containers: - port = _get_container_port(container.short_id) - instances.append( - ( - container.short_id, - container.name, - "127.0.0.1", - port, - container.status, - ) - ) - client.close() - - return instances - - def terminate_instances(self, instance_ids: List[str]) -> None: - """Terminate container instance(s). - - Will raise an error if something goes wrong. - """ - client = docker.from_env() - for instance_id in instance_ids: - container = client.containers.get(instance_id) - container.remove(force=True) - client.close() - - def terminate_all_instances(self) -> None: - """Terminate all instances. - - Will raise an error if something goes wrong. - """ - client = docker.from_env() - containers = client.containers.list( - filters={"label": f"adapter_name={self.name}"} - ) - for container in containers: - container.remove(force=True) - client.close() diff --git a/src/py/flwr_experimental/ops/compute/docker_adapter_test.py b/src/py/flwr_experimental/ops/compute/docker_adapter_test.py deleted file mode 100644 index c6d5759a8246..000000000000 --- a/src/py/flwr_experimental/ops/compute/docker_adapter_test.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests DockerAdapter.""" - - -import os -import time -import unittest - -import docker - -from .docker_adapter import DockerAdapter, get_free_port - -if os.getenv("FLOWER_INTEGRATION"): - - class DockerAdapterIntegrationTestCase(unittest.TestCase): - """Test suite for class DockerAdapter. - - Required docker to be available on the host machine. - """ - - def setUp(self) -> None: - """Prepare tests.""" - self.name = "flower_test" - self.client = docker.from_env() - self.adapter = DockerAdapter(name=self.name) - - def tearDown(self) -> None: - """Cleanup tests.""" - containers = self.client.containers.list( - filters={"label": f"adapter_name={self.name}"} - ) - for container in containers: - container.remove(force=True) - self.client.close() - - def test_create_instances(self) -> None: - """Create and start an instance.""" - # Execute - instances = self.adapter.create_instances( - num_cpu=2, num_ram=2, timeout=1, num_instance=2, gpu=False - ) - - # Assert - assert len(instances) == 2 - - containers = self.client.containers.list( - filters={"label": f"adapter_name={self.name}"} - ) - assert len(containers) == 2 - - def test_list_instances(self) -> None: - """List all instances.""" - # Prepare - for _ in range(2): - port = get_free_port() - self.client.containers.run( - "flower-sshd:latest", - auto_remove=True, - detach=True, - ports={"22/tcp": port}, - labels={"adapter_name": self.name}, - ) - - # Execute - instances = self.adapter.list_instances() - - # Assert - assert len(instances) == 2, "Expected to find two instances." - ports = {i[3] for i in instances} - assert len(ports) == 2, "Each instance should have a distinct port." 
- - def test_terminate_instance(self) -> None: - """Destroy all instances.""" - # Prepare - port = get_free_port() - container = self.client.containers.run( - "flower-sshd:latest", - name=f"{self.name}_{int(time.time() * 1000)}", - auto_remove=True, - detach=True, - ports={"22/tcp": port}, - labels={"adapter_name": self.name}, - ) - - # Execute - self.adapter.terminate_instances([container.short_id]) - - # Assert - containers = self.client.containers.list( - filters={"label": f"adapter_name={self.name}"} - ) - assert len(containers) == 0 - - def test_terminate_all_instances(self) -> None: - """Destroy all instances.""" - # Prepare - for _ in range(2): - port = get_free_port() - self.client.containers.run( - "flower-sshd:latest", - name=f"{self.name}_{int(time.time() * 1000)}", - auto_remove=True, - detach=True, - ports={"22/tcp": port}, - ) - - # Execute - self.adapter.terminate_all_instances() - - # Assert - containers = self.client.containers.list( - filters={"label": f"adapter_name={self.name}"} - ) - assert len(containers) == 0 - - -if __name__ == "__main__": - unittest.main(verbosity=2) diff --git a/src/py/flwr_experimental/ops/compute/ec2_adapter.py b/src/py/flwr_experimental/ops/compute/ec2_adapter.py deleted file mode 100644 index 43fb66c2d944..000000000000 --- a/src/py/flwr_experimental/ops/compute/ec2_adapter.py +++ /dev/null @@ -1,311 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Provides an Adapter implementation for AWS EC2.""" - - -import time -from logging import INFO -from typing import Dict, List, Optional, Tuple, Union - -import boto3 -from boto3_type_annotations import ec2 - -from flwr.common.logger import log - -from .adapter import Adapter, AdapterInstance - -EC2RunInstancesResult = Dict[str, List[ec2.Instance]] -EC2DescribeInstancesResult = Dict[str, List[Dict[str, List[ec2.Instance]]]] - - -class NoMatchingInstanceType(Exception): - """No matching instance type exists.""" - - -class EC2TerminationFailure(Exception): - """Something went wrong while terminating EC2 instances. - - EC2 should be manually checked to check what went wrong and the - instances might need manual shutdown and terminatation. - """ - - -class EC2CreateInstanceFailure(Exception): - """Instance provisioning failed.""" - - -class EC2StatusTimeout(Exception): - """Indicates that the status check timed out.""" - - -# List of AWS instance types with -# (instance_type, vCPU, Mem) -INSTANCE_TYPES_CPU = [ - ("t3.small", 2, 2, 0.0209), # Beware CPU credit limited - ("c5.large", 2, 4, 0.097), - ("m5a.large", 2, 8, 0.104), - ("m5a.xlarge", 4, 16, 0.208), - ("m5a.2xlarge", 8, 32, 0.416), - ("m5a.4xlarge", 16, 64, 0.832), - ("m5a.12xlarge", 48, 192, 2.496), - ("m5a.24xlarge", 96, 384, 4.992), - ("r5.24xlarge", 96, 768, 7.296), -] - -INSTANCE_TYPES_GPU = [ - ("p3.2xlarge", 8, 61, 3.823), - ("p2.xlarge", 4, 61, 0.900), -] - - -def find_instance_type( - num_cpu: int, num_ram: float, instance_types: List[Tuple[str, int, int, float]] -) -> Tuple[str, float]: - """Return the first matching instance type if one exists, raise - otherwise.""" - for instance_type in instance_types: - if instance_type[1] == num_cpu and instance_type[2] == num_ram: - return instance_type[0], instance_type[3] - - raise NoMatchingInstanceType - - -def flatten_reservations( - reservations: EC2DescribeInstancesResult, 
-) -> List[ec2.Instance]: - """Extract instances from reservations returned by a call to - describe_instances.""" - instances: List[ec2.Instance] = [] - - # Flatten list of lists - for ins in [res["Instances"] for res in reservations["Reservations"]]: - instances += ins - - return instances - - -def are_all_instances_running(instances: List[ec2.Instance]) -> bool: - """Return True if all instances are running.""" - for ins in instances: - if ins["State"]["Name"] != "running": - return False - - return True - - -def are_all_status_ok(instance_status: List[Tuple[str, str]]) -> bool: - """Return True if all instances are ok.""" - for status in instance_status: - if status[1] != "ok": - return False - - return True - - -def tags_to_filter( - tags: List[Tuple[str, str]] -) -> List[Dict[str, Union[str, List[str]]]]: - """Turn list of tuples with tag name and value in to AWS format.""" - return [{"Name": f"tag:{tag[0]}", "Values": [tag[1]]} for tag in tags] - - -class EC2Adapter(Adapter): - """Adapter for AWS EC2.""" - - # pylint: disable=too-many-arguments - def __init__( - self, - image_id: str, - key_name: str, - subnet_id: str, - security_group_ids: List[str], - tags: Optional[List[Tuple[str, str]]] = None, - boto_ec2_client: Optional[boto3.session.Session] = None, - ): - self.image_id = image_id - self.key_name = key_name - self.subnet_id = subnet_id - self.security_group_ids = security_group_ids - self.tags = [("Flower EC2 Adapter ID", f"{int(time.time())}")] - - if tags is not None: - self.tags += tags - - self.tag_specifications = [ - { - "ResourceType": "instance", - "Tags": [{"Key": tag[0], "Value": tag[1]} for tag in self.tags], - } - ] - - self.ec2 = boto3.client("ec2") if boto_ec2_client is None else boto_ec2_client - - def _wait_until_instances_are_reachable(self, instance_ids: List[str]) -> None: - """Block until all instances are reachable. Raises TimeoutException - after 300s. - - Returns: - bool: True if all are reachable otherwise False. 
- """ - - for _ in range(30): - result = self.ec2.describe_instance_status( - InstanceIds=instance_ids, - # Also include instances which don't have state "running" yet - IncludeAllInstances=True, - ) - - instance_status = [ - (ins["InstanceId"], ins["InstanceStatus"]["Status"]) - for ins in result["InstanceStatuses"] - ] - - print(instance_status) - - if are_all_status_ok(instance_status): - return - - time.sleep(10) - - raise EC2StatusTimeout() - - # pylint: disable=too-many-arguments - def create_instances( - self, - num_cpu: int, - num_ram: float, - timeout: int, - num_instance: int = 1, - gpu: bool = False, - ) -> List[AdapterInstance]: - """Create one or more EC2 instance(s) of the same type. - - Args: - num_cpu (int): Number of instance vCPU (values in - ec2_adapter.INSTANCE_TYPES_CPU or INSTANCE_TYPES_GPU) - num_ram (int): RAM in GB (values in ec2_adapter.INSTANCE_TYPES_CPU - or INSTANCE_TYPES_GPU) - timeout (int): Timeout in minutes - num_instance (int): Number of instances to start if currently available in EC2 - """ - # The instance will be set to terminate after shutdown - # This is a fail safe in case something happens and the instances - # are not correctly shutdown - user_data = ["#!/bin/bash", f"sudo shutdown -P {timeout}"] - user_data_str = "\n".join(user_data) - - instance_type, hourly_price = find_instance_type( - num_cpu, num_ram, INSTANCE_TYPES_GPU if gpu else INSTANCE_TYPES_CPU - ) - - hourly_price_total = round(num_instance * hourly_price, 2) - - log( - INFO, - "Starting %s instances of type %s which in total will roughly cost $%s an hour.", - num_instance, - instance_type, - hourly_price_total, - ) - - result: EC2RunInstancesResult = self.ec2.run_instances( - BlockDeviceMappings=[ - {"DeviceName": "/dev/sda1", "Ebs": {"DeleteOnTermination": True}} - ], - ImageId=self.image_id, - # We always want an exact number of instances - MinCount=num_instance, - MaxCount=num_instance, - InstanceType=instance_type, - KeyName=self.key_name, - 
IamInstanceProfile={"Name": "FlowerInstanceProfile"}, - SubnetId=self.subnet_id, - SecurityGroupIds=self.security_group_ids, - TagSpecifications=self.tag_specifications, - InstanceInitiatedShutdownBehavior="terminate", - UserData=user_data_str, - ) - - instance_ids = [ins["InstanceId"] for ins in result["Instances"]] - - # As soon as all instances status is "running" we have to check the InstanceStatus which - # reports impaired functionality that stems from issues internal to the instance, such as - # impaired reachability - try: - self._wait_until_instances_are_reachable(instance_ids=instance_ids) - except EC2StatusTimeout as ec2_status_timeout: - self.terminate_instances(instance_ids) - raise EC2CreateInstanceFailure() from ec2_status_timeout - - return self.list_instances(instance_ids=instance_ids) - - def list_instances( - self, instance_ids: Optional[List[str]] = None - ) -> List[AdapterInstance]: - """List all instances with tags belonging to this adapter. - - Args: - instance_ids ([str[]]): If provided, filter by instance_ids - """ - if instance_ids is None: - instance_ids = [] - - result: EC2DescribeInstancesResult = self.ec2.describe_instances( - InstanceIds=instance_ids, - Filters=tags_to_filter(self.tags), - ) - - instances = flatten_reservations(result) - - instances = [ - ( - ins["InstanceId"], - ins["PrivateIpAddress"], - ins["PublicIpAddress"], - 22, - ins["State"]["Name"], - ) - for ins in instances - ] - - return instances - - def terminate_instances(self, instance_ids: List[str]) -> None: - """Terminate instances. - - Will raise an error if something goes wrong. - """ - res = self.ec2.terminate_instances(InstanceIds=instance_ids) - - for tin in res["TerminatingInstances"]: - if tin["CurrentState"]["Name"] != "shutting-down": - raise EC2TerminationFailure - - def terminate_all_instances(self) -> None: - """Terminate all instances. - - Will raise an error if something goes wrong. 
- """ - result: EC2DescribeInstancesResult = self.ec2.describe_instances( - Filters=tags_to_filter(self.tags), - ) - - instances = flatten_reservations(result) - instance_ids = [ins["InstanceId"] for ins in instances] - - if not instance_ids: - return - - self.terminate_instances(instance_ids) diff --git a/src/py/flwr_experimental/ops/compute/ec2_adapter_test.py b/src/py/flwr_experimental/ops/compute/ec2_adapter_test.py deleted file mode 100644 index 14827745bf3d..000000000000 --- a/src/py/flwr_experimental/ops/compute/ec2_adapter_test.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests EC2Adapter.""" - - -import os -import unittest -import warnings -from unittest.mock import MagicMock - -from .ec2_adapter import EC2Adapter - -IMAGE_ID = "ami-0370b0294d7241341" -KEY_NAME = "flower" -SUBNET_ID = "subnet-23da286f" -SECURITY_GROUP_IDS = ["sg-0dd0f0080bcf86400"] - - -class EC2AdapterTestCase(unittest.TestCase): - """Test suite for class EC2Adapter.""" - - def setUp(self) -> None: - """Create an instance.""" - self.ec2_mock = MagicMock() - - self.ec2_mock.run_instances.return_value = { - "Instances": [ - { - "InstanceId": "1", - "PrivateIpAddress": "1.1.1.1", - "PublicIpAddress": "2.1.1.1", - "State": {"Name": "pending"}, - } - ] - } - - self.ec2_mock.describe_instances.return_value = { - "Reservations": [ - { - "Instances": [ - { - "InstanceId": "1", - "PrivateIpAddress": "1.1.1.1", - "PublicIpAddress": "2.1.1.1", - "State": {"Name": "running"}, - } - ] - } - ] - } - - self.adapter = EC2Adapter( - image_id="ami-0370b0294d7241341", - key_name="flower", - subnet_id="subnet-23da286f", - security_group_ids=["sg-0dd0f0080bcf86400"], - tags=[("Purpose", "integration_test"), ("Test Name", "EC2AdapterTestCase")], - boto_ec2_client=self.ec2_mock, - ) - - def test_create_instances(self) -> None: - """Create and start an instance.""" - # Prepare - reservations = self.ec2_mock.describe_instances.return_value["Reservations"] - ec2_instance = reservations[0]["Instances"][0] - - expected_return_value = ( - ec2_instance["InstanceId"], - ec2_instance["PrivateIpAddress"], - ec2_instance["PublicIpAddress"], - 22, - ec2_instance["State"]["Name"], - ) - - # Execute - instances = self.adapter.create_instances(num_cpu=2, num_ram=2, timeout=1) - - # Assert - assert len(instances) == 1 - assert isinstance(instances[0], tuple) - assert instances[0] == expected_return_value - - def test_list_instances(self) -> None: - """List all instances.""" - # Prepare - reservations = 
self.ec2_mock.describe_instances.return_value["Reservations"] - ec2_instance = reservations[0]["Instances"][0] - - expected_return_value = ( - ec2_instance["InstanceId"], - ec2_instance["PrivateIpAddress"], - ec2_instance["PublicIpAddress"], - 22, - ec2_instance["State"]["Name"], - ) - - # Execute - instances = self.adapter.list_instances() - - # Assert - assert len(instances) == 1 - assert instances[0] == expected_return_value - - def test_terminate_instances(self) -> None: - """Destroy all instances.""" - # Prepare - instance_id = "1" - result = {"TerminatingInstances": [{"CurrentState": {"Name": "shutting-down"}}]} - self.ec2_mock.terminate_instances.return_value = result - - # Execute - self.adapter.terminate_instances([instance_id]) - - -if os.getenv("FLOWER_INTEGRATION"): - - class EC2AdapterIntegrationTestCase(unittest.TestCase): - """Test suite for class EC2Adapter.""" - - def setUp(self) -> None: - """Prepare tests.""" - # Filter false positive warning - warnings.filterwarnings( - "ignore", - category=ResourceWarning, - message="unclosed.*", - ) - - self.adapter = EC2Adapter( - image_id="ami-0370b0294d7241341", - key_name="flower", - subnet_id="subnet-23da286f", - security_group_ids=["sg-0dd0f0080bcf86400"], - ) - - def test_workflow(self) -> None: - """Create, list and terminate an instance.""" - # Execute & Assert - instances = self.adapter.create_instances( - num_cpu=2, num_ram=2, num_instance=1, timeout=10 - ) - instances = self.adapter.list_instances() - - assert len(instances) == 1 - - self.adapter.terminate_instances([instances[0][0]]) - - -if __name__ == "__main__": - unittest.main(verbosity=2) diff --git a/src/py/flwr_experimental/ops/instance.py b/src/py/flwr_experimental/ops/instance.py deleted file mode 100644 index 20be40552727..000000000000 --- a/src/py/flwr_experimental/ops/instance.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Provides dataclass Instance.""" - - -from dataclasses import dataclass -from typing import Optional - - -# pylint: disable=too-many-instance-attributes -@dataclass -class Instance: - """Represents an instance.""" - - # Specs - name: str - group: str - num_cpu: int - num_ram: float - gpu: bool = False - - # Runtime information - instance_id: Optional[str] = None - private_ip: Optional[str] = None - public_ip: Optional[str] = None - ssh_port: Optional[int] = None - state: Optional[str] = None diff --git a/src/swift/flwr/Package.swift b/src/swift/flwr/Package.swift index 9ebef2d89870..8adf85d67117 100644 --- a/src/swift/flwr/Package.swift +++ b/src/swift/flwr/Package.swift @@ -1,4 +1,4 @@ -// swift-tools-version: 5.6 +// swift-tools-version: 5.9 // The swift-tools-version declares the minimum version of Swift required to build this package. import PackageDescription @@ -6,7 +6,7 @@ import PackageDescription let package = Package( name: "flwr", platforms: [ - .iOS(.v13), + .iOS(.v16), ], products: [ // Products define the executables and libraries a package produces, and make them visible to other packages. 
@@ -19,8 +19,8 @@ let package = Package( // .package(url: /* package url */, from: "1.0.0"), .package(url: "https://github.com/pvieito/PythonKit.git", branch: "master"), .package(url: "https://github.com/kewlbear/NumPy-iOS.git", branch: "main"), - .package(url: "https://github.com/grpc/grpc-swift.git", from: "1.0.0"), - .package(url: "https://github.com/apple/swift-docc-plugin", from: "1.1.0"), + .package(url: "https://github.com/grpc/grpc-swift.git", from: "1.22.0"), + .package(url: "https://github.com/apple/swift-protobuf.git", from: "1.26.0"), ], targets: [ // Targets are the basic building blocks of a package. A target can define a module or a test suite. @@ -30,7 +30,8 @@ let package = Package( dependencies: [ .product(name: "GRPC", package: "grpc-swift"), .product(name: "NumPy-iOS", package: "NumPy-iOS"), - .product(name: "PythonKit", package: "PythonKit")], + .product(name: "PythonKit", package: "PythonKit"), + .product(name: "SwiftProtobuf", package: "swift-protobuf")], path: "Sources/Flower"), .testTarget( name: "FlowerTests",