From 3dd6cabd62b12c92649d1699401376715a372d69 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 22 Dec 2023 12:35:22 +0000 Subject: [PATCH 01/30] Update types-requests requirement from ==2.31.0.2 to ==2.31.0.10 (#2739) Updates the requirements on [types-requests](https://github.com/python/typeshed) to permit the latest version. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-requests dependency-type: direct:development ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Taner Topal --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index adadba711787..809b2520ccee 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -81,7 +81,7 @@ rest = ["requests", "starlette", "uvicorn"] [tool.poetry.group.dev.dependencies] types-dataclasses = "==0.6.6" types-protobuf = "==3.19.18" -types-requests = "==2.31.0.2" +types-requests = "==2.31.0.10" types-setuptools = "==68.2.0.0" clang-format = "==17.0.4" isort = "==5.12.0" From 656272455e99f62daa55efc5bedb97e43131bd9f Mon Sep 17 00:00:00 2001 From: Heng Pan <134433891+panh99@users.noreply.github.com> Date: Fri, 22 Dec 2023 12:58:20 +0000 Subject: [PATCH 02/30] Make the optional arg "--callable" in `flower-client` a required positional arg. 
(#2673) Co-authored-by: Javier --- e2e/test_driver.sh | 4 ++-- examples/mt-pytorch-callable/README.md | 4 ++-- examples/mt-pytorch-callable/client.py | 2 +- src/py/flwr/client/app.py | 17 +++++++++-------- 4 files changed, 14 insertions(+), 13 deletions(-) diff --git a/e2e/test_driver.sh b/e2e/test_driver.sh index ca54dbf4852f..32314bd22533 100755 --- a/e2e/test_driver.sh +++ b/e2e/test_driver.sh @@ -16,10 +16,10 @@ esac timeout 2m flower-server $server_arg & sleep 3 -timeout 2m flower-client $client_arg --callable client:flower --server 127.0.0.1:9092 & +timeout 2m flower-client client:flower $client_arg --server 127.0.0.1:9092 & sleep 3 -timeout 2m flower-client $client_arg --callable client:flower --server 127.0.0.1:9092 & +timeout 2m flower-client client:flower $client_arg --server 127.0.0.1:9092 & sleep 3 timeout 2m python driver.py & diff --git a/examples/mt-pytorch-callable/README.md b/examples/mt-pytorch-callable/README.md index 65ef000c26f2..120e28098344 100644 --- a/examples/mt-pytorch-callable/README.md +++ b/examples/mt-pytorch-callable/README.md @@ -33,13 +33,13 @@ flower-server --insecure In a new terminal window, start the first long-running Flower client: ```bash -flower-client --callable client:flower +flower-client --insecure client:flower ``` In yet another new terminal window, start the second long-running Flower client: ```bash -flower-client --callable client:flower +flower-client --insecure client:flower ``` ## Start the Driver script diff --git a/examples/mt-pytorch-callable/client.py b/examples/mt-pytorch-callable/client.py index 6f9747784ae0..4195a714ca89 100644 --- a/examples/mt-pytorch-callable/client.py +++ b/examples/mt-pytorch-callable/client.py @@ -108,7 +108,7 @@ def client_fn(cid: str): return FlowerClient().to_client() -# To run this: `flower-client --callable client:flower` +# To run this: `flower-client client:flower` flower = fl.flower.Flower( client_fn=client_fn, ) diff --git a/src/py/flwr/client/app.py 
b/src/py/flwr/client/app.py index 3448e18e20c5..2a2f067c2823 100644 --- a/src/py/flwr/client/app.py +++ b/src/py/flwr/client/app.py @@ -74,10 +74,10 @@ def run_client() -> None: print(args.root_certificates) print(args.server) - print(args.callable_dir) + print(args.dir) print(args.callable) - callable_dir = args.callable_dir + callable_dir = args.dir if callable_dir is not None: sys.path.insert(0, callable_dir) @@ -101,6 +101,10 @@ def _parse_args_client() -> argparse.ArgumentParser: description="Start a long-running Flower client", ) + parser.add_argument( + "callable", + help="For example: `client:flower` or `project.package.module:wrapper.flower`", + ) parser.add_argument( "--insecure", action="store_true", @@ -120,13 +124,10 @@ def _parse_args_client() -> argparse.ArgumentParser: help="Server address", ) parser.add_argument( - "--callable", - help="For example: `client:flower` or `project.package.module:wrapper.flower`", - ) - parser.add_argument( - "--callable-dir", + "--dir", default="", - help="Add specified directory to the PYTHONPATH and load callable from there." + help="Add specified directory to the PYTHONPATH and load Flower " + "callable from there." " Default: current working directory.", ) From d3f12beb1d3addadff9869ffe41dbeffa02a1a01 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 22 Dec 2023 13:06:52 +0000 Subject: [PATCH 03/30] Update jupyterlab requirement from ==4.0.8 to ==4.0.9 (#2740) Updates the requirements on [jupyterlab](https://github.com/jupyterlab/jupyterlab) to permit the latest version. - [Release notes](https://github.com/jupyterlab/jupyterlab/releases) - [Changelog](https://github.com/jupyterlab/jupyterlab/blob/@jupyterlab/lsp@4.0.9/CHANGELOG.md) - [Commits](https://github.com/jupyterlab/jupyterlab/compare/@jupyterlab/lsp@4.0.8...@jupyterlab/lsp@4.0.9) --- updated-dependencies: - dependency-name: jupyterlab dependency-type: direct:development ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Taner Topal --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 809b2520ccee..57f43af6ac73 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -95,7 +95,7 @@ pytest-cov = "==4.1.0" pytest-watch = "==4.2.0" grpcio-tools = "==1.48.2" mypy-protobuf = "==3.2.0" -jupyterlab = "==4.0.8" +jupyterlab = "==4.0.9" rope = "==1.11.0" semver = "==3.0.2" sphinx = "==6.2.1" From 67b652555ffb7e5d276e3b63efe4579fc14f0a91 Mon Sep 17 00:00:00 2001 From: msck72 <122954562+msck72@users.noreply.github.com> Date: Mon, 25 Dec 2023 16:55:25 +0530 Subject: [PATCH 04/30] HeteroFL baseline (#2439) Co-authored-by: jafermarq --- baselines/heterofl/LICENSE | 202 +++++ baselines/heterofl/README.md | 200 +++++ baselines/heterofl/heterofl/__init__.py | 1 + baselines/heterofl/heterofl/client.py | 133 +++ .../heterofl/client_manager_heterofl.py | 207 +++++ baselines/heterofl/heterofl/conf/base.yaml | 47 + baselines/heterofl/heterofl/conf/fedavg.yaml | 41 + baselines/heterofl/heterofl/dataset.py | 83 ++ .../heterofl/heterofl/dataset_preparation.py | 357 ++++++++ .../heterofl/heterofl/datasets/__init__.py | 9 + baselines/heterofl/heterofl/datasets/cifar.py | 150 ++++ baselines/heterofl/heterofl/datasets/mnist.py | 167 ++++ baselines/heterofl/heterofl/datasets/utils.py | 244 +++++ baselines/heterofl/heterofl/main.py | 204 +++++ .../heterofl/heterofl/model_properties.py | 123 +++ baselines/heterofl/heterofl/models.py | 839 ++++++++++++++++++ baselines/heterofl/heterofl/server.py | 101 +++ baselines/heterofl/heterofl/strategy.py | 467 ++++++++++ baselines/heterofl/heterofl/utils.py | 218 +++++ baselines/heterofl/pyproject.toml | 145 +++ doc/source/ref-changelog.md | 2 + 21 files changed, 3940 insertions(+) create mode 100644 baselines/heterofl/LICENSE create mode 100644 baselines/heterofl/README.md create 
mode 100644 baselines/heterofl/heterofl/__init__.py create mode 100644 baselines/heterofl/heterofl/client.py create mode 100644 baselines/heterofl/heterofl/client_manager_heterofl.py create mode 100644 baselines/heterofl/heterofl/conf/base.yaml create mode 100644 baselines/heterofl/heterofl/conf/fedavg.yaml create mode 100644 baselines/heterofl/heterofl/dataset.py create mode 100644 baselines/heterofl/heterofl/dataset_preparation.py create mode 100644 baselines/heterofl/heterofl/datasets/__init__.py create mode 100644 baselines/heterofl/heterofl/datasets/cifar.py create mode 100644 baselines/heterofl/heterofl/datasets/mnist.py create mode 100644 baselines/heterofl/heterofl/datasets/utils.py create mode 100644 baselines/heterofl/heterofl/main.py create mode 100644 baselines/heterofl/heterofl/model_properties.py create mode 100644 baselines/heterofl/heterofl/models.py create mode 100644 baselines/heterofl/heterofl/server.py create mode 100644 baselines/heterofl/heterofl/strategy.py create mode 100644 baselines/heterofl/heterofl/utils.py create mode 100644 baselines/heterofl/pyproject.toml diff --git a/baselines/heterofl/LICENSE b/baselines/heterofl/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/baselines/heterofl/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/baselines/heterofl/README.md b/baselines/heterofl/README.md new file mode 100644 index 000000000000..6e9c32077e9b --- /dev/null +++ b/baselines/heterofl/README.md @@ -0,0 +1,200 @@ +--- +title: "HeteroFL: Computation And Communication Efficient Federated Learning For Heterogeneous Clients" +url: https://openreview.net/forum?id=TNkPBBYFkXg +labels: [system heterogeneity, image classification] +dataset: [MNIST, CIFAR-10] +--- + +# HeteroFL: Computation And Communication Efficient Federated Learning For Heterogeneous Clients + +**Paper:** [openreview.net/forum?id=TNkPBBYFkXg](https://openreview.net/forum?id=TNkPBBYFkXg) + +**Authors:** Enmao Diao, Jie Ding, Vahid Tarokh + +**Abstract:** Federated Learning (FL) is a method of training machine learning models on private data distributed over a large number of possibly heterogeneous clients such as mobile phones and IoT devices. In this work, we propose a new federated learning framework named HeteroFL to address heterogeneous clients equipped with very different computation and communication capabilities. Our solution can enable the training of heterogeneous local models with varying computation complexities and still produce a single global inference model. For the first time, our method challenges the underlying assumption of existing work that local models have to share the same architecture as the global model. 
We demonstrate several strategies to enhance FL training and conduct extensive empirical evaluations, including five computation complexity levels of three model architecture on three datasets. We show that adaptively distributing subnetworks according to clients’ capabilities is both computation and communication efficient. + + +## About this baseline + +**What’s implemented:** The code in this directory is an implementation of HeteroFL in PyTorch using Flower. The code incorporates references from the authors' implementation. Implementation of custom model split and aggregation as suggested by [@negedng](https://github.com/negedng), is available [here](https://github.com/msck72/heterofl_custom_aggregation). By modifying the configuration in the `base.yaml`, the results in the paper can be replicated, with both fixed and dynamic computational complexities among clients. + +**Key Terminology:** ++ *Model rate* defines the computational complexity of a client. Authors have defined five different computation complexity levels {a, b, c, d, e} with the hidden channel shrinkage ratio r = 0.5. + ++ *Model split mode* specifies whether the computational complexities of clients are fixed (throughout the experiment), or whether they are dynamic (change their mode_rate/computational-complexity every-round). + ++ *Model mode* determines the proportionality of clients with various computation complexity levels, for example, a4-b2-e4 determines at each round, proportion of clients with computational complexity level a = 4 / (4 + 2 + 4) * num_clients, similarly, proportion of clients with computational complexity level b = 2 / (4 + 2 + 4) * num_clients and so on. 
+ +**Implementation Insights:** +*ModelRateManager* manages the model rate of client in simulation, which changes the model rate based on the model mode of the setup and *ClientManagerHeterofl* keeps track of model rates of the clients, so configure fit knows which/how-much subset of the model that needs to be sent to the client. + +**Datasets:** The code utilized benchmark MNIST and CIFAR-10 datasets from Pytorch's torchvision for its experimentation. + +**Hardware Setup:** The experiments were run on Google colab pro with 50GB RAM and T4 TPU. For MNIST dataset & CNN model, it approximately takes 1.5 hours to complete 200 rounds while for CIFAR10 dataset & ResNet18 model it takes around 3-4 hours to complete 400 rounds (may vary based on the model-mode of the setup). + +**Contributors:** M S Chaitanya Kumar [(github.com/msck72)](https://github.com/msck72) + + +## Experimental Setup + +**Task:** Image Classification. +**Model:** This baseline uses two models: ++ Convolutional Neural Network(CNN) model is used for MNIST dataset. ++ PreResNet (preactivated ResNet) model is used for CIFAR10 dataset. + +These models use static batch normalization (sBN) and they incorporate a Scaler module following each convolutional layer. + +**Dataset:** This baseline includes MNIST and CIFAR10 datasets. + +| Dataset | #Classes | IID Partition | non-IID Partition | +| :---: | :---: | :---: | :---: | +| MNIST
CIFAR10 | 10| Distribution of equal number of data examples among n clients | Distribution of data examples such that each client has at most 2 (customizable) classes | + + +**Training Hyperparameters:** + +| Description | Data Setting | MNIST | CIFAR-10 | +| :---: | :---: | :---:| :---: | +Total Clients | both | 100 | 100 | +Clients Per Round | both | 100 | 100 +Local Epcohs | both | 5 | 5 +Num. ROunds | IID
non-IID| 200
400 | 400
800 +Optimizer | both | SGD | SGD +Momentum | both | 0.9 | 0.9 +Weight-decay | both | 5.00e-04 | 5.00e-04 +Learning Rate | both | 0.01 | 0.1 +Decay Schedule | IID
non-IID| [100]
[150, 250] | [200]
[300,500] +Hidden Layers | both | [64 , 128 , 256 , 512] | [64 , 128 , 256 , 512] + + +The hyperparameters of Fedavg baseline are available in [Liang et al (2020)](https://arxiv.org/abs/2001.01523). + +## Environment Setup + +To construct the Python environment, simply run: + +```bash +# Set python version +pyenv install 3.10.6 +pyenv local 3.10.6 + +# Tell poetry to use python 3.10 +poetry env use 3.10.6 + +# install the base Poetry environment +poetry install + +# activate the environment +poetry shell +``` + + +## Running the Experiments +To run HeteroFL experiments in poetry activated environment: +```bash +# The main experiment implemented in your baseline using default hyperparameters (that should be setup in the Hydra configs) +# should run (including dataset download and necessary partitioning) by executing the command: + +python -m heterofl.main # Which runs the heterofl with arguments availbale in heterfl/conf/base.yaml + +# We could override the settings that were specified in base.yaml using the command-line-arguments +# Here's an example for changing the dataset name, non-iid and model +python -m heterofl.main dataset.dataset_name='CIFAR10' dataset.iid=False model.model_name='resnet18' + +# Similarly, another example for changing num_rounds, model_split_mode, and model_mode +python -m heterofl.main num_rounds=400 control.model_split_mode='dynamic' control.model_mode='a1-b1' + +# Similarly, another example for changing num_rounds, model_split_mode, and model_mode +python -m heterofl.main num_rounds=400 control.model_split_mode='dynamic' control.model_mode='a1-b1' + +``` +To run FedAvg experiments: +```bash +python -m heterofl.main --config-name fedavg +# Similarly to the commands illustrated above, we can modify the default settings in the fedavg.yaml file. 
+``` + +## Expected Results + +```bash +# running the multirun for IID-MNIST with various model-modes using default config +python -m heterofl.main --multirun control.model_mode='a1','a1-e1','a1-b1-c1-d1-e1' + +# running the multirun for IID-CIFAR10 dataset with various model-modes by modifying default config +python -m heterofl.main --multirun control.model_mode='a1','a1-e1','a1-b1-c1-d1-e1' dataset.dataset_name='CIFAR10' model.model_name='resnet18' num_rounds=400 optim_scheduler.lr=0.1 strategy.milestones=[150, 250] + +# running the multirun for non-IID-MNIST with various model-modes by modifying default config +python -m heterofl.main --multirun control.model_mode='a1','a1-e1','a1-b1-c1-d1-e1' dataset.iid=False num_rounds=400 optim_scheduler.milestones=[200] + +# similarly, we can perform for various model-modes, datasets. But we cannot multirun with both non-iid and iid at once for reproducing the tables below, since the number of rounds and milestones for MultiStepLR are different for non-iid and iid. The tables below are the reproduced results of various multiruns. + +#To reproduce the fedavg results +#for MNIST dataset +python -m heterofl.main --config-name fedavg --multirun dataset.iid=True,False +# for CIFAR10 dataset +python -m heterofl.main --config-name fedavg --multirun num_rounds=1800 dataset.dataset_name='CIFAR10' dataset.iid=True,False dataset.batch_size.train=50 dataset.batch_size.test=128 model.model_name='CNNCifar' optim_scheduler.lr=0.1 +``` +
+ +Results of the combination of various computation complexity levels for **MNIST** dataset with **dynamic** scenario(where a client does not belong to a fixed computational complexity level): + +| Model | Ratio | Parameters | FLOPS | Space(MB) | IID-accuracy | non-IId local-acc | non-IID global-acc | +| :--: | :----: | :-----: | :-------: | :-------: | :----------: | :---------------: | :----------------: | +| a | 1 | 1556.874 K | 80.504 M | 5.939 | 99.47 | 99.82 | 98.87 | +| a-e | 0.502 | 781.734 K | 40.452 M | 2.982 | 99.49 | 99.86 | 98.9 | +| a-b-c-d-e | 0.267 | 415.807 K | 21.625 M | 1.586 | 99.23 | 99.84 | 98.5 | +| b | 1 | 391.37 K | 20.493 M | 1.493 | 99.54 | 99.81 | 98.81 | +| b-e | 0.508 | 198.982 K | 10.447 M | 0.759 | 99.48 | 99.87 | 98.98 | +| b-c-d-e | 0.334 | 130.54 K | 6.905 M | 0.498 | 99.34 | 99.81 | 98.73 | +| c | 1 | 98.922 K | 5.307 M | 0.377 | 99.37 | 99.64 | 97.14 | +| c-e | 0.628 | 62.098 K | 3.363 M | 0.237 | 99.16 | 99.72 | 97.68 | +| c-d-e | 0.441 | 43.5965 K | 2.375 M | 0.166 | 99.28 | 99.69 | 97.27 | +| d | 1 | 25.274 K | 1.418 M | 0.096 | 99.07 | 99.77 | 97.58 | +| d-e | 0.63 | 15.934 K | 0.909 M | 0.0608 | 99.12 | 99.65 | 97.33 | +| e | 1 | 6.594 K | 0.4005 M | 0.025 | 98.46 | 99.53 | 96.5 | +| FedAvg | 1 | 633.226 K | 1.264128 M | 2.416 | 97.85 | 97.76 | 97.74 | + + +
+ +Results of the combination of various computation complexity levels for **CIFAR10** dataset with **dynamic** scenario(where a client does not belong to a fixed computational complexity level): +> *The HeteroFL paper reports a model with 1.8M parameters for their FedAvg baseline. However, as stated by the paper authors, those results are borrowed from [Liang et al (2020)](https://arxiv.org/abs/2001.01523), which uses a small CNN with fewer parameters (~64K as shown in this table below). We believe the HeteroFL authors made a mistake when reporting the number of parameters. We borrowed the model from Liang et al (2020)'s [repo](https://github.com/pliang279/LG-FedAvg/blob/master/models/Nets.py). As in the paper, FedAvg was run for 1800 rounds.* + + +| Model | Ratio | Parameters | FLOPS | Space(MB) | IID-acc | non-IId local-acc
Final   Best| non-IID global-acc
Final    Best| +| :--: | :----: | :-----: | :-------: | :-------: | :----------: | :-----: | :------: | + a | 1 | 9622 K | 330.2 M | 36.705 | 90.83 | 89.04    92.41 | 48.72    59.29 | + a-e | 0.502 | 4830 K | 165.9 M | 18.426 | 89.98 | 87.98    91.25 | 50.16    57.66 | + a-b-c-d-e | 0.267 | 2565 K | 88.4 M | 9.785 | 87.46 | 89.75    91.19 | 46.96    55.6 | + b | 1 | 2409 K | 83.3 M | 9.189 | 88.59 | 89.31    92.07 | 49.85    60.79 | + b-e | 0.508 | 1224 K | 42.4 M | 4.667 | 89.23 | 90.93    92.3 | 55.46    61.98 | + b-c-d-e | 0.332 | 801 K | 27.9 M | 3.054 | 87.61 | 89.23    91.83 | 51.59    59.4 | + c | 1 | 604 K | 21.2 M | 2.303 | 85.74 | 89.83    91.75 | 44.03    58.26 | + c-e | 0.532 | 321 K | 11.4 M | 1.225 | 87.32 | 89.28    91.56 | 53.43    59.5 | + c-d-e | 0.438 | 265 K | 9.4 M | 1.010 | 85.59 | 91.48    92.05 | 58.26    61.79 | + d | 1 | 152 K | 5.5 M | 0.579 | 82.91 | 90.81    91.47 | 55.95    58.34 | + d-e | 0.626 | 95 K | 3.5 M | 0.363 | 82.77 | 88.79    90.13 | 48.49    54.18 | + e | 1 | 38 K | 1.5 M | 0.146 | 76.53 | 90.05    90.91 | 54.68    57.05 | +|FedAvg | 1 | 64 K| 1.3 M | 0.2446 | 70.65 | 53.12    58.6 | 52.93    58.47 | + + + diff --git a/baselines/heterofl/heterofl/__init__.py b/baselines/heterofl/heterofl/__init__.py new file mode 100644 index 000000000000..a5e567b59135 --- /dev/null +++ b/baselines/heterofl/heterofl/__init__.py @@ -0,0 +1 @@ +"""Template baseline package.""" diff --git a/baselines/heterofl/heterofl/client.py b/baselines/heterofl/heterofl/client.py new file mode 100644 index 000000000000..cf325cb7e85b --- /dev/null +++ b/baselines/heterofl/heterofl/client.py @@ -0,0 +1,133 @@ +"""Defines the MNIST Flower Client and a function to instantiate it.""" + +from typing import Callable, Dict, List, Optional, Tuple + +import flwr as fl +import torch +from flwr.common.typing import NDArrays + +from heterofl.models import create_model, get_parameters, set_parameters, test, train + +# from torch.utils.data import DataLoader + + +class 
class FlowerNumPyClient(fl.client.NumPyClient):
    """Standard Flower NumPy client used for local training and evaluation.

    Holds the (possibly width-reduced, see ``model_rate``) local model and
    the dataloaders of a single simulated device.
    """

    def __init__(
        self,
        net: torch.nn.Module,
        dataloader: Dict,
        model_rate: Optional[float],
        client_train_settings: Dict,
    ):
        """Initialize the client.

        Parameters
        ----------
        net : torch.nn.Module
            Model this client trains and evaluates.
        dataloader : Dict
            Dict with keys "trainloader", "valloader" and "label_split".
        model_rate : Optional[float]
            Fraction of the global model width held by this client,
            or None when model heterogeneity is disabled.
        client_train_settings : Dict
            Optimizer / lr / momentum / epoch settings consumed by ``train``.
        """
        self.net = net
        self.trainloader = dataloader["trainloader"]
        self.label_split = dataloader["label_split"]
        self.valloader = dataloader["valloader"]
        self.model_rate = model_rate
        self.client_train_settings = client_train_settings
        # Train on a GPU when one is visible to this process, else on CPU.
        self.client_train_settings["device"] = torch.device(
            "cuda:0" if torch.cuda.is_available() else "cpu"
        )

    def get_parameters(self, config) -> NDArrays:
        """Return the parameters of the current net."""
        return get_parameters(self.net)

    def fit(self, parameters, config) -> Tuple[NDArrays, int, Dict]:
        """Train the received parameters on the local train set.

        NOTE(review): the reported example count is ``len(self.trainloader)``,
        i.e. the number of *batches*, not samples — confirm the strategy
        weights client updates accordingly.
        """
        set_parameters(self.net, parameters)
        # The server may broadcast a per-round learning rate.
        if "lr" in config:
            self.client_train_settings["lr"] = config["lr"]
        train(
            self.net,
            self.trainloader,
            self.label_split,
            self.client_train_settings,
        )
        return get_parameters(self.net), len(self.trainloader), {}

    def evaluate(self, parameters, config) -> Tuple[float, int, Dict]:
        """Evaluate the received parameters on the local validation set."""
        set_parameters(self.net, parameters)
        loss, accuracy = test(
            self.net, self.valloader, device=self.client_train_settings["device"]
        )
        return float(loss), len(self.valloader), {"accuracy": float(accuracy)}


def gen_client_fn(
    model_config: Dict,
    client_to_model_rate_mapping: Optional[List[float]],
    client_train_settings: Dict,
    data_loaders,
) -> Callable[[str], FlowerNumPyClient]:  # pylint: disable=too-many-arguments
    """Generate the client function that creates the Flower Clients.

    Parameters
    ----------
    model_config : Dict
        Dict that contains all the information required to
        create a model (data_shape, hidden_layers, classes_size, ...).
    client_to_model_rate_mapping : Optional[List[float]]
        List that contains the model_rates of the clients;
        model_rate of client with cid i = client_to_model_rate_mapping[i].
        When None, every client receives the full model (model_rate=None).
    client_train_settings : Dict
        Dict that contains information regarding optimizer, lr and
        momentum required by the client to train.
    data_loaders : Dict
        Dict with keys "trainloaders", "valloaders" and "label_split",
        each a per-client list indexed by int(cid).

    Returns
    -------
    Callable[[str], FlowerNumPyClient]
        The client function that creates Flower clients.
    """

    def client_fn(cid: str) -> FlowerNumPyClient:
        """Create a Flower client representing a single organization."""
        # Each client gets its own train/val loaders and label split,
        # so every client trains and evaluates on unique data.
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        client_dataloader = {
            "trainloader": data_loaders["trainloaders"][int(cid)],
            "valloader": data_loaders["valloaders"][int(cid)],
            "label_split": data_loaders["label_split"][int(cid)],
        }
        model_rate = None
        if client_to_model_rate_mapping is not None:
            model_rate = client_to_model_rate_mapping[int(cid)]

        return FlowerNumPyClient(
            net=create_model(
                model_config,
                model_rate=model_rate,
                device=device,
            ),
            dataloader=client_dataloader,
            model_rate=model_rate,
            client_train_settings=client_train_settings,
        )

    return client_fn
+ """ + return len(self.clients) + + def num_available(self) -> int: + """Return the number of available clients. + + Returns + ------- + num_available : int + The number of currently available clients. + """ + return len(self) + + def wait_for(self, num_clients: int, timeout: int = 86400) -> bool: + """Wait until at least `num_clients` are available. + + Blocks until the requested number of clients is available or until a + timeout is reached. Current timeout default: 1 day. + + Parameters + ---------- + num_clients : int + The number of clients to wait for. + timeout : int + The time in seconds to wait for, defaults to 86400 (24h). + + Returns + ------- + success : bool + """ + with self._cv: + return self._cv.wait_for( + lambda: len(self.clients) >= num_clients, timeout=timeout + ) + + def register(self, client: ClientProxy) -> bool: + """Register Flower ClientProxy instance. + + Parameters + ---------- + client : flwr.server.client_proxy.ClientProxy + + Returns + ------- + success : bool + Indicating if registration was successful. False if ClientProxy is + already registered or can not be registered for any reason. + """ + if client.cid in self.clients: + return False + + self.clients[client.cid] = client + + # in case of not a simulation, this type of method can be used + # if self.is_simulation is False: + # prop = client.get_properties(None, timeout=86400) + # self.clients_to_model_rate_mapping[int(client.cid)] = prop["model_rate"] + # self.client_label_split[int(client.cid)] = prop["label_split"] + + with self._cv: + self._cv.notify_all() + + return True + + def unregister(self, client: ClientProxy) -> None: + """Unregister Flower ClientProxy instance. + + This method is idempotent. 
+ + Parameters + ---------- + client : flwr.server.client_proxy.ClientProxy + """ + if client.cid in self.clients: + del self.clients[client.cid] + + with self._cv: + self._cv.notify_all() + + def all(self) -> Dict[str, ClientProxy]: + """Return all available clients.""" + return self.clients + + def get_client_to_model_mapping(self, cid) -> float: + """Return model rate of client with cid.""" + return self.clients_to_model_rate_mapping[int(cid)] + + def get_all_clients_to_model_mapping(self) -> List[float]: + """Return all available clients to model rate mapping.""" + return self.clients_to_model_rate_mapping.copy() + + def update(self, server_round: int) -> None: + """Update the client to model rate mapping.""" + if self.is_simulation is True: + if ( + server_round == 1 and self.model_rate_manager.model_split_mode == "fix" + ) or (self.model_rate_manager.model_split_mode == "dynamic"): + ans = self.model_rate_manager.create_model_rate_mapping( + self.num_available() + ) + # copy self.clients_to_model_rate_mapping , ans + for i, model_rate in enumerate(ans): + self.clients_to_model_rate_mapping[i] = model_rate + print( + "clients to model rate mapping ", self.clients_to_model_rate_mapping + ) + return + + # to be handled in case of not a simulation, i.e. to get the properties + # again from the clients as they can change the model_rate + # for i in range(self.num_available): + # # need to test this , accumilates the + # # changing model rate of the client + # self.clients_to_model_rate_mapping[i] = + # self.clients[str(i)].get_properties['model_rate'] + # return + + def sample( + self, + num_clients: int, + min_num_clients: Optional[int] = None, + criterion: Optional[Criterion] = None, + ) -> List[ClientProxy]: + """Sample a number of Flower ClientProxy instances.""" + # Block until at least num_clients are connected. 
+ if min_num_clients is None: + min_num_clients = num_clients + self.wait_for(min_num_clients) + # Sample clients which meet the criterion + available_cids = list(self.clients) + if criterion is not None: + available_cids = [ + cid for cid in available_cids if criterion.select(self.clients[cid]) + ] + + if num_clients > len(available_cids): + log( + INFO, + "Sampling failed: number of available clients" + " (%s) is less than number of requested clients (%s).", + len(available_cids), + num_clients, + ) + return [] + + random_indices = torch.randperm(len(available_cids))[:num_clients] + # Use the random indices to select clients + sampled_cids = [available_cids[i] for i in random_indices] + sampled_cids = random.sample(available_cids, num_clients) + print(f"Sampled CIDS = {sampled_cids}") + return [self.clients[cid] for cid in sampled_cids] diff --git a/baselines/heterofl/heterofl/conf/base.yaml b/baselines/heterofl/heterofl/conf/base.yaml new file mode 100644 index 000000000000..42edf419cc38 --- /dev/null +++ b/baselines/heterofl/heterofl/conf/base.yaml @@ -0,0 +1,47 @@ +num_clients: 100 +num_epochs: 5 +num_rounds: 800 +seed: 0 +client_resources: + num_cpus: 1 + num_gpus: 0.08 + +control: + model_split_mode: 'dynamic' + model_mode: 'a1-b1-c1-d1-e1' + +dataset: + dataset_name: 'CIFAR10' + iid: False + shard_per_user : 2 # only used in case of non-iid (i.e. 
def load_datasets(  # pylint: disable=too-many-arguments
    strategy_name: str,
    config: DictConfig,
    num_clients: int,
    seed: Optional[int] = 42,
) -> Tuple[
    DataLoader, List[DataLoader], List[torch.tensor], List[DataLoader], DataLoader
]:
    """Create the dataloaders to be fed into the model.

    Parameters
    ----------
    strategy_name : str
        Name of the strategy in use (e.g. "heterofl"); forwarded to the
        partitioning code, which uses it to pick dataset preprocessing.
    config : DictConfig
        Parameterises the dataset partitioning process.
    num_clients : int
        The number of clients that hold a part of the data.
    seed : int, optional
        Used to set a fixed seed to replicate experiments, by default 42.

    Returns
    -------
    Tuple[DataLoader, List[DataLoader], List[torch.tensor], List[DataLoader], DataLoader]
        The entire-trainset DataLoader (for server-side use),
        the per-client train DataLoaders, the per-client label splits,
        the per-client validation DataLoaders, and the global test DataLoader.
    """
    print(f"Dataset partitioning config: {config}")
    trainset, datasets, label_split, client_testsets, testset = _partition_data(
        num_clients,
        dataset_name=config.dataset_name,
        strategy_name=strategy_name,
        iid=config.iid,
        dataset_division={
            "shard_per_user": config.shard_per_user,
            "balance": config.balance,
        },
        seed=seed,
    )
    entire_trainloader = DataLoader(
        trainset, batch_size=config.batch_size.train, shuffle=config.shuffle.train
    )

    # One train loader per client partition.
    trainloaders = [
        DataLoader(
            dataset,
            batch_size=config.batch_size.train,
            shuffle=config.shuffle.train,
        )
        for dataset in datasets
    ]

    # NOTE(review): the "validation" loaders are built from the per-client
    # *test* partitions, not from a held-out split of the training data.
    valloaders = [
        DataLoader(
            client_testset,
            batch_size=config.batch_size.test,
            shuffle=config.shuffle.test,
        )
        for client_testset in client_testsets
    ]

    return (
        entire_trainloader,
        trainloaders,
        label_split,
        valloaders,
        DataLoader(
            testset, batch_size=config.batch_size.test, shuffle=config.shuffle.test
        ),
    )
def _download_data(dataset_name: str, strategy_name: str) -> Tuple[Dataset, Dataset]:
    """Download (if absent) and return the train and test sets.

    Parameters
    ----------
    dataset_name : str
        Either "MNIST" or "CIFAR10".
    strategy_name : str
        "heterofl" selects the CIFAR10 normalization constants used by the
        HeteroFL authors; any other value uses ImageNet-style constants.

    Raises
    ------
    ValueError
        If ``dataset_name`` is not one of the supported datasets.
    """
    root = "./data/{}".format(dataset_name)
    if dataset_name == "MNIST":
        # Same deterministic transform for train and test.
        mnist_transform = dt.Compose(
            [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
        )
        trainset = dt.MNIST(
            root=root, split="train", subset="label", transform=mnist_transform
        )
        testset = dt.MNIST(
            root=root, split="test", subset="label", transform=mnist_transform
        )
    elif dataset_name == "CIFAR10":
        normalize = transforms.Normalize(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        )
        if strategy_name == "heterofl":
            # CIFAR10 per-channel statistics, as used by the HeteroFL authors.
            normalize = transforms.Normalize(
                (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
            )
        trainset = dt.CIFAR10(
            root=root,
            split="train",
            subset="label",
            transform=dt.Compose(
                [
                    transforms.RandomCrop(32, padding=4),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor(),
                    normalize,
                ]
            ),
        )
        testset = dt.CIFAR10(
            root=root,
            split="test",
            subset="label",
            transform=dt.Compose(
                [
                    transforms.ToTensor(),
                    normalize,
                ]
            ),
        )
    else:
        raise ValueError(f"{dataset_name} is not valid")

    return trainset, testset


# pylint: disable=too-many-arguments
def _partition_data(
    num_clients: int,
    dataset_name: str,
    strategy_name: str,
    iid: Optional[bool] = False,
    dataset_division=None,
    seed: Optional[int] = 42,
) -> Tuple[Dataset, List[Dataset], List[torch.tensor], List[Dataset], Dataset]:
    """Partition train/test data among ``num_clients`` clients.

    NOTE(review): ``dataset_division`` (keys "balance" and "shard_per_user")
    is indexed unconditionally, so callers must always pass it — confirm
    whether a None default is ever exercised.
    """
    trainset, testset = _download_data(dataset_name, strategy_name)

    if dataset_name in ("MNIST", "CIFAR10"):
        classes_size = 10

    if dataset_division["balance"]:
        trainset = _balance_classes(trainset, seed)

    if iid:
        datasets, label_split = iid_partition(trainset, num_clients, seed=seed)
        client_testsets, _ = iid_partition(testset, num_clients, seed=seed)
    else:
        datasets, label_split = non_iid(
            {"dataset": trainset, "classes_size": classes_size},
            num_clients,
            dataset_division["shard_per_user"],
        )
        # Reuse the train label split so each client is tested on its
        # own label distribution.
        client_testsets, _ = non_iid(
            {
                "dataset": testset,
                "classes_size": classes_size,
            },
            num_clients,
            dataset_division["shard_per_user"],
            label_split,
        )

    label_split = [torch.Tensor(labels) for labels in label_split]

    return trainset, datasets, label_split, client_testsets, testset


def iid_partition(
    dataset: Dataset, num_clients: int, seed: Optional[int] = 42
) -> Tuple[List[Dataset], List[torch.tensor]]:
    """IID partition of dataset among clients.

    BUGFIX(review): the original used ``[len(dataset) // num_clients] *
    num_clients`` which does not sum to ``len(dataset)`` when the size is not
    divisible, making ``torch.utils.data.random_split`` raise. The remainder
    is now distributed one sample at a time to the first clients; the result
    is unchanged whenever the size divides evenly.
    """
    base_size, remainder = divmod(len(dataset), num_clients)
    lengths = [
        base_size + 1 if i < remainder else base_size for i in range(num_clients)
    ]

    divided_dataset = random_split(
        dataset, lengths, torch.Generator().manual_seed(seed)
    )
    # Record which labels each client ended up with.
    label_split = [
        torch.unique(torch.Tensor([target for _, target in divided_dataset[i]]))
        for i in range(num_clients)
    ]

    return divided_dataset, label_split


def non_iid(
    dataset_info,
    num_clients: int,
    shard_per_user: int,
    label_split=None,
    seed=42,
) -> Tuple[List[Dataset], List]:
    """Non-IID partition of dataset among clients.

    Adopted from the HeteroFL authors' implementation: each class is cut into
    shards and every client receives ``shard_per_user`` shards.
    """
    data_split: Dict[int, List] = {i: [] for i in range(num_clients)}

    label_idx_split, shard_per_class = _split_dataset_targets_idx(
        dataset_info["dataset"],
        shard_per_user,
        num_clients,
        dataset_info["classes_size"],
    )

    if label_split is None:
        # Deal out a shuffled multiset of class labels, shard_per_user each.
        label_split = list(range(dataset_info["classes_size"])) * shard_per_class
        label_split = torch.tensor(label_split)[
            torch.randperm(
                len(label_split), generator=torch.Generator().manual_seed(seed)
            )
        ].tolist()
        label_split = np.array(label_split).reshape((num_clients, -1)).tolist()

        for i, _ in enumerate(label_split):
            label_split[i] = np.unique(label_split[i]).tolist()

    for i in range(num_clients):
        for label_i in label_split[i]:
            # Pop one random shard of this class for client i.
            idx = torch.arange(len(label_idx_split[label_i]))[
                torch.randperm(
                    len(label_idx_split[label_i]),
                    generator=torch.Generator().manual_seed(seed),
                )[0]
            ].item()
            data_split[i].extend(label_idx_split[label_i].pop(idx))

    return (
        _get_dataset_from_idx(dataset_info["dataset"], data_split, num_clients),
        label_split,
    )


def _split_dataset_targets_idx(dataset, shard_per_user, num_clients, classes_size):
    """Group sample indices by class and cut each class into equal shards."""
    label = np.array(dataset.target) if hasattr(dataset, "target") else dataset.targets
    label_idx_split: Dict = {}
    for i, _ in enumerate(label):
        label_i = label[i].item()
        if label_i not in label_idx_split:
            label_idx_split[label_i] = []
        label_idx_split[label_i].append(i)

    shard_per_class = int(shard_per_user * num_clients / classes_size)

    for label_i in label_idx_split:
        label_idx = label_idx_split[label_i]
        # Samples that do not fill a whole shard are appended one-by-one
        # to the first shards.
        num_leftover = len(label_idx) % shard_per_class
        leftover = label_idx[-num_leftover:] if num_leftover > 0 else []
        new_label_idx = (
            np.array(label_idx[:-num_leftover])
            if num_leftover > 0
            else np.array(label_idx)
        )
        new_label_idx = new_label_idx.reshape((shard_per_class, -1)).tolist()

        for i, leftover_label_idx in enumerate(leftover):
            new_label_idx[i] = np.concatenate([new_label_idx[i], [leftover_label_idx]])
        label_idx_split[label_i] = new_label_idx
    return label_idx_split, shard_per_class


def _get_dataset_from_idx(dataset, data_split, num_clients):
    """Materialize each client's index list as a torch Subset."""
    return [Subset(dataset, data_split[i]) for i in range(num_clients)]


def _balance_classes(
    trainset: Dataset,
    seed: Optional[int] = 42,
) -> Dataset:
    """Truncate every class to the size of the smallest one, then shuffle."""
    class_counts = np.bincount(trainset.target)
    targets = torch.Tensor(trainset.target)
    smallest = np.min(class_counts)
    idxs = targets.argsort()
    # First block is class 0; each cumulative count then marks the start of
    # the next class (the final, empty slice is harmless).
    tmp = [Subset(trainset, idxs[: int(smallest)])]
    tmp_targets = [targets[idxs[: int(smallest)]]]
    for count in np.cumsum(class_counts):
        tmp.append(Subset(trainset, idxs[int(count) : int(count + smallest)]))
        tmp_targets.append(targets[idxs[int(count) : int(count + smallest)]])
    unshuffled = ConcatDataset(tmp)
    unshuffled_targets = torch.cat(tmp_targets)
    shuffled_idxs = torch.randperm(
        len(unshuffled), generator=torch.Generator().manual_seed(seed)
    )
    shuffled = Subset(unshuffled, shuffled_idxs)
    shuffled.targets = unshuffled_targets[shuffled_idxs]

    return shuffled


def _sort_by_class(
    trainset: Dataset,
) -> Dataset:
    """Return a dataset whose samples are grouped by ascending class label."""
    class_counts = np.bincount(trainset.targets)
    idxs = trainset.targets.argsort()  # sort targets in ascending order

    tmp = []
    tmp_targets = []

    start = 0
    for count in np.cumsum(class_counts):
        tmp.append(Subset(trainset, idxs[start : int(count + start)]))
        tmp_targets.append(trainset.targets[idxs[start : int(count + start)]])
        start += count
    sorted_dataset = ConcatDataset(tmp)
    sorted_dataset.targets = torch.cat(tmp_targets)
    return sorted_dataset


# pylint: disable=too-many-locals, too-many-arguments
def _power_law_split(
    sorted_trainset: Dataset,
    num_partitions: int,
    num_labels_per_partition: int = 2,
    min_data_per_partition: int = 10,
    mean: float = 0.0,
    sigma: float = 2.0,
) -> List[Dataset]:
    """Partition the dataset following a power-law distribution.

    Follows the implementation of Li et al. 2020
    (https://arxiv.org/abs/1812.06127) with default values set accordingly.

    Parameters
    ----------
    sorted_trainset : Dataset
        The training dataset sorted by label/class.
    num_partitions : int
        Number of partitions to create.
    num_labels_per_partition : int
        Number of labels to have in each dataset partition. For
        example if set to two, all training examples in a given
        partition will belong to the same two classes. Default 2.
    min_data_per_partition : int
        Minimum number of datapoints included in each partition, default 10.
    mean : float
        Mean value for LogNormal distribution to construct power-law, default 0.0.
    sigma : float
        Sigma value for LogNormal distribution to construct power-law, default 2.0.

    Returns
    -------
    List[Dataset]
        The list of partitioned training datasets (one Subset per partition).
        (BUGFIX(review): annotation corrected — the original claimed a single
        ``Dataset`` but a list of Subsets is returned.)
    """
    targets = sorted_trainset.targets
    full_idx = list(range(len(targets)))

    class_counts = np.bincount(sorted_trainset.targets)
    labels_cs = np.cumsum(class_counts)
    labels_cs = [0] + labels_cs[:-1].tolist()

    partitions_idx: List[List[int]] = []
    num_classes = len(np.bincount(targets))
    hist = np.zeros(num_classes, dtype=np.int32)

    # assign min_data_per_partition
    min_data_per_class = int(min_data_per_partition / num_labels_per_partition)
    for u_id in range(num_partitions):
        partitions_idx.append([])
        for cls_idx in range(num_labels_per_partition):
            # label for the u_id-th client
            cls = (u_id + cls_idx) % num_classes
            # record minimum data
            indices = list(
                full_idx[
                    labels_cs[cls]
                    + hist[cls] : labels_cs[cls]
                    + hist[cls]
                    + min_data_per_class
                ]
            )
            partitions_idx[-1].extend(indices)
            hist[cls] += min_data_per_class

    # add remaining images following power-law
    probs = np.random.lognormal(
        mean,
        sigma,
        (num_classes, int(num_partitions / num_classes), num_labels_per_partition),
    )
    remaining_per_class = class_counts - hist
    # obtain how many samples each partition should be assigned for each of the
    # labels it contains
    # pylint: disable=too-many-function-args
    probs = (
        remaining_per_class.reshape(-1, 1, 1)
        * probs
        / np.sum(probs, (1, 2), keepdims=True)
    )

    for u_id in range(num_partitions):
        for cls_idx in range(num_labels_per_partition):
            cls = (u_id + cls_idx) % num_classes
            count = int(probs[cls, u_id // num_classes, cls_idx])

            # add count of specific class to partition
            indices = full_idx[
                labels_cs[cls] + hist[cls] : labels_cs[cls] + hist[cls] + count
            ]
            partitions_idx[u_id].extend(indices)
            hist[cls] += count

    # construct subsets
    partitions = [Subset(sorted_trainset, p) for p in partitions_idx]
    return partitions
# pylint: disable=too-many-instance-attributes
class CIFAR10(Dataset):
    """CIFAR10 dataset (download, preprocessing, caching and indexing).

    Adopted from the HeteroFL authors' implementation. Raw archives are
    downloaded once, converted to ``train.pt`` / ``test.pt`` / ``meta.pt``
    under ``processed/``, and loaded from there afterwards.
    """

    data_name = "CIFAR10"
    # (url, md5) pairs of the raw archives.
    file = [
        (
            "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz",
            "c58f30108f718f92721af3b95e74349a",
        )
    ]

    def __init__(self, root, split, subset, transform=None):
        self.root = os.path.expanduser(root)
        self.split = split  # "train" or "test"
        self.subset = subset  # target key, e.g. "label"
        self.transform = transform
        # Process the raw files once; later constructions load the cache.
        if not check_exists(self.processed_folder):
            self.process()
        self.img, self.target = load(
            os.path.join(self.processed_folder, f"{self.split}.pt")
        )
        self.target = self.target[self.subset]
        self.classes_counts = make_classes_counts(self.target)
        self.classes_to_labels, self.classes_size = load(
            os.path.join(self.processed_folder, "meta.pt")
        )
        self.classes_to_labels, self.classes_size = (
            self.classes_to_labels[self.subset],
            self.classes_size[self.subset],
        )

    def __getitem__(self, index):
        """Return the (transformed) image and its target at *index*."""
        img = Image.fromarray(self.img[index])
        target = torch.tensor(self.target[index])
        inp = {"img": img, self.subset: target}
        if self.transform is not None:
            inp = self.transform(inp)
        # BUGFIX(review): index by the configured subset key instead of the
        # hard-coded "label" — identical for subset == "label" (the only
        # value visible at call sites), but no longer a KeyError otherwise.
        return inp["img"], inp[self.subset]

    def __len__(self):
        """Length of the dataset."""
        return len(self.img)

    @property
    def processed_folder(self):
        """Return path of processed folder."""
        return os.path.join(self.root, "processed")

    @property
    def raw_folder(self):
        """Return path of raw folder."""
        return os.path.join(self.root, "raw")

    def process(self):
        """Download (if needed) and cache train/test/meta tensors."""
        if not check_exists(self.raw_folder):
            self.download()
        train_set, test_set, meta = self.make_data()
        save(train_set, os.path.join(self.processed_folder, "train.pt"))
        save(test_set, os.path.join(self.processed_folder, "test.pt"))
        save(meta, os.path.join(self.processed_folder, "meta.pt"))

    def download(self):
        """Download the raw archives and extract them."""
        makedir_exist_ok(self.raw_folder)
        for url, md5 in self.file:
            filename = os.path.basename(url)
            download_url(url, self.raw_folder, filename, md5)
            extract_file(os.path.join(self.raw_folder, filename))

    def __repr__(self):
        """Represent CIFAR10 as string."""
        fmt_str = (
            f"Dataset {self.__class__.__name__}\nSize: {self.__len__()}\n"
            f"Root: {self.root}\nSplit: {self.split}\nSubset: {self.subset}\n"
            f"Transforms: {self.transform.__repr__()}"
        )
        return fmt_str

    def make_data(self):
        """Parse the raw pickle batches into (imgs, targets) plus class meta."""
        train_filenames = [
            "data_batch_1",
            "data_batch_2",
            "data_batch_3",
            "data_batch_4",
            "data_batch_5",
        ]
        test_filenames = ["test_batch"]
        train_img, train_label = _read_pickle_file(
            os.path.join(self.raw_folder, "cifar-10-batches-py"), train_filenames
        )
        test_img, test_label = _read_pickle_file(
            os.path.join(self.raw_folder, "cifar-10-batches-py"), test_filenames
        )
        train_target, test_target = {"label": train_label}, {"label": test_label}
        with open(
            os.path.join(self.raw_folder, "cifar-10-batches-py", "batches.meta"), "rb"
        ) as fle:
            data = pickle.load(fle, encoding="latin1")
            classes = data["label_names"]
        classes_to_labels = {"label": anytree.Node("U", index=[])}
        for cls in classes:
            make_tree(classes_to_labels["label"], [cls])
        classes_size = {"label": make_flat_index(classes_to_labels["label"])}
        return (
            (train_img, train_target),
            (test_img, test_target),
            (classes_to_labels, classes_size),
        )


def _read_pickle_file(path, filenames):
    """Read CIFAR pickle batches, returning (N, 32, 32, 3) images and labels."""
    img, label = [], []
    for filename in filenames:
        file_path = os.path.join(path, filename)
        with open(file_path, "rb") as file:
            entry = pickle.load(file, encoding="latin1")
            img.append(entry["data"])
            # CIFAR10 batches store "labels"; CIFAR100 stores "fine_labels".
            if "labels" in entry:
                label.extend(entry["labels"])
            else:
                label.extend(entry["fine_labels"])
    img = np.vstack(img).reshape(-1, 3, 32, 32)
    img = img.transpose((0, 2, 3, 1))
    return img, label
"9fb629c4189551a2d022fa330f9573f3", + ), + ( + "http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz", + "d53e105ee54ea40749a09fcbcd1e9432", + ), + ( + "http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz", + "ec29112dd5afa0611ce80d1b7f02629c", + ), + ] + + def __init__(self, root, split, subset, transform=None): + self.root = os.path.expanduser(root) + self.split = split + self.subset = subset + self.transform = transform + if not check_exists(self.processed_folder): + self.process() + self.img, self.target = load( + os.path.join(self.processed_folder, "{}.pt".format(self.split)) + ) + self.target = self.target[self.subset] + self.classes_counts = make_classes_counts(self.target) + self.classes_to_labels, self.classes_size = load( + os.path.join(self.processed_folder, "meta.pt") + ) + self.classes_to_labels, self.classes_size = ( + self.classes_to_labels[self.subset], + self.classes_size[self.subset], + ) + + def __getitem__(self, index): + """Get the item with index.""" + img, target = Image.fromarray(self.img[index]), torch.tensor(self.target[index]) + inp = {"img": img, self.subset: target} + if self.transform is not None: + inp = self.transform(inp) + return inp["img"], inp["label"] + + def __len__(self): + """Length of the dataset.""" + return len(self.img) + + @property + def processed_folder(self): + """Return path of processed folder.""" + return os.path.join(self.root, "processed") + + @property + def raw_folder(self): + """Return path of raw folder.""" + return os.path.join(self.root, "raw") + + def process(self): + """Save the dataset accordingly.""" + if not check_exists(self.raw_folder): + self.download() + train_set, test_set, meta = self.make_data() + save(train_set, os.path.join(self.processed_folder, "train.pt")) + save(test_set, os.path.join(self.processed_folder, "test.pt")) + save(meta, os.path.join(self.processed_folder, "meta.pt")) + + def download(self): + """Download and save the dataset accordingly.""" + 
makedir_exist_ok(self.raw_folder) + for url, md5 in self.file: + filename = os.path.basename(url) + download_url(url, self.raw_folder, filename, md5) + extract_file(os.path.join(self.raw_folder, filename)) + + def __repr__(self): + """Represent CIFAR10 as string.""" + fmt_str = ( + f"Dataset {self.__class__.__name__}\nSize: {self.__len__()}\n" + f"Root: {self.root}\nSplit: {self.split}\nSubset: {self.subset}\n" + f"Transforms: {self.transform.__repr__()}" + ) + return fmt_str + + def make_data(self): + """Make data.""" + train_img = _read_image_file( + os.path.join(self.raw_folder, "train-images-idx3-ubyte") + ) + test_img = _read_image_file( + os.path.join(self.raw_folder, "t10k-images-idx3-ubyte") + ) + train_label = _read_label_file( + os.path.join(self.raw_folder, "train-labels-idx1-ubyte") + ) + test_label = _read_label_file( + os.path.join(self.raw_folder, "t10k-labels-idx1-ubyte") + ) + train_target, test_target = {"label": train_label}, {"label": test_label} + classes_to_labels = {"label": anytree.Node("U", index=[])} + classes = list(map(str, list(range(10)))) + for cls in classes: + make_tree(classes_to_labels["label"], [cls]) + classes_size = {"label": make_flat_index(classes_to_labels["label"])} + return ( + (train_img, train_target), + (test_img, test_target), + (classes_to_labels, classes_size), + ) + + +def _get_int(num): + return int(codecs.encode(num, "hex"), 16) + + +def _read_image_file(path): + with open(path, "rb") as file: + data = file.read() + assert _get_int(data[:4]) == 2051 + length = _get_int(data[4:8]) + num_rows = _get_int(data[8:12]) + num_cols = _get_int(data[12:16]) + parsed = np.frombuffer(data, dtype=np.uint8, offset=16).reshape( + (length, num_rows, num_cols) + ) + return parsed + + +def _read_label_file(path): + with open(path, "rb") as file: + data = file.read() + assert _get_int(data[:4]) == 2049 + length = _get_int(data[4:8]) + parsed = ( + np.frombuffer(data, dtype=np.uint8, offset=8) + .reshape(length) + .astype(np.int64) + 
) + return parsed diff --git a/baselines/heterofl/heterofl/datasets/utils.py b/baselines/heterofl/heterofl/datasets/utils.py new file mode 100644 index 000000000000..6b71811ed50d --- /dev/null +++ b/baselines/heterofl/heterofl/datasets/utils.py @@ -0,0 +1,244 @@ +"""Contains utility functions required for datasests. + +Adopted from authors implementation. +""" +import glob +import gzip +import hashlib +import os +import tarfile +import zipfile +from collections import Counter + +import anytree +import numpy as np +from PIL import Image +from six.moves import urllib +from tqdm import tqdm + +from heterofl.utils import makedir_exist_ok + +IMG_EXTENSIONS = [".jpg", ".jpeg", ".png", ".ppm", ".bmp", ".pgm", ".tif"] + + +def find_classes(drctry): + """Find the classes in a directory.""" + classes = [d.name for d in os.scandir(drctry) if d.is_dir()] + classes.sort() + classes_to_labels = {classes[i]: i for i in range(len(classes))} + return classes_to_labels + + +def pil_loader(path): + """Load image from path using PIL.""" + with open(path, "rb") as file: + img = Image.open(file) + return img.convert("RGB") + + +# def accimage_loader(path): +# """Load image from path using accimage_loader.""" +# import accimage + +# try: +# return accimage.Image(path) +# except IOError: +# return pil_loader(path) + + +def default_loader(path): + """Load image from path using default loader.""" + # if get_image_backend() == "accimage": + # return accimage_loader(path) + + return pil_loader(path) + + +def has_file_allowed_extension(filename, extensions): + """Check whether file possesses any of the extensions listed.""" + filename_lower = filename.lower() + return any(filename_lower.endswith(ext) for ext in extensions) + + +def make_classes_counts(label): + """Count number of classes.""" + label = np.array(label) + if label.ndim > 1: + label = label.sum(axis=tuple(range(1, label.ndim))) + classes_counts = Counter(label) + return classes_counts + + +def _make_bar_updater(pbar): + def 
bar_update(count, block_size, total_size): + if pbar.total is None and total_size: + pbar.total = total_size + progress_bytes = count * block_size + pbar.update(progress_bytes - pbar.n) + + return bar_update + + +def _calculate_md5(path, chunk_size=1024 * 1024): + md5 = hashlib.md5() + with open(path, "rb") as file: + for chunk in iter(lambda: file.read(chunk_size), b""): + md5.update(chunk) + return md5.hexdigest() + + +def _check_md5(path, md5, **kwargs): + return md5 == _calculate_md5(path, **kwargs) + + +def _check_integrity(path, md5=None): + if not os.path.isfile(path): + return False + if md5 is None: + return True + return _check_md5(path, md5) + + +def download_url(url, root, filename, md5): + """Download files from the url.""" + path = os.path.join(root, filename) + makedir_exist_ok(root) + if os.path.isfile(path) and _check_integrity(path, md5): + print("Using downloaded and verified file: " + path) + else: + try: + print("Downloading " + url + " to " + path) + urllib.request.urlretrieve( + url, path, reporthook=_make_bar_updater(tqdm(unit="B", unit_scale=True)) + ) + except OSError: + if url[:5] == "https": + url = url.replace("https:", "http:") + print( + "Failed download. Trying https -> http instead." 
+ " Downloading " + url + " to " + path + ) + urllib.request.urlretrieve( + url, + path, + reporthook=_make_bar_updater(tqdm(unit="B", unit_scale=True)), + ) + if not _check_integrity(path, md5): + raise RuntimeError("Not valid downloaded file") + + +def extract_file(src, dest=None, delete=False): + """Extract the file.""" + print("Extracting {}".format(src)) + dest = os.path.dirname(src) if dest is None else dest + filename = os.path.basename(src) + if filename.endswith(".zip"): + with zipfile.ZipFile(src, "r") as zip_f: + zip_f.extractall(dest) + elif filename.endswith(".tar"): + with tarfile.open(src) as tar_f: + tar_f.extractall(dest) + elif filename.endswith(".tar.gz") or filename.endswith(".tgz"): + with tarfile.open(src, "r:gz") as tar_f: + tar_f.extractall(dest) + elif filename.endswith(".gz"): + with open(src.replace(".gz", ""), "wb") as out_f, gzip.GzipFile(src) as zip_f: + out_f.write(zip_f.read()) + if delete: + os.remove(src) + + +def make_data(root, extensions): + """Get all the files in the root directory that follows the given extensions.""" + path = [] + files = glob.glob("{}/**/*".format(root), recursive=True) + for file in files: + if has_file_allowed_extension(file, extensions): + path.append(os.path.normpath(file)) + return path + + +# pylint: disable=dangerous-default-value +def make_img(path, classes_to_labels, extensions=IMG_EXTENSIONS): + """Make image.""" + img, label = [], [] + classes = [] + leaf_nodes = classes_to_labels.leaves + for node in leaf_nodes: + classes.append(node.name) + for cls in sorted(classes): + folder = os.path.join(path, cls) + if not os.path.isdir(folder): + continue + for root, _, filenames in sorted(os.walk(folder)): + for filename in sorted(filenames): + if has_file_allowed_extension(filename, extensions): + cur_path = os.path.join(root, filename) + img.append(cur_path) + label.append( + anytree.find_by_attr(classes_to_labels, cls).flat_index + ) + return img, label + + +def make_tree(root, name, attribute=None): 
+ """Create a tree of name.""" + if len(name) == 0: + return + if attribute is None: + attribute = {} + this_name = name[0] + next_name = name[1:] + this_attribute = {k: attribute[k][0] for k in attribute} + next_attribute = {k: attribute[k][1:] for k in attribute} + this_node = anytree.find_by_attr(root, this_name) + this_index = root.index + [len(root.children)] + if this_node is None: + this_node = anytree.Node( + this_name, parent=root, index=this_index, **this_attribute + ) + make_tree(this_node, next_name, next_attribute) + return + + +def make_flat_index(root, given=None): + """Make flat index for each leaf node in the tree.""" + if given: + classes_size = 0 + for node in anytree.PreOrderIter(root): + if len(node.children) == 0: + node.flat_index = given.index(node.name) + classes_size = ( + given.index(node.name) + 1 + if given.index(node.name) + 1 > classes_size + else classes_size + ) + else: + classes_size = 0 + for node in anytree.PreOrderIter(root): + if len(node.children) == 0: + node.flat_index = classes_size + classes_size += 1 + return classes_size + + +class Compose: + """Custom Compose class.""" + + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, inp): + """Apply transforms when called.""" + for transform in self.transforms: + inp["img"] = transform(inp["img"]) + return inp + + def __repr__(self): + """Represent Compose as string.""" + format_string = self.__class__.__name__ + "(" + for transform in self.transforms: + format_string += "\n" + format_string += " {0}".format(transform) + format_string += "\n)" + return format_string diff --git a/baselines/heterofl/heterofl/main.py b/baselines/heterofl/heterofl/main.py new file mode 100644 index 000000000000..3973841cb60e --- /dev/null +++ b/baselines/heterofl/heterofl/main.py @@ -0,0 +1,204 @@ +"""Runs federated learning for given configuration in base.yaml.""" +import pickle +from pathlib import Path + +import flwr as fl +import hydra +import torch +from 
hydra.core.hydra_config import HydraConfig +from hydra.utils import instantiate +from omegaconf import DictConfig, OmegaConf + +from heterofl import client, models, server +from heterofl.client_manager_heterofl import ClientManagerHeteroFL +from heterofl.dataset import load_datasets +from heterofl.model_properties import get_model_properties +from heterofl.utils import ModelRateManager, get_global_model_rate, preprocess_input + + +# pylint: disable=too-many-locals,protected-access +@hydra.main(config_path="conf", config_name="base.yaml", version_base=None) +def main(cfg: DictConfig) -> None: + """Run the baseline. + + Parameters + ---------- + cfg : DictConfig + An omegaconf object that stores the hydra config. + """ + # print config structured as YAML + print(OmegaConf.to_yaml(cfg)) + torch.manual_seed(cfg.seed) + + data_loaders = {} + + ( + data_loaders["entire_trainloader"], + data_loaders["trainloaders"], + data_loaders["label_split"], + data_loaders["valloaders"], + data_loaders["testloader"], + ) = load_datasets( + "heterofl" if "heterofl" in cfg.strategy._target_ else "fedavg", + config=cfg.dataset, + num_clients=cfg.num_clients, + seed=cfg.seed, + ) + + model_config = preprocess_input(cfg.model, cfg.dataset) + + model_split_rate = None + model_mode = None + client_to_model_rate_mapping = None + model_rate_manager = None + history = None + + if "HeteroFL" in cfg.strategy._target_: + # send this array(client_model_rate_mapping) as + # an argument to client_manager and client + model_split_rate = {"a": 1, "b": 0.5, "c": 0.25, "d": 0.125, "e": 0.0625} + # model_split_mode = cfg.control.model_split_mode + model_mode = cfg.control.model_mode + + client_to_model_rate_mapping = [float(0) for _ in range(cfg.num_clients)] + model_rate_manager = ModelRateManager( + cfg.control.model_split_mode, model_split_rate, model_mode + ) + + model_config["global_model_rate"] = model_split_rate[ + get_global_model_rate(model_mode) + ] + + test_model = models.create_model( + 
model_config, + model_rate=model_split_rate[get_global_model_rate(model_mode)] + if model_split_rate is not None + else None, + track=True, + device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu"), + ) + + get_model_properties( + model_config, + model_split_rate, + model_mode + "" if model_mode is not None else None, + data_loaders["entire_trainloader"], + cfg.dataset.batch_size.train, + ) + + # prepare function that will be used to spawn each client + client_train_settings = { + "epochs": cfg.num_epochs, + "optimizer": cfg.optim_scheduler.optimizer, + "lr": cfg.optim_scheduler.lr, + "momentum": cfg.optim_scheduler.momentum, + "weight_decay": cfg.optim_scheduler.weight_decay, + "scheduler": cfg.optim_scheduler.scheduler, + "milestones": cfg.optim_scheduler.milestones, + } + + if "clip" in cfg: + client_train_settings["clip"] = cfg.clip + + optim_scheduler_settings = { + "optimizer": cfg.optim_scheduler.optimizer, + "lr": cfg.optim_scheduler.lr, + "momentum": cfg.optim_scheduler.momentum, + "weight_decay": cfg.optim_scheduler.weight_decay, + "scheduler": cfg.optim_scheduler.scheduler, + "milestones": cfg.optim_scheduler.milestones, + } + + client_fn = client.gen_client_fn( + model_config=model_config, + client_to_model_rate_mapping=client_to_model_rate_mapping, + client_train_settings=client_train_settings, + data_loaders=data_loaders, + ) + + evaluate_fn = server.gen_evaluate_fn( + data_loaders, + torch.device("cuda:0" if torch.cuda.is_available() else "cpu"), + test_model, + models.create_model( + model_config, + model_rate=model_split_rate[get_global_model_rate(model_mode)] + if model_split_rate is not None + else None, + track=False, + device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu"), + ) + .state_dict() + .keys(), + enable_train_on_train_data=cfg.enable_train_on_train_data_while_testing + if "enable_train_on_train_data_while_testing" in cfg + else True, + ) + client_resources = { + "num_cpus": cfg.client_resources.num_cpus, + 
"num_gpus": cfg.client_resources.num_gpus if torch.cuda.is_available() else 0, + } + + if "HeteroFL" in cfg.strategy._target_: + strategy_heterofl = instantiate( + cfg.strategy, + model_name=cfg.model.model_name, + net=models.create_model( + model_config, + model_rate=model_split_rate[get_global_model_rate(model_mode)] + if model_split_rate is not None + else None, + device="cpu", + ), + optim_scheduler_settings=optim_scheduler_settings, + global_model_rate=model_split_rate[get_global_model_rate(model_mode)] + if model_split_rate is not None + else 1.0, + evaluate_fn=evaluate_fn, + min_available_clients=cfg.num_clients, + ) + + history = fl.simulation.start_simulation( + client_fn=client_fn, + num_clients=cfg.num_clients, + config=fl.server.ServerConfig(num_rounds=cfg.num_rounds), + client_resources=client_resources, + client_manager=ClientManagerHeteroFL( + model_rate_manager, + client_to_model_rate_mapping, + client_label_split=data_loaders["label_split"], + ), + strategy=strategy_heterofl, + ) + else: + strategy_fedavg = instantiate( + cfg.strategy, + # on_fit_config_fn=lambda server_round: { + # "lr": cfg.optim_scheduler.lr + # * pow(cfg.optim_scheduler.lr_decay_rate, server_round) + # }, + evaluate_fn=evaluate_fn, + min_available_clients=cfg.num_clients, + ) + + history = fl.simulation.start_simulation( + client_fn=client_fn, + num_clients=cfg.num_clients, + config=fl.server.ServerConfig(num_rounds=cfg.num_rounds), + client_resources=client_resources, + strategy=strategy_fedavg, + ) + + # save the results + save_path = HydraConfig.get().runtime.output_dir + + # save the results as a python pickle + with open(str(Path(save_path) / "results.pkl"), "wb") as file_handle: + pickle.dump({"history": history}, file_handle, protocol=pickle.HIGHEST_PROTOCOL) + + # save the model + torch.save(test_model.state_dict(), str(Path(save_path) / "model.pth")) + + +if __name__ == "__main__": + main() diff --git a/baselines/heterofl/heterofl/model_properties.py 
b/baselines/heterofl/heterofl/model_properties.py new file mode 100644 index 000000000000..0739fe4fde22 --- /dev/null +++ b/baselines/heterofl/heterofl/model_properties.py @@ -0,0 +1,123 @@ +"""Determine number of model parameters, space it requires.""" +import numpy as np +import torch +import torch.nn as nn + +from heterofl.models import create_model + + +def get_model_properties( + model_config, model_split_rate, model_mode, data_loader, batch_size +): + """Calculate space occupied & number of parameters of model.""" + model_mode = model_mode.split("-") if model_mode is not None else None + # model = create_model(model_config, model_rate=model_split_rate(i[0])) + + total_flops = 0 + total_model_parameters = 0 + ttl_prcntg = 0 + if model_mode is None: + total_flops = _calculate_model_memory(create_model(model_config), data_loader) + total_model_parameters = _count_parameters(create_model(model_config)) + else: + for i in model_mode: + total_flops += _calculate_model_memory( + create_model(model_config, model_rate=model_split_rate[i[0]]), + data_loader, + ) * int(i[1]) + total_model_parameters += _count_parameters( + create_model(model_config, model_rate=model_split_rate[i[0]]) + ) * int(i[1]) + ttl_prcntg += int(i[1]) + + total_flops = total_flops / ttl_prcntg if ttl_prcntg != 0 else total_flops + total_flops /= batch_size + total_model_parameters = ( + total_model_parameters / ttl_prcntg + if ttl_prcntg != 0 + else total_model_parameters + ) + + space = total_model_parameters * 32.0 / 8 / (1024**2.0) + print("num_of_parameters = ", total_model_parameters / 1000, " K") + print("total_flops = ", total_flops / 1000000, " M") + print("space = ", space) + + return total_model_parameters, total_flops, space + + +def _calculate_model_memory(model, data_loader): + def register_hook(module): + def hook(module, inp, output): + # temp = _make_flops(module, inp, output) + # print(temp) + for _ in module.named_parameters(): + flops.append(_make_flops(module, inp, output)) + 
+ if ( + not isinstance(module, nn.Sequential) + and not isinstance(module, nn.ModuleList) + and not isinstance(module, nn.ModuleDict) + and module != model + ): + hooks.append(module.register_forward_hook(hook)) + + hooks = [] + flops = [] + model.apply(register_hook) + + one_dl = next(iter(data_loader)) + input_dict = {"img": one_dl[0], "label": one_dl[1]} + with torch.no_grad(): + model(input_dict) + + for hook in hooks: + hook.remove() + + return sum(fl for fl in flops) + + +def _count_parameters(model): + return sum(p.numel() for p in model.parameters() if p.requires_grad) + + +def _make_flops(module, inp, output): + if isinstance(inp, tuple): + return _make_flops(module, inp[0], output) + if isinstance(output, tuple): + return _make_flops(module, inp, output[0]) + flops = _compute_flops(module, inp, output) + return flops + + +def _compute_flops(module, inp, out): + flops = 0 + if isinstance(module, nn.Conv2d): + flops = _compute_conv2d_flops(module, inp, out) + elif isinstance(module, (nn.BatchNorm2d, nn.InstanceNorm2d)): + flops = np.prod(inp.shape).item() + if isinstance(module, (nn.BatchNorm2d, nn.InstanceNorm2d)) and module.affine: + flops *= 2 + elif isinstance(module, nn.Linear): + flops = np.prod(inp.size()[:-1]).item() * inp.size()[-1] * out.size()[-1] + # else: + # print(f"[Flops]: {type(module).__name__} is not supported!") + return flops + + +def _compute_conv2d_flops(module, inp, out): + batch_size = inp.size()[0] + in_c = inp.size()[1] + out_c, out_h, out_w = out.size()[1:] + groups = module.groups + filters_per_channel = out_c // groups + conv_per_position_flops = ( + module.kernel_size[0] * module.kernel_size[1] * in_c * filters_per_channel + ) + active_elements_count = batch_size * out_h * out_w + total_conv_flops = conv_per_position_flops * active_elements_count + bias_flops = 0 + if module.bias is not None: + bias_flops = out_c * active_elements_count + total_flops = total_conv_flops + bias_flops + return total_flops diff --git 
a/baselines/heterofl/heterofl/models.py b/baselines/heterofl/heterofl/models.py new file mode 100644 index 000000000000..9426ee8b2789 --- /dev/null +++ b/baselines/heterofl/heterofl/models.py @@ -0,0 +1,839 @@ +"""Conv & resnet18 model architecture, training, testing functions. + +Classes Conv, Block, Resnet18 are adopted from authors implementation. +""" +import copy +from typing import List, OrderedDict + +import numpy as np +import torch +import torch.nn.functional as F +from flwr.common import parameters_to_ndarrays +from torch import nn + +from heterofl.utils import make_optimizer + + +class Conv(nn.Module): + """Convolutional Neural Network architecture with sBN.""" + + def __init__( + self, + model_config, + ): + super().__init__() + self.model_config = model_config + + blocks = [ + nn.Conv2d( + model_config["data_shape"][0], model_config["hidden_size"][0], 3, 1, 1 + ), + self._get_scale(), + self._get_norm(0), + nn.ReLU(inplace=True), + nn.MaxPool2d(2), + ] + for i in range(len(model_config["hidden_size"]) - 1): + blocks.extend( + [ + nn.Conv2d( + model_config["hidden_size"][i], + model_config["hidden_size"][i + 1], + 3, + 1, + 1, + ), + self._get_scale(), + self._get_norm(i + 1), + nn.ReLU(inplace=True), + nn.MaxPool2d(2), + ] + ) + blocks = blocks[:-1] + blocks.extend( + [ + nn.AdaptiveAvgPool2d(1), + nn.Flatten(), + nn.Linear( + model_config["hidden_size"][-1], model_config["classes_size"] + ), + ] + ) + self.blocks = nn.Sequential(*blocks) + + def _get_norm(self, j: int): + """Return the relavant norm.""" + if self.model_config["norm"] == "bn": + norm = nn.BatchNorm2d( + self.model_config["hidden_size"][j], + momentum=None, + track_running_stats=self.model_config["track"], + ) + elif self.model_config["norm"] == "in": + norm = nn.GroupNorm( + self.model_config["hidden_size"][j], self.model_config["hidden_size"][j] + ) + elif self.model_config["norm"] == "ln": + norm = nn.GroupNorm(1, self.model_config["hidden_size"][j]) + elif self.model_config["norm"] 
== "gn": + norm = nn.GroupNorm(4, self.model_config["hidden_size"][j]) + elif self.model_config["norm"] == "none": + norm = nn.Identity() + else: + raise ValueError("Not valid norm") + + return norm + + def _get_scale(self): + """Return the relavant scaler.""" + if self.model_config["scale"]: + scaler = _Scaler(self.model_config["rate"]) + else: + scaler = nn.Identity() + return scaler + + def forward(self, input_dict): + """Forward pass of the Conv. + + Parameters + ---------- + input_dict : Dict + Conatins input Tensor that will pass through the network. + label of that input to calculate loss. + label_split if masking is required. + + Returns + ------- + Dict + The resulting Tensor after it has passed through the network and the loss. + """ + # output = {"loss": torch.tensor(0, device=self.device, dtype=torch.float32)} + output = {} + out = self.blocks(input_dict["img"]) + if "label_split" in input_dict and self.model_config["mask"]: + label_mask = torch.zeros( + self.model_config["classes_size"], device=out.device + ) + label_mask[input_dict["label_split"]] = 1 + out = out.masked_fill(label_mask == 0, 0) + output["score"] = out + output["loss"] = F.cross_entropy(out, input_dict["label"], reduction="mean") + return output + + +def conv( + model_rate, + model_config, + device="cpu", +): + """Create the Conv model.""" + model_config["hidden_size"] = [ + int(np.ceil(model_rate * x)) for x in model_config["hidden_layers"] + ] + scaler_rate = model_rate / model_config["global_model_rate"] + model_config["rate"] = scaler_rate + model = Conv(model_config) + model.apply(_init_param) + return model.to(device) + + +class Block(nn.Module): + """Block.""" + + expansion = 1 + + def __init__(self, in_planes, planes, stride, model_config): + super().__init__() + if model_config["norm"] == "bn": + n_1 = nn.BatchNorm2d( + in_planes, momentum=None, track_running_stats=model_config["track"] + ) + n_2 = nn.BatchNorm2d( + planes, momentum=None, 
track_running_stats=model_config["track"] + ) + elif model_config["norm"] == "in": + n_1 = nn.GroupNorm(in_planes, in_planes) + n_2 = nn.GroupNorm(planes, planes) + elif model_config["norm"] == "ln": + n_1 = nn.GroupNorm(1, in_planes) + n_2 = nn.GroupNorm(1, planes) + elif model_config["norm"] == "gn": + n_1 = nn.GroupNorm(4, in_planes) + n_2 = nn.GroupNorm(4, planes) + elif model_config["norm"] == "none": + n_1 = nn.Identity() + n_2 = nn.Identity() + else: + raise ValueError("Not valid norm") + self.n_1 = n_1 + self.conv1 = nn.Conv2d( + in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False + ) + self.n_2 = n_2 + self.conv2 = nn.Conv2d( + planes, planes, kernel_size=3, stride=1, padding=1, bias=False + ) + if model_config["scale"]: + self.scaler = _Scaler(model_config["rate"]) + else: + self.scaler = nn.Identity() + + if stride != 1 or in_planes != self.expansion * planes: + self.shortcut = nn.Conv2d( + in_planes, + self.expansion * planes, + kernel_size=1, + stride=stride, + bias=False, + ) + + def forward(self, x): + """Forward pass of the Block. + + Parameters + ---------- + x : Dict + Dict that contains Input Tensor that will pass through the network. + label of that input to calculate loss. + label_split if masking is required. + + Returns + ------- + Dict + The resulting Tensor after it has passed through the network and the loss. 
+ """ + out = F.relu(self.n_1(self.scaler(x))) + shortcut = self.shortcut(out) if hasattr(self, "shortcut") else x + out = self.conv1(out) + out = self.conv2(F.relu(self.n_2(self.scaler(out)))) + out += shortcut + return out + + +# pylint: disable=too-many-instance-attributes +class ResNet(nn.Module): + """Implementation of a Residual Neural Network (ResNet) model with sBN.""" + + def __init__( + self, + model_config, + block, + num_blocks, + ): + self.model_config = model_config + super().__init__() + self.in_planes = model_config["hidden_size"][0] + self.conv1 = nn.Conv2d( + model_config["data_shape"][0], + model_config["hidden_size"][0], + kernel_size=3, + stride=1, + padding=1, + bias=False, + ) + + self.layer1 = self._make_layer( + block, + model_config["hidden_size"][0], + num_blocks[0], + stride=1, + ) + self.layer2 = self._make_layer( + block, + model_config["hidden_size"][1], + num_blocks[1], + stride=2, + ) + self.layer3 = self._make_layer( + block, + model_config["hidden_size"][2], + num_blocks[2], + stride=2, + ) + self.layer4 = self._make_layer( + block, + model_config["hidden_size"][3], + num_blocks[3], + stride=2, + ) + + # self.layers = [layer1, layer2, layer3, layer4] + + if model_config["norm"] == "bn": + n_4 = nn.BatchNorm2d( + model_config["hidden_size"][3] * block.expansion, + momentum=None, + track_running_stats=model_config["track"], + ) + elif model_config["norm"] == "in": + n_4 = nn.GroupNorm( + model_config["hidden_size"][3] * block.expansion, + model_config["hidden_size"][3] * block.expansion, + ) + elif model_config["norm"] == "ln": + n_4 = nn.GroupNorm(1, model_config["hidden_size"][3] * block.expansion) + elif model_config["norm"] == "gn": + n_4 = nn.GroupNorm(4, model_config["hidden_size"][3] * block.expansion) + elif model_config["norm"] == "none": + n_4 = nn.Identity() + else: + raise ValueError("Not valid norm") + self.n_4 = n_4 + if model_config["scale"]: + self.scaler = _Scaler(model_config["rate"]) + else: + self.scaler = 
nn.Identity() + self.linear = nn.Linear( + model_config["hidden_size"][3] * block.expansion, + model_config["classes_size"], + ) + + def _make_layer(self, block, planes, num_blocks, stride): + strides = [stride] + [1] * (num_blocks - 1) + layers = [] + for strd in strides: + layers.append(block(self.in_planes, planes, strd, self.model_config.copy())) + self.in_planes = planes * block.expansion + return nn.Sequential(*layers) + + def forward(self, input_dict): + """Forward pass of the ResNet. + + Parameters + ---------- + input_dict : Dict + Dict that contains Input Tensor that will pass through the network. + label of that input to calculate loss. + label_split if masking is required. + + Returns + ------- + Dict + The resulting Tensor after it has passed through the network and the loss. + """ + output = {} + x = input_dict["img"] + out = self.conv1(x) + # for layer in self.layers: + # out = layer(out) + out = self.layer1(out) + out = self.layer2(out) + out = self.layer3(out) + out = self.layer4(out) + out = F.relu(self.n_4(self.scaler(out))) + out = F.adaptive_avg_pool2d(out, 1) + out = out.view(out.size(0), -1) + out = self.linear(out) + if "label_split" in input_dict and self.model_config["mask"]: + label_mask = torch.zeros( + self.model_config["classes_size"], device=out.device + ) + label_mask[input_dict["label_split"]] = 1 + out = out.masked_fill(label_mask == 0, 0) + output["score"] = out + output["loss"] = F.cross_entropy(output["score"], input_dict["label"]) + return output + + +def resnet18( + model_rate, + model_config, + device="cpu", +): + """Create the ResNet18 model.""" + model_config["hidden_size"] = [ + int(np.ceil(model_rate * x)) for x in model_config["hidden_layers"] + ] + scaler_rate = model_rate / model_config["global_model_rate"] + model_config["rate"] = scaler_rate + model = ResNet(model_config, block=Block, num_blocks=[1, 1, 1, 2]) + model.apply(_init_param) + return model.to(device) + + +class MLP(nn.Module): + """Multi Layer 
Perceptron.""" + + def __init__(self): + super().__init__() + self.layer_input = nn.Linear(784, 512) + self.relu = nn.ReLU() + self.dropout = nn.Dropout() + self.layer_hidden1 = nn.Linear(512, 256) + self.layer_hidden2 = nn.Linear(256, 256) + self.layer_hidden3 = nn.Linear(256, 128) + self.layer_out = nn.Linear(128, 10) + self.softmax = nn.Softmax(dim=1) + self.weight_keys = [ + ["layer_input.weight", "layer_input.bias"], + ["layer_hidden1.weight", "layer_hidden1.bias"], + ["layer_hidden2.weight", "layer_hidden2.bias"], + ["layer_hidden3.weight", "layer_hidden3.bias"], + ["layer_out.weight", "layer_out.bias"], + ] + + def forward(self, input_dict): + """Forward pass of the Conv. + + Parameters + ---------- + input_dict : Dict + Conatins input Tensor that will pass through the network. + label of that input to calculate loss. + label_split if masking is required. + + Returns + ------- + Dict + The resulting Tensor after it has passed through the network and the loss. + """ + output = {} + x = input_dict["img"] + x = x.view(-1, x.shape[1] * x.shape[-2] * x.shape[-1]) + x = self.layer_input(x) + x = self.relu(x) + + x = self.layer_hidden1(x) + x = self.relu(x) + + x = self.layer_hidden2(x) + x = self.relu(x) + + x = self.layer_hidden3(x) + x = self.relu(x) + + x = self.layer_out(x) + out = self.softmax(x) + output["score"] = out + output["loss"] = F.cross_entropy(out, input_dict["label"], reduction="mean") + return output + + +class CNNCifar(nn.Module): + """Convolutional Neural Network architecture for cifar dataset.""" + + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 100) + self.fc3 = nn.Linear(100, 10) + + self.weight_keys = [ + ["fc1.weight", "fc1.bias"], + ["fc2.weight", "fc2.bias"], + ["fc3.weight", "fc3.bias"], + ["conv2.weight", "conv2.bias"], + ["conv1.weight", "conv1.bias"], + ] + + def 
forward(self, input_dict): + """Forward pass of the Conv. + + Parameters + ---------- + input_dict : Dict + Conatins input Tensor that will pass through the network. + label of that input to calculate loss. + label_split if masking is required. + + Returns + ------- + Dict + The resulting Tensor after it has passed through the network and the loss. + """ + output = {} + x = input_dict["img"] + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 16 * 5 * 5) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + x = self.fc3(x) + out = F.log_softmax(x, dim=1) + output["score"] = out + output["loss"] = F.cross_entropy(out, input_dict["label"], reduction="mean") + return output + + +def create_model(model_config, model_rate=None, track=False, device="cpu"): + """Create the model based on the configuration given in hydra.""" + model = None + model_config = model_config.copy() + model_config["track"] = track + + if model_config["model"] == "MLP": + model = MLP() + model.to(device) + elif model_config["model"] == "CNNCifar": + model = CNNCifar() + model.to(device) + elif model_config["model"] == "conv": + model = conv(model_rate=model_rate, model_config=model_config, device=device) + elif model_config["model"] == "resnet18": + model = resnet18( + model_rate=model_rate, model_config=model_config, device=device + ) + return model + + +def _init_param(m_param): + if isinstance(m_param, (nn.BatchNorm2d, nn.InstanceNorm2d)): + m_param.weight.data.fill_(1) + m_param.bias.data.zero_() + elif isinstance(m_param, nn.Linear): + m_param.bias.data.zero_() + return m_param + + +class _Scaler(nn.Module): + def __init__(self, rate): + super().__init__() + self.rate = rate + + def forward(self, inp): + """Forward of Scalar nn.Module.""" + output = inp / self.rate if self.training else inp + return output + + +def get_parameters(net) -> List[np.ndarray]: + """Return the parameters of model as numpy.NDArrays.""" + return [val.cpu().numpy() for _, val 
in net.state_dict().items()] + + +def set_parameters(net, parameters: List[np.ndarray]): + """Set the model parameters with given parameters.""" + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) + + +def train(model, train_loader, label_split, settings): + """Train a model with given settings. + + Parameters + ---------- + model : nn.Module + The neural network to train. + train_loader : DataLoader + The DataLoader containing the data to train the network on. + label_split : torch.tensor + Tensor containing the labels of the data. + settings: Dict + Dictionary conatining the information about eopchs, optimizer, + lr, momentum, weight_decay, device to train on. + """ + # criterion = torch.nn.CrossEntropyLoss() + optimizer = make_optimizer( + settings["optimizer"], + model.parameters(), + learning_rate=settings["lr"], + momentum=settings["momentum"], + weight_decay=settings["weight_decay"], + ) + + model.train() + for _ in range(settings["epochs"]): + for images, labels in train_loader: + input_dict = {} + input_dict["img"] = images.to(settings["device"]) + input_dict["label"] = labels.to(settings["device"]) + input_dict["label_split"] = label_split.type(torch.int).to( + settings["device"] + ) + optimizer.zero_grad() + output = model(input_dict) + output["loss"].backward() + if ("clip" not in settings) or ( + "clip" in settings and settings["clip"] is True + ): + torch.nn.utils.clip_grad_norm_(model.parameters(), 1) + optimizer.step() + + +def test(model, test_loader, label_split=None, device="cpu"): + """Evaluate the network on the test set. + + Parameters + ---------- + model : nn.Module + The neural network to test. + test_loader : DataLoader + The DataLoader containing the data to test the network on. + device : torch.device + The device on which the model should be tested, either 'cpu' or 'cuda'. 
+ + Returns + ------- + Tuple[float, float] + The loss and the accuracy of the input model on the given data. + """ + model.eval() + size = len(test_loader.dataset) + num_batches = len(test_loader) + test_loss, correct = 0, 0 + + with torch.no_grad(): + model.train(False) + for images, labels in test_loader: + input_dict = {} + input_dict["img"] = images.to(device) + input_dict["label"] = labels.to(device) + if label_split is not None: + input_dict["label_split"] = label_split.type(torch.int).to(device) + output = model(input_dict) + test_loss += output["loss"].item() + correct += ( + (output["score"].argmax(1) == input_dict["label"]) + .type(torch.float) + .sum() + .item() + ) + + test_loss /= num_batches + correct /= size + return test_loss, correct + + +def param_model_rate_mapping( + model_name, parameters, clients_model_rate, global_model_rate=1 +): + """Map the model rate to subset of global parameters(as list of indices). + + Parameters + ---------- + model_name : str + The name of the neural network of global model. + parameters : Dict + state_dict of the global model. + client_model_rate : List[float] + List of model rates of active clients. + global_model_rate: float + Model rate of the global model. + + Returns + ------- + Dict + model rate to parameters indices relative to global model mapping. 
+ """ + unique_client_model_rate = list(set(clients_model_rate)) + print(unique_client_model_rate) + + if "conv" in model_name: + idx = _mr_to_param_idx_conv( + parameters, unique_client_model_rate, global_model_rate + ) + elif "resnet" in model_name: + idx = _mr_to_param_idx_resnet18( + parameters, unique_client_model_rate, global_model_rate + ) + else: + raise ValueError("Not valid model name") + + # add model rate as key to the params calculated + param_idx_model_rate_mapping = OrderedDict() + for i, _ in enumerate(unique_client_model_rate): + param_idx_model_rate_mapping[unique_client_model_rate[i]] = idx[i] + + return param_idx_model_rate_mapping + + +def _mr_to_param_idx_conv(parameters, unique_client_model_rate, global_model_rate): + idx_i = [None for _ in range(len(unique_client_model_rate))] + idx = [OrderedDict() for _ in range(len(unique_client_model_rate))] + output_weight_name = [k for k in parameters.keys() if "weight" in k][-1] + output_bias_name = [k for k in parameters.keys() if "bias" in k][-1] + for k, val in parameters.items(): + parameter_type = k.split(".")[-1] + for index, _ in enumerate(unique_client_model_rate): + if "weight" in parameter_type or "bias" in parameter_type: + scaler_rate = unique_client_model_rate[index] / global_model_rate + _get_key_k_idx_conv( + idx, + idx_i, + { + "index": index, + "parameter_type": parameter_type, + "k": k, + "val": val, + }, + output_names={ + "output_weight_name": output_weight_name, + "output_bias_name": output_bias_name, + }, + scaler_rate=scaler_rate, + ) + else: + pass + return idx + + +def _get_key_k_idx_conv( + idx, + idx_i, + param_info, + output_names, + scaler_rate, +): + if param_info["parameter_type"] == "weight": + if param_info["val"].dim() > 1: + input_size = param_info["val"].size(1) + output_size = param_info["val"].size(0) + if idx_i[param_info["index"]] is None: + idx_i[param_info["index"]] = torch.arange( + input_size, device=param_info["val"].device + ) + input_idx_i_m = 
idx_i[param_info["index"]] + if param_info["k"] == output_names["output_weight_name"]: + output_idx_i_m = torch.arange( + output_size, device=param_info["val"].device + ) + else: + local_output_size = int(np.ceil(output_size * (scaler_rate))) + output_idx_i_m = torch.arange( + output_size, device=param_info["val"].device + )[:local_output_size] + idx[param_info["index"]][param_info["k"]] = output_idx_i_m, input_idx_i_m + idx_i[param_info["index"]] = output_idx_i_m + else: + input_idx_i_m = idx_i[param_info["index"]] + idx[param_info["index"]][param_info["k"]] = input_idx_i_m + else: + if param_info["k"] == output_names["output_bias_name"]: + input_idx_i_m = idx_i[param_info["index"]] + idx[param_info["index"]][param_info["k"]] = input_idx_i_m + else: + input_idx_i_m = idx_i[param_info["index"]] + idx[param_info["index"]][param_info["k"]] = input_idx_i_m + + +def _mr_to_param_idx_resnet18(parameters, unique_client_model_rate, global_model_rate): + idx_i = [None for _ in range(len(unique_client_model_rate))] + idx = [OrderedDict() for _ in range(len(unique_client_model_rate))] + for k, val in parameters.items(): + parameter_type = k.split(".")[-1] + for index, _ in enumerate(unique_client_model_rate): + if "weight" in parameter_type or "bias" in parameter_type: + scaler_rate = unique_client_model_rate[index] / global_model_rate + _get_key_k_idx_resnet18( + idx, + idx_i, + { + "index": index, + "parameter_type": parameter_type, + "k": k, + "val": val, + }, + scaler_rate=scaler_rate, + ) + else: + pass + return idx + + +def _get_key_k_idx_resnet18( + idx, + idx_i, + param_info, + scaler_rate, +): + if param_info["parameter_type"] == "weight": + if param_info["val"].dim() > 1: + input_size = param_info["val"].size(1) + output_size = param_info["val"].size(0) + if "conv1" in param_info["k"] or "conv2" in param_info["k"]: + if idx_i[param_info["index"]] is None: + idx_i[param_info["index"]] = torch.arange( + input_size, device=param_info["val"].device + ) + input_idx_i_m 
= idx_i[param_info["index"]] + local_output_size = int(np.ceil(output_size * (scaler_rate))) + output_idx_i_m = torch.arange( + output_size, device=param_info["val"].device + )[:local_output_size] + idx_i[param_info["index"]] = output_idx_i_m + elif "shortcut" in param_info["k"]: + input_idx_i_m = idx[param_info["index"]][ + param_info["k"].replace("shortcut", "conv1") + ][1] + output_idx_i_m = idx_i[param_info["index"]] + elif "linear" in param_info["k"]: + input_idx_i_m = idx_i[param_info["index"]] + output_idx_i_m = torch.arange( + output_size, device=param_info["val"].device + ) + else: + raise ValueError("Not valid k") + idx[param_info["index"]][param_info["k"]] = (output_idx_i_m, input_idx_i_m) + else: + input_idx_i_m = idx_i[param_info["index"]] + idx[param_info["index"]][param_info["k"]] = input_idx_i_m + else: + input_size = param_info["val"].size(0) + if "linear" in param_info["k"]: + input_idx_i_m = torch.arange(input_size, device=param_info["val"].device) + idx[param_info["index"]][param_info["k"]] = input_idx_i_m + else: + input_idx_i_m = idx_i[param_info["index"]] + idx[param_info["index"]][param_info["k"]] = input_idx_i_m + + +def param_idx_to_local_params(global_parameters, client_param_idx): + """Get the local parameters from the list of param indices. + + Parameters + ---------- + global_parameters : Dict + The state_dict of global model. + client_param_idx : List + Local parameters indices with respect to global model. + + Returns + ------- + Dict + state dict of local model. 
+ """ + local_parameters = OrderedDict() + for k, val in global_parameters.items(): + parameter_type = k.split(".")[-1] + if "weight" in parameter_type or "bias" in parameter_type: + if "weight" in parameter_type: + if val.dim() > 1: + local_parameters[k] = copy.deepcopy( + val[torch.meshgrid(client_param_idx[k])] + ) + else: + local_parameters[k] = copy.deepcopy(val[client_param_idx[k]]) + else: + local_parameters[k] = copy.deepcopy(val[client_param_idx[k]]) + else: + local_parameters[k] = copy.deepcopy(val) + return local_parameters + + +def get_state_dict_from_param(model, parameters): + """Get the state dict from model & parameters as np.NDarrays. + + Parameters + ---------- + model : nn.Module + The neural network. + parameters : np.NDarray + Parameters of the model as np.NDarrays. + + Returns + ------- + Dict + state dict of model. + """ + # Load the parameters into the model + for param_tensor, param_ndarray in zip( + model.state_dict(), parameters_to_ndarrays(parameters) + ): + model.state_dict()[param_tensor].copy_(torch.from_numpy(param_ndarray)) + # Step 3: Obtain the state_dict of the model + state_dict = model.state_dict() + return state_dict diff --git a/baselines/heterofl/heterofl/server.py b/baselines/heterofl/heterofl/server.py new file mode 100644 index 000000000000..f82db0a59fff --- /dev/null +++ b/baselines/heterofl/heterofl/server.py @@ -0,0 +1,101 @@ +"""Flower Server.""" +import time +from collections import OrderedDict +from typing import Callable, Dict, Optional, Tuple + +import torch +from flwr.common.typing import NDArrays, Scalar +from torch import nn + +from heterofl.models import test +from heterofl.utils import save_model + + +def gen_evaluate_fn( + data_loaders, + device: torch.device, + model: nn.Module, + keys, + enable_train_on_train_data: bool, +) -> Callable[ + [int, NDArrays, Dict[str, Scalar]], Optional[Tuple[float, Dict[str, Scalar]]] +]: + """Generate the function for centralized evaluation. 
+ + Parameters + ---------- + data_loaders : + A dictionary containing dataloaders for testing and + label split of each client. + device : torch.device + The device to test the model on. + model : + Model for testing. + keys : + keys of the model that it is trained on. + + Returns + ------- + Callable[ [int, NDArrays, Dict[str, Scalar]], + Optional[Tuple[float, Dict[str, Scalar]]] ] + The centralized evaluation function. + """ + intermediate_keys = keys + + def evaluate( + server_round: int, parameters_ndarrays: NDArrays, config: Dict[str, Scalar] + ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + # pylint: disable=unused-argument + """Use the entire test set for evaluation.""" + # if server_round % 5 != 0 and server_round < 395: + # return 1, {} + + net = model + params_dict = zip(intermediate_keys, parameters_ndarrays) + state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=False) + net.to(device) + + if server_round % 100 == 0: + save_model(net, f"model_after_round_{server_round}.pth") + + if enable_train_on_train_data is True: + print("start of testing") + start_time = time.time() + with torch.no_grad(): + net.train(True) + for images, labels in data_loaders["entire_trainloader"]: + input_dict = {} + input_dict["img"] = images.to(device) + input_dict["label"] = labels.to(device) + net(input_dict) + print(f"end of stat, time taken = {time.time() - start_time}") + + local_metrics = {} + local_metrics["loss"] = 0 + local_metrics["accuracy"] = 0 + for i, clnt_tstldr in enumerate(data_loaders["valloaders"]): + client_test_res = test( + net, + clnt_tstldr, + data_loaders["label_split"][i].type(torch.int), + device=device, + ) + local_metrics["loss"] += client_test_res[0] + local_metrics["accuracy"] += client_test_res[1] + + global_metrics = {} + global_metrics["loss"], global_metrics["accuracy"] = test( + net, data_loaders["testloader"], device=device + ) + + # return statistics + print(f"global accuracy = 
{global_metrics['accuracy']}") + print(f"local_accuracy = {local_metrics['accuracy']}") + return global_metrics["loss"], { + "global_accuracy": global_metrics["accuracy"], + "local_loss": local_metrics["loss"], + "local_accuracy": local_metrics["accuracy"], + } + + return evaluate diff --git a/baselines/heterofl/heterofl/strategy.py b/baselines/heterofl/heterofl/strategy.py new file mode 100644 index 000000000000..70dbd19594df --- /dev/null +++ b/baselines/heterofl/heterofl/strategy.py @@ -0,0 +1,467 @@ +"""Flower strategy for HeteroFL.""" +import copy +from collections import OrderedDict +from typing import Dict, List, Optional, Tuple, Union + +import flwr as fl +import torch +from flwr.common import ( + EvaluateIns, + EvaluateRes, + FitIns, + FitRes, + Parameters, + Scalar, + ndarrays_to_parameters, + parameters_to_ndarrays, +) +from flwr.server.client_manager import ClientManager +from flwr.server.client_proxy import ClientProxy +from torch import nn + +from heterofl.client_manager_heterofl import ClientManagerHeteroFL +from heterofl.models import ( + get_parameters, + get_state_dict_from_param, + param_idx_to_local_params, + param_model_rate_mapping, +) +from heterofl.utils import make_optimizer, make_scheduler + + +# pylint: disable=too-many-instance-attributes +class HeteroFL(fl.server.strategy.Strategy): + """HeteroFL strategy. + + Distribute subsets of a global model to clients according to their + + computational complexity and aggregate received models from clients. 
+ """ + + # pylint: disable=too-many-arguments + def __init__( + self, + model_name: str, + net: nn.Module, + optim_scheduler_settings: Dict, + global_model_rate: float = 1.0, + evaluate_fn=None, + fraction_fit: float = 1.0, + fraction_evaluate: float = 1.0, + min_fit_clients: int = 2, + min_evaluate_clients: int = 2, + min_available_clients: int = 2, + ) -> None: + super().__init__() + self.fraction_fit = fraction_fit + self.fraction_evaluate = fraction_evaluate + self.min_fit_clients = min_fit_clients + self.min_evaluate_clients = min_evaluate_clients + self.min_available_clients = min_available_clients + self.evaluate_fn = evaluate_fn + # # created client_to_model_mapping + # self.client_to_model_rate_mapping: Dict[str, ClientProxy] = {} + + self.model_name = model_name + self.net = net + self.global_model_rate = global_model_rate + # info required for configure and aggregate + # to be filled in initialize + self.local_param_model_rate: OrderedDict = OrderedDict() + # to be filled in initialize + self.active_cl_labels: List[torch.tensor] = [] + # to be filled in configure + self.active_cl_mr: OrderedDict = OrderedDict() + # required for scheduling the lr + self.optimizer = make_optimizer( + optim_scheduler_settings["optimizer"], + self.net.parameters(), + learning_rate=optim_scheduler_settings["lr"], + momentum=optim_scheduler_settings["momentum"], + weight_decay=optim_scheduler_settings["weight_decay"], + ) + self.scheduler = make_scheduler( + optim_scheduler_settings["scheduler"], + self.optimizer, + milestones=optim_scheduler_settings["milestones"], + ) + + def __repr__(self) -> str: + """Return a string representation of the HeteroFL object.""" + return "HeteroFL" + + def initialize_parameters( + self, client_manager: ClientManager + ) -> Optional[Parameters]: + """Initialize global model parameters.""" + # self.make_client_to_model_rate_mapping(client_manager) + # net = conv(model_rate = 1) + if not isinstance(client_manager, ClientManagerHeteroFL): + raise 
ValueError( + "Not valid client manager, use ClientManagerHeterFL instead" + ) + clnt_mngr_heterofl: ClientManagerHeteroFL = client_manager + + ndarrays = get_parameters(self.net) + self.local_param_model_rate = param_model_rate_mapping( + self.model_name, + self.net.state_dict(), + clnt_mngr_heterofl.get_all_clients_to_model_mapping(), + self.global_model_rate, + ) + + if clnt_mngr_heterofl.client_label_split is not None: + self.active_cl_labels = clnt_mngr_heterofl.client_label_split.copy() + + return fl.common.ndarrays_to_parameters(ndarrays) + + def configure_fit( + self, + server_round: int, + parameters: Parameters, + client_manager: ClientManager, + ) -> List[Tuple[ClientProxy, FitIns]]: + """Configure the next round of training.""" + print(f"in configure fit , server round no. = {server_round}") + if not isinstance(client_manager, ClientManagerHeteroFL): + raise ValueError( + "Not valid client manager, use ClientManagerHeterFL instead" + ) + clnt_mngr_heterofl: ClientManagerHeteroFL = client_manager + # Sample clients + # no need to change this + clientts_selection_config = {} + ( + clientts_selection_config["sample_size"], + clientts_selection_config["min_num_clients"], + ) = self.num_fit_clients(clnt_mngr_heterofl.num_available()) + + # for sampling we pass the criterion to select the required clients + clients = clnt_mngr_heterofl.sample( + num_clients=clientts_selection_config["sample_size"], + min_num_clients=clientts_selection_config["min_num_clients"], + ) + + # update client model rate mapping + clnt_mngr_heterofl.update(server_round) + + global_parameters = get_state_dict_from_param(self.net, parameters) + + self.active_cl_mr = OrderedDict() + + # Create custom configs + fit_configurations = [] + learning_rate = self.optimizer.param_groups[0]["lr"] + print(f"lr = {learning_rate}") + for client in clients: + model_rate = clnt_mngr_heterofl.get_client_to_model_mapping(client.cid) + client_param_idx = self.local_param_model_rate[model_rate] + 
local_param = param_idx_to_local_params( + global_parameters=global_parameters, client_param_idx=client_param_idx + ) + self.active_cl_mr[client.cid] = model_rate + # local param are in the form of state_dict, + # so converting them only to values of tensors + local_param_fitres = [val.cpu() for val in local_param.values()] + fit_configurations.append( + ( + client, + FitIns( + ndarrays_to_parameters(local_param_fitres), + {"lr": learning_rate}, + ), + ) + ) + + self.scheduler.step() + return fit_configurations + + def aggregate_fit( + self, + server_round: int, + results: List[Tuple[ClientProxy, FitRes]], + failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], + ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + """Aggregate fit results using weighted average. + + Adopted from authors implementation. + """ + print("in aggregate fit") + gl_model = self.net.state_dict() + + param_idx = [] + for res in results: + param_idx.append( + copy.deepcopy( + self.local_param_model_rate[self.active_cl_mr[res[0].cid]] + ) + ) + + local_param_as_parameters = [fit_res.parameters for _, fit_res in results] + local_parameters_as_ndarrays = [ + parameters_to_ndarrays(local_param_as_parameters[i]) + for i in range(len(local_param_as_parameters)) + ] + local_parameters: List[OrderedDict] = [ + OrderedDict() for _ in range(len(local_param_as_parameters)) + ] + for i in range(len(results)): + j = 0 + for k, _ in gl_model.items(): + local_parameters[i][k] = local_parameters_as_ndarrays[i][j] + j += 1 + + if "conv" in self.model_name: + self._aggregate_conv(param_idx, local_parameters, results) + + elif "resnet" in self.model_name: + self._aggregate_resnet18(param_idx, local_parameters, results) + else: + raise ValueError("Not valid model name") + + return ndarrays_to_parameters([v for k, v in gl_model.items()]), {} + + def _aggregate_conv(self, param_idx, local_parameters, results): + gl_model = self.net.state_dict() + count = OrderedDict() + output_bias_name = [k for k 
in gl_model.keys() if "bias" in k][-1] + output_weight_name = [k for k in gl_model.keys() if "weight" in k][-1] + for k, val in gl_model.items(): + parameter_type = k.split(".")[-1] + count[k] = val.new_zeros(val.size(), dtype=torch.float32) + tmp_v = val.new_zeros(val.size(), dtype=torch.float32) + for clnt, _ in enumerate(local_parameters): + if "weight" in parameter_type or "bias" in parameter_type: + self._agg_layer_conv( + { + "cid": int(results[clnt][0].cid), + "param_idx": param_idx, + "local_parameters": local_parameters, + }, + { + "tmp_v": tmp_v, + "count": count, + }, + { + "clnt": clnt, + "parameter_type": parameter_type, + "k": k, + "val": val, + }, + { + "output_weight_name": output_weight_name, + "output_bias_name": output_bias_name, + }, + ) + else: + tmp_v += local_parameters[clnt][k] + count[k] += 1 + tmp_v[count[k] > 0] = tmp_v[count[k] > 0].div_(count[k][count[k] > 0]) + val[count[k] > 0] = tmp_v[count[k] > 0].to(val.dtype) + + def _agg_layer_conv( + self, + clnt_params, + tmp_v_count, + param_info, + output_names, + ): + # pi = param_info + param_idx = clnt_params["param_idx"] + clnt = param_info["clnt"] + k = param_info["k"] + tmp_v = tmp_v_count["tmp_v"] + count = tmp_v_count["count"] + + if param_info["parameter_type"] == "weight": + if param_info["val"].dim() > 1: + if k == output_names["output_weight_name"]: + label_split = self.active_cl_labels[clnt_params["cid"]] + label_split = label_split.type(torch.int) + param_idx[clnt][k] = list(param_idx[clnt][k]) + param_idx[clnt][k][0] = param_idx[clnt][k][0][label_split] + tmp_v[torch.meshgrid(param_idx[clnt][k])] += clnt_params[ + "local_parameters" + ][clnt][k][label_split] + count[k][torch.meshgrid(param_idx[clnt][k])] += 1 + else: + tmp_v[torch.meshgrid(param_idx[clnt][k])] += clnt_params[ + "local_parameters" + ][clnt][k] + count[k][torch.meshgrid(param_idx[clnt][k])] += 1 + else: + tmp_v[param_idx[clnt][k]] += clnt_params["local_parameters"][clnt][k] + count[k][param_idx[clnt][k]] += 1 + 
else: + if k == output_names["output_bias_name"]: + label_split = self.active_cl_labels[clnt_params["cid"]] + label_split = label_split.type(torch.int) + param_idx[clnt][k] = param_idx[clnt][k][label_split] + tmp_v[param_idx[clnt][k]] += clnt_params["local_parameters"][clnt][k][ + label_split + ] + count[k][param_idx[clnt][k]] += 1 + else: + tmp_v[param_idx[clnt][k]] += clnt_params["local_parameters"][clnt][k] + count[k][param_idx[clnt][k]] += 1 + + def _aggregate_resnet18(self, param_idx, local_parameters, results): + gl_model = self.net.state_dict() + count = OrderedDict() + for k, val in gl_model.items(): + parameter_type = k.split(".")[-1] + count[k] = val.new_zeros(val.size(), dtype=torch.float32) + tmp_v = val.new_zeros(val.size(), dtype=torch.float32) + for clnt, _ in enumerate(local_parameters): + if "weight" in parameter_type or "bias" in parameter_type: + self._agg_layer_resnet18( + { + "cid": int(results[clnt][0].cid), + "param_idx": param_idx, + "local_parameters": local_parameters, + }, + tmp_v, + count, + { + "clnt": clnt, + "parameter_type": parameter_type, + "k": k, + "val": val, + }, + ) + else: + tmp_v += local_parameters[clnt][k] + count[k] += 1 + tmp_v[count[k] > 0] = tmp_v[count[k] > 0].div_(count[k][count[k] > 0]) + val[count[k] > 0] = tmp_v[count[k] > 0].to(val.dtype) + + def _agg_layer_resnet18(self, clnt_params, tmp_v, count, param_info): + param_idx = clnt_params["param_idx"] + k = param_info["k"] + clnt = param_info["clnt"] + + if param_info["parameter_type"] == "weight": + if param_info["val"].dim() > 1: + if "linear" in k: + label_split = self.active_cl_labels[clnt_params["cid"]] + label_split = label_split.type(torch.int) + param_idx[clnt][k] = list(param_idx[clnt][k]) + param_idx[clnt][k][0] = param_idx[clnt][k][0][label_split] + tmp_v[torch.meshgrid(param_idx[clnt][k])] += clnt_params[ + "local_parameters" + ][clnt][k][label_split] + count[k][torch.meshgrid(param_idx[clnt][k])] += 1 + else: + tmp_v[torch.meshgrid(param_idx[clnt][k])] 
+= clnt_params[ + "local_parameters" + ][clnt][k] + count[k][torch.meshgrid(param_idx[clnt][k])] += 1 + else: + tmp_v[param_idx[clnt][k]] += clnt_params["local_parameters"][clnt][k] + count[k][param_idx[clnt][k]] += 1 + else: + if "linear" in k: + label_split = self.active_cl_labels[clnt_params["cid"]] + label_split = label_split.type(torch.int) + param_idx[clnt][k] = param_idx[clnt][k][label_split] + tmp_v[param_idx[clnt][k]] += clnt_params["local_parameters"][clnt][k][ + label_split + ] + count[k][param_idx[clnt][k]] += 1 + else: + tmp_v[param_idx[clnt][k]] += clnt_params["local_parameters"][clnt][k] + count[k][param_idx[clnt][k]] += 1 + + def configure_evaluate( + self, server_round: int, parameters: Parameters, client_manager: ClientManager + ) -> List[Tuple[ClientProxy, EvaluateIns]]: + """Configure the next round of evaluation.""" + # if self.fraction_evaluate == 0.0: + # return [] + # config = {} + # evaluate_ins = EvaluateIns(parameters, config) + + # # Sample clients + # sample_size, min_num_clients = self.num_evaluation_clients( + # client_manager.num_available() + # ) + # clients = client_manager.sample( + # num_clients=sample_size, min_num_clients=min_num_clients + # ) + + # global_parameters = get_state_dict_from_param(self.net, parameters) + + # self.active_cl_mr = OrderedDict() + + # # Create custom configs + # evaluate_configurations = [] + # for idx, client in enumerate(clients): + # model_rate = client_manager.get_client_to_model_mapping(client.cid) + # client_param_idx = self.local_param_model_rate[model_rate] + # local_param = + # param_idx_to_local_params(global_parameters, client_param_idx) + # self.active_cl_mr[client.cid] = model_rate + # # local param are in the form of state_dict, + # # so converting them only to values of tensors + # local_param_fitres = [v.cpu() for v in local_param.values()] + # evaluate_configurations.append( + # (client, EvaluateIns(ndarrays_to_parameters(local_param_fitres), {})) + # ) + # return 
evaluate_configurations + + return [] + + # return self.configure_fit(server_round , parameters , client_manager) + + def aggregate_evaluate( + self, + server_round: int, + results: List[Tuple[ClientProxy, EvaluateRes]], + failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> Tuple[Optional[float], Dict[str, Scalar]]: + """Aggregate evaluation losses using weighted average.""" + # if not results: + # return None, {} + + # loss_aggregated = weighted_loss_avg( + # [ + # (evaluate_res.num_examples, evaluate_res.loss) + # for _, evaluate_res in results + # ] + # ) + + # accuracy_aggregated = 0 + # for cp, y in results: + # print(f"{cp.cid}-->{y.metrics['accuracy']}", end=" ") + # accuracy_aggregated += y.metrics["accuracy"] + # accuracy_aggregated /= len(results) + + # metrics_aggregated = {"accuracy": accuracy_aggregated} + # print(f"\npaneer lababdar {metrics_aggregated}") + # return loss_aggregated, metrics_aggregated + + return None, {} + + def evaluate( + self, server_round: int, parameters: Parameters + ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + """Evaluate model parameters using an evaluation function.""" + if self.evaluate_fn is None: + # No evaluation function provided + return None + parameters_ndarrays = parameters_to_ndarrays(parameters) + eval_res = self.evaluate_fn(server_round, parameters_ndarrays, {}) + if eval_res is None: + return None + loss, metrics = eval_res + return loss, metrics + + def num_fit_clients(self, num_available_clients: int) -> Tuple[int, int]: + """Return sample size and required number of clients.""" + num_clients = int(num_available_clients * self.fraction_fit) + return max(num_clients, self.min_fit_clients), self.min_available_clients + + def num_evaluation_clients(self, num_available_clients: int) -> Tuple[int, int]: + """Use a fraction of available clients for evaluation.""" + num_clients = int(num_available_clients * self.fraction_evaluate) + return max(num_clients, self.min_evaluate_clients), 
self.min_available_clients diff --git a/baselines/heterofl/heterofl/utils.py b/baselines/heterofl/heterofl/utils.py new file mode 100644 index 000000000000..3bcb7f3d8ea7 --- /dev/null +++ b/baselines/heterofl/heterofl/utils.py @@ -0,0 +1,218 @@ +"""Contains utility functions.""" +import errno +import os +from pathlib import Path + +import numpy as np +import torch +from hydra.core.hydra_config import HydraConfig + + +def preprocess_input(cfg_model, cfg_data): + """Preprocess the input to get input shape, other derivables. + + Parameters + ---------- + cfg_model : DictConfig + Retrieve model-related information from the base.yaml configuration in Hydra. + cfg_data : DictConfig + Retrieve data-related information required to construct the model. + + Returns + ------- + Dict + Dictionary contained derived information from config. + """ + model_config = {} + # if cfg_model.model_name == "conv": + # model_config["model_name"] = + # elif for others... + model_config["model"] = cfg_model.model_name + if cfg_data.dataset_name == "MNIST": + model_config["data_shape"] = [1, 28, 28] + model_config["classes_size"] = 10 + elif cfg_data.dataset_name == "CIFAR10": + model_config["data_shape"] = [3, 32, 32] + model_config["classes_size"] = 10 + + if "hidden_layers" in cfg_model: + model_config["hidden_layers"] = cfg_model.hidden_layers + if "norm" in cfg_model: + model_config["norm"] = cfg_model.norm + if "scale" in cfg_model: + model_config["scale"] = cfg_model.scale + if "mask" in cfg_model: + model_config["mask"] = cfg_model.mask + + return model_config + + +def make_optimizer(optimizer_name, parameters, learning_rate, weight_decay, momentum): + """Make the optimizer with given config. + + Parameters + ---------- + optimizer_name : str + Name of the optimizer. + parameters : Dict + Parameters of the model. + learning_rate: float + Learning rate of the optimizer. + weight_decay: float + weight_decay of the optimizer. + + Returns + ------- + torch.optim.Optimizer + Optimizer. 
+ """ + optimizer = None + if optimizer_name == "SGD": + optimizer = torch.optim.SGD( + parameters, lr=learning_rate, momentum=momentum, weight_decay=weight_decay + ) + return optimizer + + +def make_scheduler(scheduler_name, optimizer, milestones): + """Make the scheduler with given config. + + Parameters + ---------- + scheduler_name : str + Name of the scheduler. + optimizer : torch.optim.Optimizer + Parameters of the model. + milestones: List[int] + List of epoch indices. Must be increasing. + + Returns + ------- + torch.optim.lr_scheduler.Scheduler + scheduler. + """ + scheduler = None + if scheduler_name == "MultiStepLR": + scheduler = torch.optim.lr_scheduler.MultiStepLR( + optimizer, milestones=milestones + ) + return scheduler + + +def get_global_model_rate(model_mode): + """Give the global model rate from string(cfg.control.model_mode) . + + Parameters + ---------- + model_mode : str + Contains the division of computational complexties among clients. + + Returns + ------- + str + global model computational complexity. 
+ """ + model_mode = "" + model_mode + model_mode = model_mode.split("-")[0][0] + return model_mode + + +class ModelRateManager: + """Control the model rate of clients in case of simulation.""" + + def __init__(self, model_split_mode, model_split_rate, model_mode): + self.model_split_mode = model_split_mode + self.model_split_rate = model_split_rate + self.model_mode = model_mode + self.model_mode = self.model_mode.split("-") + + def create_model_rate_mapping(self, num_users): + """Change the client to model rate mapping accordingly.""" + client_model_rate = [] + + if self.model_split_mode == "fix": + mode_rate, proportion = [], [] + for comp_level_prop in self.model_mode: + mode_rate.append(self.model_split_rate[comp_level_prop[0]]) + proportion.append(int(comp_level_prop[1:])) + num_users_proportion = num_users // sum(proportion) + for i, comp_level in enumerate(mode_rate): + client_model_rate += np.repeat( + comp_level, num_users_proportion * proportion[i] + ).tolist() + client_model_rate = client_model_rate + [ + client_model_rate[-1] for _ in range(num_users - len(client_model_rate)) + ] + # return client_model_rate + + elif self.model_split_mode == "dynamic": + mode_rate, proportion = [], [] + + for comp_level_prop in self.model_mode: + mode_rate.append(self.model_split_rate[comp_level_prop[0]]) + proportion.append(int(comp_level_prop[1:])) + + proportion = (np.array(proportion) / sum(proportion)).tolist() + + rate_idx = torch.multinomial( + torch.tensor(proportion), num_samples=num_users, replacement=True + ).tolist() + client_model_rate = np.array(mode_rate)[rate_idx] + + # return client_model_rate + + else: + raise ValueError("Not valid model split mode") + + return client_model_rate + + +def save_model(model, path): + """To save the model in the given path.""" + # print('in save model') + current_path = HydraConfig.get().runtime.output_dir + model_save_path = Path(current_path) / path + torch.save(model.state_dict(), model_save_path) + + +# """ The 
following functions(check_exists, makedir_exit_ok, save, load) +# are adopted from authors (of heterofl) implementation.""" + + +def check_exists(path): + """Check if the given path exists.""" + return os.path.exists(path) + + +def makedir_exist_ok(path): + """Create a directory.""" + try: + os.makedirs(path) + except OSError as os_err: + if os_err.errno == errno.EEXIST: + pass + else: + raise + + +def save(inp, path, protocol=2, mode="torch"): + """Save the inp in a given path.""" + dirname = os.path.dirname(path) + makedir_exist_ok(dirname) + if mode == "torch": + torch.save(inp, path, pickle_protocol=protocol) + elif mode == "numpy": + np.save(path, inp, allow_pickle=True) + else: + raise ValueError("Not valid save mode") + + +# pylint: disable=no-else-return +def load(path, mode="torch"): + """Load the file from given path.""" + if mode == "torch": + return torch.load(path, map_location=lambda storage, loc: storage) + elif mode == "numpy": + return np.load(path, allow_pickle=True) + else: + raise ValueError("Not valid save mode") diff --git a/baselines/heterofl/pyproject.toml b/baselines/heterofl/pyproject.toml new file mode 100644 index 000000000000..0f72edf20345 --- /dev/null +++ b/baselines/heterofl/pyproject.toml @@ -0,0 +1,145 @@ +[build-system] +requires = ["poetry-core>=1.4.0"] +build-backend = "poetry.masonry.api" + +[tool.poetry] +name = "heterofl" # <----- Ensure it matches the name of your baseline directory containing all the source code +version = "1.0.0" +description = "HeteroFL : Computation And Communication Efficient Federated Learning For Heterogeneous Clients" +license = "Apache-2.0" +authors = ["M S Chaitanya Kumar ", "The Flower Authors "] +readme = "README.md" +homepage = "https://flower.dev" +repository = "https://github.com/adap/flower" +documentation = "https://flower.dev" +classifiers = [ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: 
Apache Software License", + "Operating System :: MacOS :: MacOS X", + "Operating System :: POSIX :: Linux", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: Implementation :: CPython", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Scientific/Engineering :: Mathematics", + "Topic :: Software Development", + "Topic :: Software Development :: Libraries", + "Topic :: Software Development :: Libraries :: Python Modules", + "Typing :: Typed", +] + +[tool.poetry.dependencies] +python = ">=3.10.0, <3.11.0" +flwr = { extras = ["simulation"], version = "1.5.0" } +hydra-core = "1.3.2" # don't change this +torch = { url = "https://download.pytorch.org/whl/cu118/torch-2.1.0%2Bcu118-cp310-cp310-linux_x86_64.whl"} +torchvision = { url = "https://download.pytorch.org/whl/cu118/torchvision-0.16.0%2Bcu118-cp310-cp310-linux_x86_64.whl"} +anytree = "^2.12.1" +types-six = "^1.16.21.9" +tqdm = "4.66.1" + +[tool.poetry.dev-dependencies] +isort = "==5.11.5" +black = "==23.1.0" +docformatter = "==1.5.1" +mypy = "==1.4.1" +pylint = "==2.8.2" +flake8 = "==3.9.2" +pytest = "==6.2.4" +pytest-watch = "==4.2.0" +ruff = "==0.0.272" +types-requests = "==2.27.7" +virtualenv = "20.21.0" + +[tool.isort] +line_length = 88 +indent = " " +multi_line_output = 3 +include_trailing_comma = true +force_grid_wrap = 0 +use_parentheses = true + +[tool.black] +line-length = 88 +target-version = ["py38", "py39", "py310", "py311"] + +[tool.pytest.ini_options] +minversion = "6.2" +addopts = "-qq" +testpaths = [ + "flwr_baselines", +] + +[tool.mypy] +ignore_missing_imports = true +strict = false +plugins = "numpy.typing.mypy_plugin" + +[tool.pylint."MESSAGES CONTROL"] 
+disable = "bad-continuation,duplicate-code,too-few-public-methods,useless-import-alias" +good-names = "i,j,k,_,x,y,X,Y" +signature-mutators="hydra.main.main" + + +[tool.pylint.typecheck] +generated-members="numpy.*, torch.*, tensorflow.*" + + +[[tool.mypy.overrides]] +module = [ + "importlib.metadata.*", + "importlib_metadata.*", +] +follow_imports = "skip" +follow_imports_for_stubs = true +disallow_untyped_calls = false + +[[tool.mypy.overrides]] +module = "torch.*" +follow_imports = "skip" +follow_imports_for_stubs = true + +[tool.docformatter] +wrap-summaries = 88 +wrap-descriptions = 88 + +[tool.ruff] +target-version = "py38" +line-length = 88 +select = ["D", "E", "F", "W", "B", "ISC", "C4"] +fixable = ["D", "E", "F", "W", "B", "ISC", "C4"] +ignore = ["B024", "B027"] +exclude = [ + ".bzr", + ".direnv", + ".eggs", + ".git", + ".hg", + ".mypy_cache", + ".nox", + ".pants.d", + ".pytype", + ".ruff_cache", + ".svn", + ".tox", + ".venv", + "__pypackages__", + "_build", + "buck-out", + "build", + "dist", + "node_modules", + "venv", + "proto", +] + +[tool.ruff.pydocstyle] +convention = "numpy" diff --git a/doc/source/ref-changelog.md b/doc/source/ref-changelog.md index 7168386eaf0a..507489e76e7b 100644 --- a/doc/source/ref-changelog.md +++ b/doc/source/ref-changelog.md @@ -14,6 +14,8 @@ - FedNova [#2179](https://github.com/adap/flower/pull/2179) + - HeteroFL [#2439](https://github.com/adap/flower/pull/2439) + ## v1.6.0 (2023-11-28) ### Thanks to our contributors From 48c061343eb8f44ac276728b2027918bf176d0a5 Mon Sep 17 00:00:00 2001 From: Javier Date: Mon, 25 Dec 2023 12:36:16 +0100 Subject: [PATCH 05/30] Retire `MXNet` examples (#2724) Co-authored-by: Taner Topal --- README.md | 5 +---- doc/source/ref-changelog.md | 2 ++ doc/source/tutorial-quickstart-mxnet.rst | 2 ++ examples/mxnet-from-centralized-to-federated/README.md | 2 ++ examples/mxnet-from-centralized-to-federated/pyproject.toml | 5 ++--- .../mxnet-from-centralized-to-federated/requirements.txt | 4 ++-- 
examples/quickstart-mxnet/README.md | 2 ++ examples/quickstart-mxnet/pyproject.toml | 5 ++--- examples/quickstart-mxnet/requirements.txt | 4 ++-- 9 files changed, 17 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index b8b62e8c0c43..750b5cdb4b93 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ design of Flower is based on a few guiding principles: - **Framework-agnostic**: Different machine learning frameworks have different strengths. Flower can be used with any machine learning framework, for example, [PyTorch](https://pytorch.org), - [TensorFlow](https://tensorflow.org), [Hugging Face Transformers](https://huggingface.co/), [PyTorch Lightning](https://pytorchlightning.ai/), [MXNet](https://mxnet.apache.org/), [scikit-learn](https://scikit-learn.org/), [JAX](https://jax.readthedocs.io/), [TFLite](https://tensorflow.org/lite/), [fastai](https://www.fast.ai/), [Pandas](https://pandas.pydata.org/) for federated analytics, or even raw [NumPy](https://numpy.org/) + [TensorFlow](https://tensorflow.org), [Hugging Face Transformers](https://huggingface.co/), [PyTorch Lightning](https://pytorchlightning.ai/), [scikit-learn](https://scikit-learn.org/), [JAX](https://jax.readthedocs.io/), [TFLite](https://tensorflow.org/lite/), [fastai](https://www.fast.ai/), [Pandas](https://pandas.pydata.org/) for federated analytics, or even raw [NumPy](https://numpy.org/) for users who enjoy computing gradients by hand. - **Understandable**: Flower is written with maintainability in mind. The @@ -81,7 +81,6 @@ Stay tuned, more tutorials are coming soon. 
Topics include **Privacy and Securit - [Quickstart (PyTorch)](https://flower.dev/docs/framework/tutorial-quickstart-pytorch.html) - [Quickstart (Hugging Face)](https://flower.dev/docs/framework/tutorial-quickstart-huggingface.html) - [Quickstart (PyTorch Lightning [code example])](https://flower.dev/docs/framework/tutorial-quickstart-pytorch-lightning.html) -- [Quickstart (MXNet)](https://flower.dev/docs/framework/example-mxnet-walk-through.html) - [Quickstart (Pandas)](https://flower.dev/docs/framework/tutorial-quickstart-pandas.html) - [Quickstart (fastai)](https://flower.dev/docs/framework/tutorial-quickstart-fastai.html) - [Quickstart (JAX)](https://flower.dev/docs/framework/tutorial-quickstart-jax.html) @@ -124,7 +123,6 @@ Quickstart examples: - [Quickstart (PyTorch Lightning)](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch-lightning) - [Quickstart (fastai)](https://github.com/adap/flower/tree/main/examples/quickstart-fastai) - [Quickstart (Pandas)](https://github.com/adap/flower/tree/main/examples/quickstart-pandas) -- [Quickstart (MXNet)](https://github.com/adap/flower/tree/main/examples/quickstart-mxnet) - [Quickstart (JAX)](https://github.com/adap/flower/tree/main/examples/quickstart-jax) - [Quickstart (scikit-learn)](https://github.com/adap/flower/tree/main/examples/sklearn-logreg-mnist) - [Quickstart (Android [TFLite])](https://github.com/adap/flower/tree/main/examples/android) @@ -134,7 +132,6 @@ Other [examples](https://github.com/adap/flower/tree/main/examples): - [Raspberry Pi & Nvidia Jetson Tutorial](https://github.com/adap/flower/tree/main/examples/embedded-devices) - [PyTorch: From Centralized to Federated](https://github.com/adap/flower/tree/main/examples/pytorch-from-centralized-to-federated) -- [MXNet: From Centralized to Federated](https://github.com/adap/flower/tree/main/examples/mxnet-from-centralized-to-federated) - [Advanced Flower with 
TensorFlow/Keras](https://github.com/adap/flower/tree/main/examples/advanced-tensorflow) - [Advanced Flower with PyTorch](https://github.com/adap/flower/tree/main/examples/advanced-pytorch) - Single-Machine Simulation of Federated Learning Systems ([PyTorch](https://github.com/adap/flower/tree/main/examples/simulation_pytorch)) ([Tensorflow](https://github.com/adap/flower/tree/main/examples/simulation_tensorflow)) diff --git a/doc/source/ref-changelog.md b/doc/source/ref-changelog.md index 507489e76e7b..c4aad511a4a5 100644 --- a/doc/source/ref-changelog.md +++ b/doc/source/ref-changelog.md @@ -6,6 +6,8 @@ - **General updates to Flower Examples** ([#2381](https://github.com/adap/flower/pull/2381)) +- **Retiring MXNet examples** The development of the MXNet framework has ended and the project is now [archived on GitHub](https://github.com/apache/mxnet). Existing MXNet examples won't receive updates [#2724](https://github.com/adap/flower/pull/2724) + - **Update Flower Baselines** - HFedXGBoost [#2226](https://github.com/adap/flower/pull/2226) diff --git a/doc/source/tutorial-quickstart-mxnet.rst b/doc/source/tutorial-quickstart-mxnet.rst index 149d060e4c00..ff8d4b2087dd 100644 --- a/doc/source/tutorial-quickstart-mxnet.rst +++ b/doc/source/tutorial-quickstart-mxnet.rst @@ -4,6 +4,8 @@ Quickstart MXNet ================ +.. warning:: MXNet is no longer maintained and has been moved into `Attic `_. As a result, we would encourage you to use other ML frameworks alongside Flower, for example, PyTorch. This tutorial might be removed in future versions of Flower. + .. meta:: :description: Check out this Federated Learning quickstart tutorial for using Flower with MXNet to train a Sequential model on MNIST.
diff --git a/examples/mxnet-from-centralized-to-federated/README.md b/examples/mxnet-from-centralized-to-federated/README.md index 839d3b16a1cf..2c3f240d8978 100644 --- a/examples/mxnet-from-centralized-to-federated/README.md +++ b/examples/mxnet-from-centralized-to-federated/README.md @@ -1,5 +1,7 @@ # MXNet: From Centralized To Federated +> Note the MXNet project has ended, and is now in [Attic](https://attic.apache.org/projects/mxnet.html). The MXNet GitHub has also [been archived](https://github.com/apache/mxnet). As a result, this example won't be receiving more updates. Using MXNet is no longer recommended. + This example demonstrates how an already existing centralized MXNet-based machine learning project can be federated with Flower. This introductory example for Flower uses MXNet, but you're not required to be a MXNet expert to run the example. The example will help you to understand how Flower can be used to build federated learning use cases based on an existing MXNet project.
diff --git a/examples/mxnet-from-centralized-to-federated/pyproject.toml b/examples/mxnet-from-centralized-to-federated/pyproject.toml index a0d31f76ebdd..952683eb90f6 100644 --- a/examples/mxnet-from-centralized-to-federated/pyproject.toml +++ b/examples/mxnet-from-centralized-to-federated/pyproject.toml @@ -10,7 +10,6 @@ authors = ["The Flower Authors "] [tool.poetry.dependencies] python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" -# flwr = { path = "../../", develop = true } # Development -mxnet = "1.6.0" +flwr = "1.6.0" +mxnet = "1.9.1" numpy = "1.23.1" diff --git a/examples/mxnet-from-centralized-to-federated/requirements.txt b/examples/mxnet-from-centralized-to-federated/requirements.txt index 73060e27c70c..8dd6f7150dfd 100644 --- a/examples/mxnet-from-centralized-to-federated/requirements.txt +++ b/examples/mxnet-from-centralized-to-federated/requirements.txt @@ -1,3 +1,3 @@ -flwr>=1.0,<2.0 -mxnet==1.6.0 +flwr==1.6.0 +mxnet==1.9.1 numpy==1.23.1 diff --git a/examples/quickstart-mxnet/README.md b/examples/quickstart-mxnet/README.md index 930cec5acdfd..37e01ef2707c 100644 --- a/examples/quickstart-mxnet/README.md +++ b/examples/quickstart-mxnet/README.md @@ -1,5 +1,7 @@ # Flower Example using MXNet +> Note the MXNet project has ended, and is now in [Attic](https://attic.apache.org/projects/mxnet.html). The MXNet GitHub has also [been archived](https://github.com/apache/mxnet). As a result, this example won't be receiving more updates. Using MXNet is no longer recommended. + This example demonstrates how to run a MXNet machine learning project federated with Flower. This introductory example for Flower uses MXNet, but you're not required to be a MXNet expert to run the example. The example will help you to understand how Flower can be used to build federated learning use cases based on an existing MXNet projects.
diff --git a/examples/quickstart-mxnet/pyproject.toml b/examples/quickstart-mxnet/pyproject.toml index a0d31f76ebdd..952683eb90f6 100644 --- a/examples/quickstart-mxnet/pyproject.toml +++ b/examples/quickstart-mxnet/pyproject.toml @@ -10,7 +10,6 @@ authors = ["The Flower Authors "] [tool.poetry.dependencies] python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" -# flwr = { path = "../../", develop = true } # Development -mxnet = "1.6.0" +flwr = "1.6.0" +mxnet = "1.9.1" numpy = "1.23.1" diff --git a/examples/quickstart-mxnet/requirements.txt b/examples/quickstart-mxnet/requirements.txt index 73060e27c70c..8dd6f7150dfd 100644 --- a/examples/quickstart-mxnet/requirements.txt +++ b/examples/quickstart-mxnet/requirements.txt @@ -1,3 +1,3 @@ -flwr>=1.0,<2.0 -mxnet==1.6.0 +flwr==1.6.0 +mxnet==1.9.1 numpy==1.23.1 From 213cc3e947f835e566baf792761babab9ae06aa5 Mon Sep 17 00:00:00 2001 From: Charles Beauville Date: Mon, 25 Dec 2023 12:44:29 +0100 Subject: [PATCH 06/30] Rename draft release workflow and create release (#2658) --- .github/workflows/framework-draft-release.yml | 63 ++++++++++++++ .github/workflows/framework-release.yml | 87 +++++++------------ dev/publish.sh | 21 ----- 3 files changed, 95 insertions(+), 76 deletions(-) create mode 100644 .github/workflows/framework-draft-release.yml delete mode 100755 dev/publish.sh diff --git a/.github/workflows/framework-draft-release.yml b/.github/workflows/framework-draft-release.yml new file mode 100644 index 000000000000..b094350437ec --- /dev/null +++ b/.github/workflows/framework-draft-release.yml @@ -0,0 +1,63 @@ +name: Draft release + +on: + push: + tags: + - "v*.*.*" + +jobs: + publish: + if: ${{ github.repository == 'adap/flower' }} + name: Publish draft + runs-on: ubuntu-22.04 + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Wait for wheel to be built + uses: lewagon/wait-on-check-action@v1.3.1 + with: + ref: ${{ github.ref }} + check-name: 'Build, test and upload wheel' + repo-token: 
${{ secrets.GITHUB_TOKEN }} + wait-interval: 10 + - name: Download wheel + run: | + tag_name=$(echo "${GITHUB_REF_NAME}" | cut -c2-) + echo "TAG_NAME=$tag_name" >> "$GITHUB_ENV" + + wheel_name="flwr-${tag_name}-py3-none-any.whl" + echo "WHEEL_NAME=$wheel_name" >> "$GITHUB_ENV" + + tar_name="flwr-${tag_name}.tar.gz" + echo "TAR_NAME=$tar_name" >> "$GITHUB_ENV" + + wheel_url="https://artifact.flower.dev/py/main/${GITHUB_SHA::7}/${wheel_name}" + tar_url="https://artifact.flower.dev/py/main/${GITHUB_SHA::7}/${tar_name}" + + curl $wheel_url --output $wheel_name + curl $tar_url --output $tar_name + - name: Upload wheel + env: + AWS_DEFAULT_REGION: ${{ secrets. AWS_DEFAULT_REGION }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets. AWS_SECRET_ACCESS_KEY }} + run: | + aws s3 cp --content-disposition "attachment" --cache-control "no-cache" ./${{ env.WHEEL_NAME }} s3://artifact.flower.dev/py/release/v${{ env.TAG_NAME }}/${{ env.WHEEL_NAME }} + aws s3 cp --content-disposition "attachment" --cache-control "no-cache" ./${{ env.TAR_NAME }} s3://artifact.flower.dev/py/release/v${{ env.TAG_NAME }}/${{ env.TAR_NAME }} + + - name: Generate body + run: | + ./dev/get-latest-changelog.sh > body.md + cat body.md + + - name: Release + uses: softprops/action-gh-release@v1 + with: + body_path: ./body.md + draft: true + name: Flower ${{ env.TAG_NAME }} + files: | + ${{ env.WHEEL_NAME }} + ${{ env.TAR_NAME }} diff --git a/.github/workflows/framework-release.yml b/.github/workflows/framework-release.yml index eab15a51d217..0f3cda8abae3 100644 --- a/.github/workflows/framework-release.yml +++ b/.github/workflows/framework-release.yml @@ -1,63 +1,40 @@ -name: Release Framework +name: Publish `flwr` release on PyPI on: - push: - tags: - - "v*.*.*" - + release: + types: [released] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_id || github.event.pull_request.number || github.ref }} + cancel-in-progress: 
true + jobs: publish: if: ${{ github.repository == 'adap/flower' }} - name: Publish draft + name: Publish release runs-on: ubuntu-22.04 steps: - - name: Checkout - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - name: Wait for wheel to be built - uses: lewagon/wait-on-check-action@v1.3.1 - with: - ref: ${{ github.ref }} - check-name: 'Build, test and upload wheel' - repo-token: ${{ secrets.GITHUB_TOKEN }} - wait-interval: 10 - - name: Download wheel - run: | - tag_name=$(echo "${GITHUB_REF_NAME}" | cut -c2-) - echo "TAG_NAME=$tag_name" >> "$GITHUB_ENV" - - wheel_name="flwr-${tag_name}-py3-none-any.whl" - echo "WHEEL_NAME=$wheel_name" >> "$GITHUB_ENV" - - tar_name="flwr-${tag_name}.tar.gz" - echo "TAR_NAME=$tar_name" >> "$GITHUB_ENV" + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Bootstrap + uses: ./.github/actions/bootstrap + + - name: Get artifacts and publish + env: + GITHUB_REF: ${{ github.ref }} + run: | + TAG_NAME=$(echo "${GITHUB_REF_NAME}" | cut -c2-) + + wheel_name="flwr-${TAG_NAME}-py3-none-any.whl" + tar_name="flwr-${TAG_NAME}.tar.gz" + + wheel_url="https://artifact.flower.dev/py/release/v${TAG_NAME}/${wheel_name}" + tar_url="https://artifact.flower.dev/py/release/v${TAG_NAME}/${tar_name}" + + curl $wheel_url --output $wheel_name + curl $tar_url --output $tar_name - wheel_url="https://artifact.flower.dev/py/main/${GITHUB_SHA::7}/${wheel_name}" - tar_url="https://artifact.flower.dev/py/main/${GITHUB_SHA::7}/${tar_name}" - - curl $wheel_url --output $wheel_name - curl $tar_url --output $tar_name - - name: Upload wheel - env: - AWS_DEFAULT_REGION: ${{ secrets. AWS_DEFAULT_REGION }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets. 
AWS_SECRET_ACCESS_KEY }} - run: | - aws s3 cp --content-disposition "attachment" --cache-control "no-cache" ./${{ env.WHEEL_NAME }} s3://artifact.flower.dev/py/release/v${{ env.TAG_NAME }}/${{ env.WHEEL_NAME }} - aws s3 cp --content-disposition "attachment" --cache-control "no-cache" ./${{ env.TAR_NAME }} s3://artifact.flower.dev/py/release/v${{ env.TAG_NAME }}/${{ env.TAR_NAME }} - - - name: Generate body - run: | - ./dev/get-latest-changelog.sh > body.md - cat body.md - - - name: Release - uses: softprops/action-gh-release@v1 - with: - body_path: ./body.md - draft: true - name: Flower ${{ env.TAG_NAME }} - files: | - ${{ env.WHEEL_NAME }} - ${{ env.TAR_NAME }} + python -m poetry publish -u __token__ -p ${{ secrets.PYPI_TOKEN }} diff --git a/dev/publish.sh b/dev/publish.sh deleted file mode 100755 index fb4df1694530..000000000000 --- a/dev/publish.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -# Copyright 2022 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ - -python -m poetry publish From 0351656ff309989c15efd90a7b5d4ac7d6da5b75 Mon Sep 17 00:00:00 2001 From: Daniel Nata Nugraha Date: Mon, 25 Dec 2023 12:52:30 +0100 Subject: [PATCH 07/30] Update Android manifest to include internet permission (#2672) --- src/kotlin/flwr/src/main/AndroidManifest.xml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/kotlin/flwr/src/main/AndroidManifest.xml b/src/kotlin/flwr/src/main/AndroidManifest.xml index 8bdb7e14b389..3cb3262db448 100644 --- a/src/kotlin/flwr/src/main/AndroidManifest.xml +++ b/src/kotlin/flwr/src/main/AndroidManifest.xml @@ -1,4 +1,5 @@ - + + From fdf0c28c759600b237980addb663a4cac9651d3c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Dec 2023 23:04:43 +0100 Subject: [PATCH 08/30] Bump lewagon/wait-on-check-action from 1.3.1 to 1.3.3 (#2756) Bumps [lewagon/wait-on-check-action](https://github.com/lewagon/wait-on-check-action) from 1.3.1 to 1.3.3. - [Release notes](https://github.com/lewagon/wait-on-check-action/releases) - [Commits](https://github.com/lewagon/wait-on-check-action/compare/v1.3.1...v1.3.3) --- updated-dependencies: - dependency-name: lewagon/wait-on-check-action dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/framework-draft-release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/framework-draft-release.yml b/.github/workflows/framework-draft-release.yml index b094350437ec..959d17249765 100644 --- a/.github/workflows/framework-draft-release.yml +++ b/.github/workflows/framework-draft-release.yml @@ -16,7 +16,7 @@ jobs: with: fetch-depth: 0 - name: Wait for wheel to be built - uses: lewagon/wait-on-check-action@v1.3.1 + uses: lewagon/wait-on-check-action@v1.3.3 with: ref: ${{ github.ref }} check-name: 'Build, test and upload wheel' From 43aa075a2586bd1f7705b87cabc16842ed1c8054 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Dec 2023 22:13:17 +0000 Subject: [PATCH 09/30] Update ruff requirement from ==0.1.4 to ==0.1.9 (#2753) Updates the requirements on [ruff](https://github.com/astral-sh/ruff) to permit the latest version. - [Release notes](https://github.com/astral-sh/ruff/releases) - [Changelog](https://github.com/astral-sh/ruff/blob/main/CHANGELOG.md) - [Commits](https://github.com/astral-sh/ruff/compare/v0.1.4...v0.1.9) --- updated-dependencies: - dependency-name: ruff dependency-type: direct:development ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Daniel J. 
Beutel --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 57f43af6ac73..8a300afa8c84 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -109,7 +109,7 @@ furo = "==2023.9.10" sphinx-reredirects = "==0.1.3" nbsphinx = "==0.9.3" nbstripout = "==0.6.1" -ruff = "==0.1.4" +ruff = "==0.1.9" sphinx-argparse = "==0.4.0" pipreqs = "==0.4.13" mdformat-gfm = "==0.3.5" From 1a0471894110cdb366b06d63a76002216a793850 Mon Sep 17 00:00:00 2001 From: Gustavo Bertoli Date: Thu, 28 Dec 2023 20:03:56 +0100 Subject: [PATCH 10/30] Add FedAvgM baseline (#2246) Co-authored-by: Daniel J. Beutel Co-authored-by: jafermarq --- baselines/fedavgm/LICENSE | 202 ++ baselines/fedavgm/README.md | 220 ++ ..._CNN_vs_TF_v1_x_Example_for_CIFAR_10.ipynb | 1851 +++++++++++++++++ ...ifar10_num-rounds=1000_concentration=1.png | Bin 0 -> 46747 bytes .../fedavgm/_static/concentration_cifar10.png | Bin 0 -> 18925 bytes .../_static/concentration_cifar10_v2.png | Bin 0 -> 21208 bytes ...-fedavgm_vs_fedavgm_rounds=1000_fmnist.png | Bin 0 -> 38106 bytes ...fedavgm_vs_fedavg_rounds=10000_cifar10.png | Bin 0 -> 37530 bytes ..._vs_fedavg_rounds=10000_cifar10_w_1e-9.png | Bin 0 -> 38228 bytes .../fedavgm_vs_fedavg_rounds=1000_fmnist.png | Bin 0 -> 38840 bytes baselines/fedavgm/conf-colab.sh | 24 + baselines/fedavgm/fedavgm/__init__.py | 1 + baselines/fedavgm/fedavgm/client.py | 70 + baselines/fedavgm/fedavgm/common.py | 494 +++++ baselines/fedavgm/fedavgm/conf/base.yaml | 24 + .../fedavgm/fedavgm/conf/dataset/cifar10.yaml | 4 + .../fedavgm/fedavgm/conf/dataset/fmnist.yaml | 4 + baselines/fedavgm/fedavgm/conf/model/cnn.yaml | 5 + .../fedavgm/conf/model/tf_example.yaml | 5 + .../fedavgm/conf/strategy/custom-fedavgm.yaml | 13 + .../fedavgm/fedavgm/conf/strategy/fedavg.yaml | 8 + .../fedavgm/conf/strategy/fedavgm.yaml | 13 + baselines/fedavgm/fedavgm/dataset.py | 57 + .../fedavgm/fedavgm/dataset_preparation.py | 1 + baselines/fedavgm/fedavgm/main.py | 
100 + baselines/fedavgm/fedavgm/models.py | 121 ++ baselines/fedavgm/fedavgm/server.py | 45 + baselines/fedavgm/fedavgm/strategy.py | 201 ++ baselines/fedavgm/fedavgm/utils.py | 61 + baselines/fedavgm/pyproject.toml | 139 ++ doc/source/ref-changelog.md | 2 + 31 files changed, 3665 insertions(+) create mode 100644 baselines/fedavgm/LICENSE create mode 100644 baselines/fedavgm/README.md create mode 100644 baselines/fedavgm/_static/Comparison_CNN_vs_TF_v1_x_Example_for_CIFAR_10.ipynb create mode 100644 baselines/fedavgm/_static/Figure6_cifar10_num-rounds=1000_concentration=1.png create mode 100644 baselines/fedavgm/_static/concentration_cifar10.png create mode 100644 baselines/fedavgm/_static/concentration_cifar10_v2.png create mode 100644 baselines/fedavgm/_static/custom-fedavgm_vs_fedavgm_rounds=1000_fmnist.png create mode 100644 baselines/fedavgm/_static/fedavgm_vs_fedavg_rounds=10000_cifar10.png create mode 100644 baselines/fedavgm/_static/fedavgm_vs_fedavg_rounds=10000_cifar10_w_1e-9.png create mode 100644 baselines/fedavgm/_static/fedavgm_vs_fedavg_rounds=1000_fmnist.png create mode 100644 baselines/fedavgm/conf-colab.sh create mode 100644 baselines/fedavgm/fedavgm/__init__.py create mode 100644 baselines/fedavgm/fedavgm/client.py create mode 100644 baselines/fedavgm/fedavgm/common.py create mode 100644 baselines/fedavgm/fedavgm/conf/base.yaml create mode 100644 baselines/fedavgm/fedavgm/conf/dataset/cifar10.yaml create mode 100644 baselines/fedavgm/fedavgm/conf/dataset/fmnist.yaml create mode 100644 baselines/fedavgm/fedavgm/conf/model/cnn.yaml create mode 100644 baselines/fedavgm/fedavgm/conf/model/tf_example.yaml create mode 100644 baselines/fedavgm/fedavgm/conf/strategy/custom-fedavgm.yaml create mode 100644 baselines/fedavgm/fedavgm/conf/strategy/fedavg.yaml create mode 100644 baselines/fedavgm/fedavgm/conf/strategy/fedavgm.yaml create mode 100644 baselines/fedavgm/fedavgm/dataset.py create mode 100644 baselines/fedavgm/fedavgm/dataset_preparation.py create 
mode 100644 baselines/fedavgm/fedavgm/main.py create mode 100644 baselines/fedavgm/fedavgm/models.py create mode 100644 baselines/fedavgm/fedavgm/server.py create mode 100644 baselines/fedavgm/fedavgm/strategy.py create mode 100644 baselines/fedavgm/fedavgm/utils.py create mode 100644 baselines/fedavgm/pyproject.toml diff --git a/baselines/fedavgm/LICENSE b/baselines/fedavgm/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/baselines/fedavgm/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/baselines/fedavgm/README.md b/baselines/fedavgm/README.md new file mode 100644 index 000000000000..0953331964a7 --- /dev/null +++ b/baselines/fedavgm/README.md @@ -0,0 +1,220 @@ +--- +title: Measuring the effects of non-identical data distribution for federated visual classification +url: https://arxiv.org/abs/1909.06335 +labels: [non-iid, image classification] +dataset: [CIFAR-10, Fashion-MNIST] +--- + +# Measuring the effects of non-identical data distribution for federated visual classification + +> Note: If you use this baseline in your work, please remember to cite the original authors of the paper as well as the Flower paper. + +**Paper:** [arxiv.org/abs/1909.06335](https://arxiv.org/abs/1909.06335) + +**Authors:** Tzu-Ming Harry Hsu, Hang Qi, Matthew Brown + +**Abstract:** Federated Learning enables visual models to be trained in a privacy-preserving way using real-world data from mobile devices. Given their distributed nature, the statistics of the data across these devices is likely to differ significantly. In this work, we look at the effect such non-identical data distributions has on visual classification via Federated Learning. We propose a way to synthesize datasets with a continuous range of identicalness and provide performance measures for the Federated Averaging algorithm. 
We show that performance degrades as distributions differ more, and propose a mitigation strategy via server momentum. Experiments on CIFAR-10 demonstrate improved classification performance over a range of non-identicalness, with classification accuracy improved from 30.1% to 76.9% in the most skewed settings. + + +## About this baseline + +**What’s implemented:** The code in this directory evaluates the effects of non-identical data distribution for visual classification task based on paper _Measuring the effects of non-identical data distribution for federated visual classification_ (Hsu et al., 2019). It reproduces the FedAvgM and FedAvg performance curves for different non-identical-ness of the dataset (CIFAR-10 and Fashion-MNIST). _Figure 5 in the paper, section 4.2._ + +**Datasets:** CIFAR-10, and Fashion-MNIST + +**Hardware Setup:** This baseline was evaluated in a regular PC without GPU (Intel i7-10710U CPU, and 32 Gb RAM). The major constraint is to run a huge number of rounds such as the reference paper that reports 10.000 round for each case evaluated. + +**Contributors:** Gustavo Bertoli [(@gubertoli)](https://github.com/gubertoli) + +## Experimental Setup + +**Task:** Image Classification + +**Model:** This directory implements a CNN model similar to the one used on the seminal FedAvg paper (`models.py`): + +- McMahan, B., Moore, E., Ramage, D., Hampson, S., & y Arcas, B. A. (2017, April). Communication-efficient learning of deep networks from decentralized data. In Artificial intelligence and statistics (pp. 1273-1282). PMLR. ([Link](http://proceedings.mlr.press/v54/mcmahan17a/mcmahan17a.pdf)): + +As the following excerpt: + +> "*We also ran experiments on the CIFAR-10 dataset... 
The model architecture was taken from the TensorFlow tutorial [38], which consists of two convolutional layers followed by two fully connected layers and then a linear transformation layer to produce logits, for a total of about 10^6 parameters."* + +Regarding this architecture, the historical references mentioned in the FedAvg and FedAvgM papers are [this](https://web.archive.org/web/20190415103404/https://www.tensorflow.org/tutorials/images/deep_cnn) and [this](https://web.archive.org/web/20170807002954/https://github.com/tensorflow/models/blob/master/tutorials/image/cifar10/cifar10.py). + +It is important to highlight the rationale for this CNN model as stated in the FedAvgM paper: + +> "*This model is not the state-of-the-art on the CIFAR-10 dataset, but is sufficient to show relative performance for the purposes of our investigation."* + +**The default CNN model in use on this baseline has a centralized accuracy of ~0.74. That is different from the reported 0.86 accuracy from the original FedAvg paper. But it is still sufficient to show the relative performance for the purposes of the FedAvgM investigation.** + +**Dataset:** This baseline includes the CIFAR-10 and Fashion-MNIST datasets. By default it will run with the CIFAR-10. The data partition uses a configurable Latent Dirichlet Allocation (LDA) distribution (`concentration` parameter equals 0.1 as default) to create **non-iid distributions** between the clients. The interpretation of this `concentration` (α) is that as α→∞ all clients have identical distributions, and as α→0 each client holds samples from only one class. 
+ +| Dataset | # classes | # partitions | partition method | partition settings| +| :------ | :---: | :---: | :---: | :---: | +| CIFAR-10 | 10 | `num_clients` | Latent Dirichlet Allocation (LDA) | `concentration` | +| Fashion-MNIST | 10 | `num_clients` | Latent Dirichlet Allocation (LDA) | `concentration` | + +**Data distribution:** The following figure illustrates the use of multiple `concentration` values to generate the data distribution over 30 clients for CIFAR-10 (10 classes) - [source code](fedavgm/utils.py): + +![](_static/concentration_cifar10_v2.png) + +**Training Hyperparameters:** +The following table shows the main hyperparameters for this baseline with their default value (i.e. the value used if you run `python main.py` directly) + +| Description | Default Value | +| ----------- | ----- | +| total clients | 10 | +| number of rounds | 5 | +| model | CNN | +| strategy | Custom FedAvgM | +| dataset | CIFAR-10 | +| concentration | 0.1 | +| fraction evaluate | 0 | +| num cpus | 1 | +| num gpus | 0 | +| server momentum | 0.9 | +| server learning rate | 1.0 | +| server reporting fraction | 0.05 | +| client local epochs | 1 | +| client batch size | 64 | +| client learning rate | 0.01 | + +### Custom FedAvgM +In contrast to the initial implementation found in Flower v1.5.0, our baseline incorporates the Nesterov accelerated gradient as a pivotal component of the momentum applied to the server model. It is worth emphasizing that the inclusion of Nesterov momentum aligns with the original definition of FedAvgM in the research paper. + +To use the original Flower implementation, use the argument `strategy=fedavgm`. By default, the custom implementation is used. But, you can also refer to it on the command line as `strategy=custom-fedavgm`. + +## Environment Setup + +### Specifying the Python Version + +This baseline was tested with Python 3.10.6 and following the steps below to construct the Python environment and install all dependencies. 
Both [`pyenv`](https://github.com/pyenv/pyenv) and [`poetry`](https://python-poetry.org/docs/) are assumed to be already present in your system. + +```bash +# Cd to your baseline directory (i.e. where the `pyproject.toml` is), then +pyenv local 3.10.6 + +# Set that version for poetry +poetry env use 3.10.6 + +# Install the base Poetry environment +poetry install + +# Activate the environment +poetry shell +``` + +### Google Colab +If you want to set up the environment on Google Colab, please execute the script `conf-colab.sh`; just use the Colab terminal and run the following: + +```bash +chmod +x conf-colab.sh +./conf-colab.sh +``` + +## Running the Experiments + +To run this FedAvgM with CIFAR-10 baseline, first ensure you have activated your Poetry environment (execute `poetry shell` from this directory), then: + +```bash +python -m fedavgm.main # this will run using the default setting in the `conf/base.yaml` + +# you can override settings directly from the command line + +python -m fedavgm.main strategy=fedavg num_clients=1000 num_rounds=50 # will set the FedAvg with 1000 clients and 50 rounds + +python -m fedavgm.main dataset=fmnist noniid.concentration=10 # use the Fashion-MNIST dataset and a different concentration for the LDA-based partition + +python -m fedavgm.main server.reporting_fraction=0.2 client.local_epochs=5 # will set the reporting fraction to 20% and the local epochs in the clients to 5 +``` + +## Expected Results + +### CIFAR-10 +Similar to FedAvgM paper as reference, the CIFAR-10 evaluation runs 10,000 rounds. + +> In order to speedup the execution of these experiments, the evaluation of the _global model_ on the test set only takes place after the last round. The highest accuracy is achieved towards the last rounds, not necessarily in the last. If you wish to evaluate the _global model_ on the test set (or a validation set) more frequently, edit `get_evaluate_fn` in `server.py`. 
Overall, running the experiments as shown below demonstrates that `FedAvgM` is consistently superior to `FedAvg`. + +For the FedAvgM evaluation, a hyperparameter search over server momentum and client learning rate (similar to Figure 6 reported below) was performed for each of the concentrations under analysis, using the following commands: + +- Concentration = 1e-5 and 1e-9 (extreme non-iid) +```bash +python -m fedavgm.main --multirun client.local_epochs=1 noniid.concentration=1e-5,1e-9 strategy=custom-fedavgm,fedavg \ +server.reporting_fraction=0.05 num_rounds=10000 num_clients=100 \ +dataset=cifar10 client.lr=0.0003 server.momentum=0.99 +``` + +- Concentration = 0.01 +```bash +python -m fedavgm.main --multirun client.local_epochs=1 noniid.concentration=0.01 strategy=custom-fedavgm,fedavg \ +server.reporting_fraction=0.05 num_rounds=10000 num_clients=100 \ +dataset=cifar10 client.lr=0.003 server.momentum=0.97 +``` + +- Concentration = 0.1 +```bash +python -m fedavgm.main --multirun client.local_epochs=1 noniid.concentration=0.1 strategy=custom-fedavgm,fedavg \ +server.reporting_fraction=0.05 num_rounds=10000 num_clients=100 \ +dataset=cifar10 client.lr=0.0003 server.momentum=0.99 +``` + +- Concentration = 1 +```bash +python -m fedavgm.main --multirun client.local_epochs=1 noniid.concentration=1 strategy=custom-fedavgm,fedavg \ +server.reporting_fraction=0.05 num_rounds=10000 num_clients=100 \ +dataset=cifar10 client.lr=0.0003 server.momentum=0.997 +``` + +- Concentration = 10 +```bash +python -m fedavgm.main --multirun client.local_epochs=1 noniid.concentration=10 strategy=custom-fedavgm,fedavg \ +server.reporting_fraction=0.05 num_rounds=10000 num_clients=100 \ +dataset=cifar10 client.lr=0.003 server.momentum=0.9 +``` + +Summarizing all the results: + +![](_static/fedavgm_vs_fedavg_rounds=10000_cifar10_w_1e-9.png) + +The findings align with the report in the original FedAvgM paper that *"To prevent client updates from diverging, we additionally have to use a 
combination of low absolute learning rate and high momentum"*. + +The following command reproduces the same behavior of Figure 6 from FedAvgM paper for the case of Local Epoch E=1, Reporting Fraction C=0.05, and concentration (α) = 1. In this example, it runs just 1,000 rounds: + +```bash +python -m fedavgm.main --multirun client.local_epochs=1 noniid.concentration=1 \ +strategy=custom-fedavgm server.reporting_fraction=0.05 num_rounds=100 num_clients=100 \ +dataset=cifar10 client.lr=0.0001,0.0003,0.001,0.003,0.01,0.03,0.1,0.3 \ +server.momentum=0.7,0.9,0.97,0.99,0.997 +``` + +![](_static/Figure6_cifar10_num-rounds=1000_concentration=1.png) + + +--- +### Fashion-MNIST + +```bash +python -m fedavgm.main --multirun client.local_epochs=1 \ +noniid.concentration=0.001,0.01,0.1,1,10,100 strategy=custom-fedavgm,fedavg \ +server.reporting_fraction=0.05 num_rounds=1000 \ +num_clients=100 dataset=fmnist server.momentum=0.97 client.lr=0.003 +``` +The above command will evaluate the custom FedAvgM versus FedAvg on Fashion-MNIST datasets. It uses 100 clients with a reporting fraction of 5% during 1000 rounds. 
To evaluate the non-iid aspects, this execution exercises concentrations of [100, 10, 1, 0.1, 0.01, 0.001]: + +![](_static/fedavgm_vs_fedavg_rounds=1000_fmnist.png) + +#### Comparison between the Custom-FedAvgM and FedAvgM + +To compare the improvement of the FedAvgM with Nesterov momentum (`strategy=custom-fedavgm`) and the FedAvgM without the Nesterov momentum (`strategy=fedavgm`), here we use the results of the previous run with the addition of the same conditions for the `fedavgm` strategy as follows: + +```bash +python -m fedavgm.main --multirun client.local_epochs=1 \ +noniid.concentration=0.001,0.01,0.1,1,10,100 strategy=fedavgm \ +server.reporting_fraction=0.05 num_rounds=1000 \ +num_clients=100 dataset=fmnist server.momentum=0.97 client.lr=0.003 +``` + +![](_static/custom-fedavgm_vs_fedavgm_rounds=1000_fmnist.png) + +Overall, FedAvgM with Nesterov momentum outperforms the FedAvgM without Nesterov momentum, this behavior being clearest for higher non-iidness (0.01 and 0.001). In these higher non-iidness settings, the test accuracies for FedAvgM without Nesterov momentum are worse than those of FedAvg. +For larger concentrations (1, 10, 100), some runs were observed in which the centralized evaluation resulted in a loss equal to NaN or Inf; thus multiple runs were required to guarantee the accuracies reported. 
+ diff --git a/baselines/fedavgm/_static/Comparison_CNN_vs_TF_v1_x_Example_for_CIFAR_10.ipynb b/baselines/fedavgm/_static/Comparison_CNN_vs_TF_v1_x_Example_for_CIFAR_10.ipynb new file mode 100644 index 000000000000..fac837d145e3 --- /dev/null +++ b/baselines/fedavgm/_static/Comparison_CNN_vs_TF_v1_x_Example_for_CIFAR_10.ipynb @@ -0,0 +1,1851 @@ +{ + "cells": [ + { + "cell_type": "code", + "source": [ + "import numpy as np\n", + "from keras.optimizers import SGD\n", + "from keras.regularizers import l2\n", + "from tensorflow import keras\n", + "from tensorflow.nn import local_response_normalization\n", + "from keras.utils import to_categorical\n", + "import matplotlib.pyplot as plt" + ], + "metadata": { + "id": "Rp9LUn54SUTu" + }, + "execution_count": 15, + "outputs": [] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": { + "id": "7tTxE8D6bD6g" + }, + "outputs": [], + "source": [ + "def tf_example(input_shape, num_classes):\n", + " \"\"\"CNN Model from TensorFlow v1.x example.\n", + "\n", + " This is the model referenced on the FedAvg paper.\n", + "\n", + " Reference:\n", + " https://web.archive.org/web/20170807002954/https://github.com/tensorflow/models/blob/master/tutorials/image/cifar10/cifar10.py\n", + " \"\"\"\n", + " input_shape = tuple(input_shape)\n", + "\n", + " weight_decay = 0.004\n", + " model = keras.Sequential(\n", + " [\n", + " keras.layers.Conv2D(\n", + " 64,\n", + " (5, 5),\n", + " padding=\"same\",\n", + " activation=\"relu\",\n", + " input_shape=input_shape,\n", + " ),\n", + " keras.layers.MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\"),\n", + " keras.layers.Lambda(\n", + " local_response_normalization,\n", + " arguments={\n", + " \"depth_radius\": 4,\n", + " \"bias\": 1.0,\n", + " \"alpha\": 0.001 / 9.0,\n", + " \"beta\": 0.75,\n", + " },\n", + " ),\n", + " keras.layers.Conv2D(\n", + " 64,\n", + " (5, 5),\n", + " padding=\"same\",\n", + " activation=\"relu\",\n", + " ),\n", + " keras.layers.Lambda(\n", + " 
local_response_normalization,\n", + " arguments={\n", + " \"depth_radius\": 4,\n", + " \"bias\": 1.0,\n", + " \"alpha\": 0.001 / 9.0,\n", + " \"beta\": 0.75,\n", + " },\n", + " ),\n", + " keras.layers.MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\"),\n", + " keras.layers.Flatten(),\n", + " keras.layers.Dense(\n", + " 384, activation=\"relu\", kernel_regularizer=l2(weight_decay)\n", + " ),\n", + " keras.layers.Dense(\n", + " 192, activation=\"relu\", kernel_regularizer=l2(weight_decay)\n", + " ),\n", + " keras.layers.Dense(num_classes, activation=\"softmax\"),\n", + " ]\n", + " )\n", + " optimizer = SGD(learning_rate=0.1)\n", + " model.compile(\n", + " loss=\"categorical_crossentropy\", optimizer=optimizer, metrics=[\"accuracy\"]\n", + " )\n", + "\n", + " return model\n", + "\n" + ] + }, + { + "cell_type": "code", + "source": [ + "def cifar10(num_classes, input_shape):\n", + " \"\"\"Prepare the CIFAR-10.\n", + "\n", + " This method considers CIFAR-10 for creating both train and test sets. The sets are\n", + " already normalized.\n", + " \"\"\"\n", + " print(f\">>> [Dataset] Loading CIFAR-10. {num_classes} | {input_shape}.\")\n", + " (x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()\n", + " x_train = x_train.astype(\"float32\") / 255\n", + " x_test = x_test.astype(\"float32\") / 255\n", + " input_shape = x_train.shape[1:]\n", + " num_classes = len(np.unique(y_train))\n", + "\n", + " return x_train, y_train, x_test, y_test, input_shape, num_classes" + ], + "metadata": { + "id": "vuQykx1uSXHk" + }, + "execution_count": 17, + "outputs": [] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "FMph7H-qbHHR", + "outputId": "45cf4a68-7054-460e-bcd7-c353338dc387" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + ">>> [Dataset] Loading CIFAR-10. 
10 | (32, 32, 3).\n" + ] + } + ], + "source": [ + "x_train, y_train, x_test, y_test, input_shape,num_classes = cifar10(10, (32,32,3))\n" + ] + }, + { + "cell_type": "code", + "source": [ + "EPOCHS=350\n", + "BATCH_SIZE=128" + ], + "metadata": { + "id": "AD2qsybwX6uR" + }, + "execution_count": 19, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "---" + ], + "metadata": { + "id": "531ZRrY2SY85" + } + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": { + "id": "1BO5D4ZBbJJo" + }, + "outputs": [], + "source": [ + "model = tf_example(input_shape, num_classes)" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "8DMMAgw6bK2C", + "outputId": "9c1203a2-7152-4c25-dc1c-1c58b1cf8b8b" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Epoch 1/350\n", + "391/391 [==============================] - 8s 18ms/step - loss: 4.8242 - accuracy: 0.2914\n", + "Epoch 2/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 3.0276 - accuracy: 0.4814\n", + "Epoch 3/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 2.1395 - accuracy: 0.5609\n", + "Epoch 4/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 1.6463 - accuracy: 0.6129\n", + "Epoch 5/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 1.3656 - accuracy: 0.6504\n", + "Epoch 6/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 1.1851 - accuracy: 0.6868\n", + "Epoch 7/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 1.0698 - accuracy: 0.7147\n", + "Epoch 8/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.9918 - accuracy: 0.7350\n", + "Epoch 9/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.9465 - accuracy: 0.7551\n", + "Epoch 10/350\n", + "391/391 
[==============================] - 7s 18ms/step - loss: 0.8991 - accuracy: 0.7747\n", + "Epoch 11/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.8534 - accuracy: 0.7971\n", + "Epoch 12/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.8305 - accuracy: 0.8111\n", + "Epoch 13/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.8070 - accuracy: 0.8265\n", + "Epoch 14/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.7805 - accuracy: 0.8434\n", + "Epoch 15/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.7672 - accuracy: 0.8527\n", + "Epoch 16/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.7504 - accuracy: 0.8647\n", + "Epoch 17/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.7418 - accuracy: 0.8715\n", + "Epoch 18/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.7244 - accuracy: 0.8819\n", + "Epoch 19/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.7205 - accuracy: 0.8871\n", + "Epoch 20/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.7032 - accuracy: 0.8966\n", + "Epoch 21/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6965 - accuracy: 0.8999\n", + "Epoch 22/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6998 - accuracy: 0.9026\n", + "Epoch 23/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6952 - accuracy: 0.9065\n", + "Epoch 24/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6795 - accuracy: 0.9120\n", + "Epoch 25/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6913 - accuracy: 0.9100\n", + "Epoch 26/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6822 - accuracy: 0.9144\n", + "Epoch 27/350\n", + "391/391 
[==============================] - 7s 18ms/step - loss: 0.6773 - accuracy: 0.9174\n", + "Epoch 28/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6885 - accuracy: 0.9155\n", + "Epoch 29/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6588 - accuracy: 0.9239\n", + "Epoch 30/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6631 - accuracy: 0.9230\n", + "Epoch 31/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6819 - accuracy: 0.9193\n", + "Epoch 32/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6557 - accuracy: 0.9271\n", + "Epoch 33/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6806 - accuracy: 0.9224\n", + "Epoch 34/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6525 - accuracy: 0.9299\n", + "Epoch 35/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6500 - accuracy: 0.9303\n", + "Epoch 36/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6701 - accuracy: 0.9234\n", + "Epoch 37/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6627 - accuracy: 0.9297\n", + "Epoch 38/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6507 - accuracy: 0.9321\n", + "Epoch 39/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6497 - accuracy: 0.9323\n", + "Epoch 40/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6593 - accuracy: 0.9304\n", + "Epoch 41/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6528 - accuracy: 0.9325\n", + "Epoch 42/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6294 - accuracy: 0.9365\n", + "Epoch 43/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6596 - accuracy: 0.9304\n", + "Epoch 44/350\n", + "391/391 
[==============================] - 7s 18ms/step - loss: 0.6493 - accuracy: 0.9343\n", + "Epoch 45/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6440 - accuracy: 0.9351\n", + "Epoch 46/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6233 - accuracy: 0.9392\n", + "Epoch 47/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6631 - accuracy: 0.9301\n", + "Epoch 48/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6341 - accuracy: 0.9397\n", + "Epoch 49/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6440 - accuracy: 0.9351\n", + "Epoch 50/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6540 - accuracy: 0.9354\n", + "Epoch 51/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6371 - accuracy: 0.9407\n", + "Epoch 52/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6464 - accuracy: 0.9373\n", + "Epoch 53/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6489 - accuracy: 0.9371\n", + "Epoch 54/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6471 - accuracy: 0.9386\n", + "Epoch 55/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6342 - accuracy: 0.9414\n", + "Epoch 56/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6447 - accuracy: 0.9379\n", + "Epoch 57/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6291 - accuracy: 0.9431\n", + "Epoch 58/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6447 - accuracy: 0.9376\n", + "Epoch 59/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6493 - accuracy: 0.9401\n", + "Epoch 60/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6317 - accuracy: 0.9425\n", + "Epoch 61/350\n", + "391/391 
[==============================] - 7s 18ms/step - loss: 0.6179 - accuracy: 0.9450\n", + "Epoch 62/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6549 - accuracy: 0.9370\n", + "Epoch 63/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6333 - accuracy: 0.9449\n", + "Epoch 64/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6345 - accuracy: 0.9409\n", + "Epoch 65/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6320 - accuracy: 0.9440\n", + "Epoch 66/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6361 - accuracy: 0.9423\n", + "Epoch 67/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6285 - accuracy: 0.9444\n", + "Epoch 68/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6324 - accuracy: 0.9427\n", + "Epoch 69/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6427 - accuracy: 0.9397\n", + "Epoch 70/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6429 - accuracy: 0.9436\n", + "Epoch 71/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6226 - accuracy: 0.9465\n", + "Epoch 72/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6406 - accuracy: 0.9411\n", + "Epoch 73/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6197 - accuracy: 0.9470\n", + "Epoch 74/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6285 - accuracy: 0.9434\n", + "Epoch 75/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6307 - accuracy: 0.9447\n", + "Epoch 76/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6243 - accuracy: 0.9465\n", + "Epoch 77/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6274 - accuracy: 0.9468\n", + "Epoch 78/350\n", + "391/391 
[==============================] - 7s 18ms/step - loss: 0.6397 - accuracy: 0.9432\n", + "Epoch 79/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6282 - accuracy: 0.9468\n", + "Epoch 80/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6408 - accuracy: 0.9434\n", + "Epoch 81/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6368 - accuracy: 0.9468\n", + "Epoch 82/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6158 - accuracy: 0.9499\n", + "Epoch 83/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6100 - accuracy: 0.9478\n", + "Epoch 84/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6321 - accuracy: 0.9429\n", + "Epoch 85/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6260 - accuracy: 0.9477\n", + "Epoch 86/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6235 - accuracy: 0.9463\n", + "Epoch 87/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6186 - accuracy: 0.9493\n", + "Epoch 88/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6155 - accuracy: 0.9481\n", + "Epoch 89/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6702 - accuracy: 0.9374\n", + "Epoch 90/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6188 - accuracy: 0.9502\n", + "Epoch 91/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6410 - accuracy: 0.9439\n", + "Epoch 92/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6052 - accuracy: 0.9528\n", + "Epoch 93/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6349 - accuracy: 0.9431\n", + "Epoch 94/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6216 - accuracy: 0.9486\n", + "Epoch 95/350\n", + "391/391 
[==============================] - 7s 18ms/step - loss: 0.6128 - accuracy: 0.9497\n", + "Epoch 96/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6286 - accuracy: 0.9469\n", + "Epoch 97/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6095 - accuracy: 0.9515\n", + "Epoch 98/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6124 - accuracy: 0.9487\n", + "Epoch 99/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6267 - accuracy: 0.9482\n", + "Epoch 100/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6323 - accuracy: 0.9459\n", + "Epoch 101/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6116 - accuracy: 0.9507\n", + "Epoch 102/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6192 - accuracy: 0.9478\n", + "Epoch 103/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6229 - accuracy: 0.9482\n", + "Epoch 104/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6261 - accuracy: 0.9486\n", + "Epoch 105/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6140 - accuracy: 0.9521\n", + "Epoch 106/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6256 - accuracy: 0.9476\n", + "Epoch 107/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6118 - accuracy: 0.9525\n", + "Epoch 108/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6064 - accuracy: 0.9502\n", + "Epoch 109/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6161 - accuracy: 0.9487\n", + "Epoch 110/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6105 - accuracy: 0.9513\n", + "Epoch 111/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6302 - accuracy: 0.9468\n", + "Epoch 112/350\n", + 
"391/391 [==============================] - 7s 18ms/step - loss: 0.6022 - accuracy: 0.9534\n", + "Epoch 113/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5993 - accuracy: 0.9518\n", + "Epoch 114/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6260 - accuracy: 0.9462\n", + "Epoch 115/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6026 - accuracy: 0.9538\n", + "Epoch 116/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6144 - accuracy: 0.9499\n", + "Epoch 117/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6054 - accuracy: 0.9516\n", + "Epoch 118/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6122 - accuracy: 0.9504\n", + "Epoch 119/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6187 - accuracy: 0.9506\n", + "Epoch 120/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6030 - accuracy: 0.9524\n", + "Epoch 121/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6078 - accuracy: 0.9513\n", + "Epoch 122/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6113 - accuracy: 0.9503\n", + "Epoch 123/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6080 - accuracy: 0.9525\n", + "Epoch 124/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5991 - accuracy: 0.9539\n", + "Epoch 125/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5985 - accuracy: 0.9529\n", + "Epoch 126/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6103 - accuracy: 0.9509\n", + "Epoch 127/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5947 - accuracy: 0.9557\n", + "Epoch 128/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5945 - accuracy: 0.9532\n", + "Epoch 
129/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6059 - accuracy: 0.9520\n", + "Epoch 130/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6076 - accuracy: 0.9517\n", + "Epoch 131/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6134 - accuracy: 0.9520\n", + "Epoch 132/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5950 - accuracy: 0.9546\n", + "Epoch 133/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5881 - accuracy: 0.9557\n", + "Epoch 134/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6095 - accuracy: 0.9494\n", + "Epoch 135/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6116 - accuracy: 0.9537\n", + "Epoch 136/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5860 - accuracy: 0.9554\n", + "Epoch 137/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6058 - accuracy: 0.9519\n", + "Epoch 138/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6043 - accuracy: 0.9542\n", + "Epoch 139/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5921 - accuracy: 0.9556\n", + "Epoch 140/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5983 - accuracy: 0.9530\n", + "Epoch 141/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5987 - accuracy: 0.9537\n", + "Epoch 142/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5983 - accuracy: 0.9544\n", + "Epoch 143/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5734 - accuracy: 0.9576\n", + "Epoch 144/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5895 - accuracy: 0.9534\n", + "Epoch 145/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6068 - accuracy: 0.9519\n", + 
"Epoch 146/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5973 - accuracy: 0.9548\n", + "Epoch 147/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5786 - accuracy: 0.9566\n", + "Epoch 148/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5833 - accuracy: 0.9547\n", + "Epoch 149/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6056 - accuracy: 0.9511\n", + "Epoch 150/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6060 - accuracy: 0.9517\n", + "Epoch 151/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5907 - accuracy: 0.9567\n", + "Epoch 152/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5922 - accuracy: 0.9541\n", + "Epoch 153/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5961 - accuracy: 0.9527\n", + "Epoch 154/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5878 - accuracy: 0.9580\n", + "Epoch 155/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5790 - accuracy: 0.9580\n", + "Epoch 156/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5977 - accuracy: 0.9523\n", + "Epoch 157/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5968 - accuracy: 0.9540\n", + "Epoch 158/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5950 - accuracy: 0.9547\n", + "Epoch 159/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5907 - accuracy: 0.9554\n", + "Epoch 160/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5817 - accuracy: 0.9560\n", + "Epoch 161/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5962 - accuracy: 0.9536\n", + "Epoch 162/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5876 - accuracy: 
0.9572\n", + "Epoch 163/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5818 - accuracy: 0.9558\n", + "Epoch 164/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5896 - accuracy: 0.9541\n", + "Epoch 165/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5915 - accuracy: 0.9552\n", + "Epoch 166/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5928 - accuracy: 0.9555\n", + "Epoch 167/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5773 - accuracy: 0.9576\n", + "Epoch 168/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5817 - accuracy: 0.9560\n", + "Epoch 169/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5817 - accuracy: 0.9563\n", + "Epoch 170/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5877 - accuracy: 0.9565\n", + "Epoch 171/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5893 - accuracy: 0.9554\n", + "Epoch 172/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5946 - accuracy: 0.9543\n", + "Epoch 173/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5841 - accuracy: 0.9571\n", + "Epoch 174/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5745 - accuracy: 0.9598\n", + "Epoch 175/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5715 - accuracy: 0.9580\n", + "Epoch 176/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5809 - accuracy: 0.9552\n", + "Epoch 177/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5845 - accuracy: 0.9557\n", + "Epoch 178/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5720 - accuracy: 0.9591\n", + "Epoch 179/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5901 - 
accuracy: 0.9541\n", + "Epoch 180/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5667 - accuracy: 0.9608\n", + "Epoch 181/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5857 - accuracy: 0.9552\n", + "Epoch 182/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5694 - accuracy: 0.9613\n", + "Epoch 183/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5732 - accuracy: 0.9574\n", + "Epoch 184/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5861 - accuracy: 0.9562\n", + "Epoch 185/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5737 - accuracy: 0.9580\n", + "Epoch 186/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5816 - accuracy: 0.9584\n", + "Epoch 187/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5566 - accuracy: 0.9602\n", + "Epoch 188/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5664 - accuracy: 0.9576\n", + "Epoch 189/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5911 - accuracy: 0.9535\n", + "Epoch 190/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5742 - accuracy: 0.9595\n", + "Epoch 191/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5748 - accuracy: 0.9559\n", + "Epoch 192/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5606 - accuracy: 0.9604\n", + "Epoch 193/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.6116 - accuracy: 0.9508\n", + "Epoch 194/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5868 - accuracy: 0.9591\n", + "Epoch 195/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5400 - accuracy: 0.9650\n", + "Epoch 196/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 
0.5624 - accuracy: 0.9574\n", + "Epoch 197/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5864 - accuracy: 0.9554\n", + "Epoch 198/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5773 - accuracy: 0.9585\n", + "Epoch 199/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5699 - accuracy: 0.9580\n", + "Epoch 200/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5703 - accuracy: 0.9595\n", + "Epoch 201/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5723 - accuracy: 0.9601\n", + "Epoch 202/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5641 - accuracy: 0.9591\n", + "Epoch 203/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5812 - accuracy: 0.9565\n", + "Epoch 204/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5653 - accuracy: 0.9612\n", + "Epoch 205/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5697 - accuracy: 0.9592\n", + "Epoch 206/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5726 - accuracy: 0.9591\n", + "Epoch 207/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5570 - accuracy: 0.9612\n", + "Epoch 208/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5598 - accuracy: 0.9599\n", + "Epoch 209/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5709 - accuracy: 0.9578\n", + "Epoch 210/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5836 - accuracy: 0.9563\n", + "Epoch 211/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5621 - accuracy: 0.9613\n", + "Epoch 212/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5722 - accuracy: 0.9582\n", + "Epoch 213/350\n", + "391/391 [==============================] - 7s 18ms/step - 
loss: 0.5483 - accuracy: 0.9624\n", + "Epoch 214/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5708 - accuracy: 0.9563\n", + "Epoch 215/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5746 - accuracy: 0.9572\n", + "Epoch 216/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5840 - accuracy: 0.9584\n", + "Epoch 217/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5545 - accuracy: 0.9623\n", + "Epoch 218/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5402 - accuracy: 0.9628\n", + "Epoch 219/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5546 - accuracy: 0.9591\n", + "Epoch 220/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5762 - accuracy: 0.9552\n", + "Epoch 221/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5596 - accuracy: 0.9604\n", + "Epoch 222/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5548 - accuracy: 0.9610\n", + "Epoch 223/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5586 - accuracy: 0.9608\n", + "Epoch 224/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5504 - accuracy: 0.9612\n", + "Epoch 225/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5496 - accuracy: 0.9607\n", + "Epoch 226/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5763 - accuracy: 0.9562\n", + "Epoch 227/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5664 - accuracy: 0.9602\n", + "Epoch 228/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5404 - accuracy: 0.9648\n", + "Epoch 229/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5603 - accuracy: 0.9580\n", + "Epoch 230/350\n", + "391/391 [==============================] - 7s 
18ms/step - loss: 0.5574 - accuracy: 0.9610\n", + "Epoch 231/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5575 - accuracy: 0.9586\n", + "Epoch 232/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5660 - accuracy: 0.9585\n", + "Epoch 233/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5427 - accuracy: 0.9640\n", + "Epoch 234/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5468 - accuracy: 0.9611\n", + "Epoch 235/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5678 - accuracy: 0.9581\n", + "Epoch 236/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5472 - accuracy: 0.9622\n", + "Epoch 237/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5561 - accuracy: 0.9601\n", + "Epoch 238/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5471 - accuracy: 0.9621\n", + "Epoch 239/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5539 - accuracy: 0.9601\n", + "Epoch 240/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5492 - accuracy: 0.9619\n", + "Epoch 241/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5674 - accuracy: 0.9581\n", + "Epoch 242/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5451 - accuracy: 0.9618\n", + "Epoch 243/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5280 - accuracy: 0.9646\n", + "Epoch 244/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5628 - accuracy: 0.9579\n", + "Epoch 245/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5504 - accuracy: 0.9625\n", + "Epoch 246/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5284 - accuracy: 0.9647\n", + "Epoch 247/350\n", + "391/391 
[==============================] - 7s 18ms/step - loss: 0.5277 - accuracy: 0.9629\n", + "Epoch 248/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5490 - accuracy: 0.9599\n", + "Epoch 249/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5582 - accuracy: 0.9601\n", + "Epoch 250/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5701 - accuracy: 0.9587\n", + "Epoch 251/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5274 - accuracy: 0.9664\n", + "Epoch 252/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5343 - accuracy: 0.9618\n", + "Epoch 253/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5601 - accuracy: 0.9586\n", + "Epoch 254/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5544 - accuracy: 0.9608\n", + "Epoch 255/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5447 - accuracy: 0.9631\n", + "Epoch 256/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5355 - accuracy: 0.9634\n", + "Epoch 257/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5321 - accuracy: 0.9625\n", + "Epoch 258/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5554 - accuracy: 0.9593\n", + "Epoch 259/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5533 - accuracy: 0.9608\n", + "Epoch 260/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5511 - accuracy: 0.9618\n", + "Epoch 261/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5180 - accuracy: 0.9667\n", + "Epoch 262/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5495 - accuracy: 0.9582\n", + "Epoch 263/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5347 - accuracy: 0.9640\n", + "Epoch 264/350\n", + 
"391/391 [==============================] - 7s 18ms/step - loss: 0.5289 - accuracy: 0.9639\n", + "Epoch 265/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5340 - accuracy: 0.9623\n", + "Epoch 266/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5463 - accuracy: 0.9604\n", + "Epoch 267/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5383 - accuracy: 0.9639\n", + "Epoch 268/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5421 - accuracy: 0.9614\n", + "Epoch 269/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5213 - accuracy: 0.9651\n", + "Epoch 270/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5470 - accuracy: 0.9599\n", + "Epoch 271/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5388 - accuracy: 0.9634\n", + "Epoch 272/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5384 - accuracy: 0.9630\n", + "Epoch 273/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5326 - accuracy: 0.9638\n", + "Epoch 274/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5442 - accuracy: 0.9609\n", + "Epoch 275/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5384 - accuracy: 0.9634\n", + "Epoch 276/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5302 - accuracy: 0.9627\n", + "Epoch 277/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5403 - accuracy: 0.9617\n", + "Epoch 278/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5325 - accuracy: 0.9647\n", + "Epoch 279/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5370 - accuracy: 0.9619\n", + "Epoch 280/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5357 - accuracy: 0.9640\n", + "Epoch 
281/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5287 - accuracy: 0.9640\n", + "Epoch 282/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5315 - accuracy: 0.9613\n", + "Epoch 283/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5361 - accuracy: 0.9649\n", + "Epoch 284/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5382 - accuracy: 0.9614\n", + "Epoch 285/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5313 - accuracy: 0.9637\n", + "Epoch 286/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5414 - accuracy: 0.9618\n", + "Epoch 287/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5197 - accuracy: 0.9667\n", + "Epoch 288/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5287 - accuracy: 0.9613\n", + "Epoch 289/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5433 - accuracy: 0.9610\n", + "Epoch 290/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5371 - accuracy: 0.9637\n", + "Epoch 291/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5274 - accuracy: 0.9636\n", + "Epoch 292/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5302 - accuracy: 0.9638\n", + "Epoch 293/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5418 - accuracy: 0.9611\n", + "Epoch 294/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5264 - accuracy: 0.9648\n", + "Epoch 295/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5397 - accuracy: 0.9614\n", + "Epoch 296/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5217 - accuracy: 0.9652\n", + "Epoch 297/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5193 - accuracy: 0.9648\n", + 
"Epoch 298/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5296 - accuracy: 0.9643\n", + "Epoch 299/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5312 - accuracy: 0.9621\n", + "Epoch 300/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5321 - accuracy: 0.9632\n", + "Epoch 301/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5151 - accuracy: 0.9664\n", + "Epoch 302/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5239 - accuracy: 0.9634\n", + "Epoch 303/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5264 - accuracy: 0.9640\n", + "Epoch 304/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5168 - accuracy: 0.9652\n", + "Epoch 305/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5238 - accuracy: 0.9649\n", + "Epoch 306/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5178 - accuracy: 0.9635\n", + "Epoch 307/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5119 - accuracy: 0.9650\n", + "Epoch 308/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5237 - accuracy: 0.9634\n", + "Epoch 309/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5284 - accuracy: 0.9635\n", + "Epoch 310/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5121 - accuracy: 0.9660\n", + "Epoch 311/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5394 - accuracy: 0.9599\n", + "Epoch 312/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5048 - accuracy: 0.9697\n", + "Epoch 313/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5069 - accuracy: 0.9650\n", + "Epoch 314/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5089 - accuracy: 
0.9657\n", + "Epoch 315/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5242 - accuracy: 0.9627\n", + "Epoch 316/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5258 - accuracy: 0.9638\n", + "Epoch 317/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5221 - accuracy: 0.9643\n", + "Epoch 318/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5045 - accuracy: 0.9666\n", + "Epoch 319/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5195 - accuracy: 0.9652\n", + "Epoch 320/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5005 - accuracy: 0.9680\n", + "Epoch 321/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5238 - accuracy: 0.9615\n", + "Epoch 322/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5321 - accuracy: 0.9618\n", + "Epoch 323/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5160 - accuracy: 0.9674\n", + "Epoch 324/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5245 - accuracy: 0.9628\n", + "Epoch 325/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5109 - accuracy: 0.9669\n", + "Epoch 326/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5138 - accuracy: 0.9656\n", + "Epoch 327/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.4976 - accuracy: 0.9667\n", + "Epoch 328/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5200 - accuracy: 0.9624\n", + "Epoch 329/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.4939 - accuracy: 0.9700\n", + "Epoch 330/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.4973 - accuracy: 0.9646\n", + "Epoch 331/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5258 - 
accuracy: 0.9619\n", + "Epoch 332/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5384 - accuracy: 0.9623\n", + "Epoch 333/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5265 - accuracy: 0.9655\n", + "Epoch 334/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5038 - accuracy: 0.9678\n", + "Epoch 335/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5162 - accuracy: 0.9643\n", + "Epoch 336/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5066 - accuracy: 0.9665\n", + "Epoch 337/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5063 - accuracy: 0.9660\n", + "Epoch 338/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5078 - accuracy: 0.9658\n", + "Epoch 339/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5310 - accuracy: 0.9632\n", + "Epoch 340/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.4861 - accuracy: 0.9703\n", + "Epoch 341/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5143 - accuracy: 0.9631\n", + "Epoch 342/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5199 - accuracy: 0.9637\n", + "Epoch 343/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.4992 - accuracy: 0.9685\n", + "Epoch 344/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5109 - accuracy: 0.9644\n", + "Epoch 345/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5066 - accuracy: 0.9657\n", + "Epoch 346/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5142 - accuracy: 0.9651\n", + "Epoch 347/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5092 - accuracy: 0.9649\n", + "Epoch 348/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 
0.5188 - accuracy: 0.9636\n", + "Epoch 349/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.5069 - accuracy: 0.9677\n", + "Epoch 350/350\n", + "391/391 [==============================] - 7s 18ms/step - loss: 0.4872 - accuracy: 0.9686\n" + ] + } + ], + "source": [ + "history = model.fit(x_train, to_categorical(y_train, num_classes), epochs=EPOCHS, batch_size=BATCH_SIZE)" + ] + }, + { + "cell_type": "code", + "source": [ + "loss = history.history['loss']\n", + "epochs = range(1, len(loss) + 1)\n", + "\n", + "plt.plot(epochs, loss, 'b', label='Training Loss')\n", + "plt.title('Training Loss')\n", + "plt.xlabel('Epochs')\n", + "plt.ylabel('Loss')\n", + "plt.legend()\n", + "plt.show()\n" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 472 + }, + "id": "6lrFuQrNRCyv", + "outputId": "3bc66200-18f3-483e-8c8c-3b44072fe7bb" + }, + "execution_count": 22, + "outputs": [ + { + "output_type": "display_data", + "data": { + "text/plain": [ + "
" + ], + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAioAAAHHCAYAAACRAnNyAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABEw0lEQVR4nO3deXhU1f3H8c+EkCGBLCyBhB0BWQUREEEBKwhEpYD4UykqoIUKqNBKq6gsai2i1lq1xbVQkUqVAlIEBVRcEBUUEBARlE22yJaFJUByfn+czoSBTDaSnEnyfj3PPJm5986dc++M3g/fc+69HmOMEQAAQAgKc90AAACAYAgqAAAgZBFUAABAyCKoAACAkEVQAQAAIYugAgAAQhZBBQAAhCyCCgAACFkEFQAAELIIKgDybejQoWrYsGGh3jt58mR5PJ6ibRCAMo+gApQBHo8nX4/ly5e7bqoTQ4cOVZUqVVw3A0AheLjXD1D6vf766wGvX3vtNS1dulQzZ84MmH711VerVq1ahf6cU6dOKSsrS16vt8DvPX36tE6fPq1KlSoV+vMLa+jQoZozZ47S09NL/LMBnJ9w1w0AcP5uueWWgNeff/65li5des70sx07dkxRUVH5/pyKFSsWqn2SFB4ervBw/pcDoGDo+gHKiSuvvFKtW7fWV199pW7duikqKkoPPPCAJOntt9/Wtddeq9q1a8vr9apx48Z69NFHlZmZGbCOs8eobN++XR6PR0899ZReeuklNW7cWF6vVx07dtSqVasC3pvTGBWPx6O77rpL8+fPV+vWreX1etWqVSu9++6757R/+fLl6tChgypVqqTGjRvrxRdfLPJxL2+99Zbat2+vyMhI1ahRQ7fccot2794dsMy+ffs0bNgw1a1bV16vV4mJierXr5+2b9/uX2b16tXq3bu3atSoocjISDVq1Ei33357kbUTKE/45w1Qjhw8eFBJSUm6+eabdcstt/i7gWbMmKEqVarod7/7napUqaIPPvhAEydOVGpqqp588sk81/uvf/1LaWlp+s1vfiOPx6MnnnhC119/vX788cc8qzCffvqp5s6dq1GjRik6OlrPPvusBg4cqJ07d6p69eqSpDVr1qhPnz5KTEzUww8/rMzMTD3yyCOKj48//53yPzNmzNCwYcPUsWNHTZkyRfv379df//pXrVixQmvWrFFcXJwkaeDAgdq4caPuvvtuNWzYUMnJyVq6dKl27tzpf92rVy/Fx8fr/vvvV1xcnLZv3665c+cWWVuBcsUAKHNGjx5tzv7Pu3v37kaSeeGFF85Z/tixY+dM+81vfmOioqLMiRMn/NOGDBliGjRo4H+9bds2I8lUr17dHDp0yD/97bffNpLMf//7X/+0SZMmndMmSSYiIsJs3brVP23dunVGknnuuef80/r27WuioqLM7t27/dO2bNliwsPDz1lnToYMGWIqV64cdP7JkydNzZo1TevWrc3x48f90xcuXGgkmYkTJxpjjDl8+LCRZJ588smg65o3b56RZFatWpVnuwDkja4foBzxer0aNmzYOdMjIyP9z9PS0nTgwAF17dpVx44d03fffZfnem+66SZVrVrV/7pr166SpB9//DHP9/bs2VONGzf2v27Tpo1iYmL8783MzNSyZcvUv39/1a5d279ckyZNlJSUlOf682P16tVKTk7WqFGjAgb7XnvttWrevLneeecdSXY/RUREaPny5Tp8+HCO6/JVXhYuXKhTp04VSfuA8oygApQjderUUURExDnTN27cqAEDBig2NlYxMTGKj4/3D8RNSUnJc73169cPeO0LLcEO5rm91/d+33uTk5N1/PhxNWnS5JzlcppWGDt27JAkNWvW7Jx5zZs398/3er2aOnWqFi9erFq1aqlbt2564okntG/fPv/y3bt318CBA/Xwww+rRo0a6tevn6ZPn66MjIwiaStQ3hBUgHLkzMqJz5EjR9S9e3etW
7dOjzzyiP773/9q6dKlmjp1qiQpKysrz/VWqFAhx+kmH1c/OJ/3ujB27Fh9//33mjJliipVqqQJEyaoRYsWWrNmjSQ7QHjOnDlauXKl7rrrLu3evVu333672rdvz+nRQCEQVIBybvny5Tp48KBmzJihMWPG6LrrrlPPnj0DunJcqlmzpipVqqStW7eeMy+naYXRoEEDSdLmzZvPmbd582b/fJ/GjRvr3nvv1ZIlS7RhwwadPHlSf/7znwOWueyyy/TYY49p9erVmjVrljZu3KjZs2cXSXuB8oSgApRzvorGmRWMkydP6u9//7urJgWoUKGCevbsqfnz52vPnj3+6Vu3btXixYuL5DM6dOigmjVr6oUXXgjoolm8eLE2bdqka6+9VpK97syJEycC3tu4cWNFR0f733f48OFzqkEXX3yxJNH9AxQCpycD5VyXLl1UtWpVDRkyRPfcc488Ho9mzpwZUl0vkydP1pIlS3T55Zdr5MiRyszM1PPPP6/WrVtr7dq1+VrHqVOn9Mc//vGc6dWqVdOoUaM0depUDRs2TN27d9egQYP8pyc3bNhQv/3tbyVJ33//vXr06KEbb7xRLVu2VHh4uObNm6f9+/fr5ptvliT985//1N///ncNGDBAjRs3Vlpaml5++WXFxMTommuuKbJ9ApQXBBWgnKtevboWLlyoe++9Vw899JCqVq2qW265RT169FDv3r1dN0+S1L59ey1evFjjxo3ThAkTVK9ePT3yyCPatGlTvs5KkmyVaMKECedMb9y4sUaNGqWhQ4cqKipKjz/+uO677z5VrlxZAwYM0NSpU/1n8tSrV0+DBg3S+++/r5kzZyo8PFzNmzfXm2++qYEDB0qyg2m//PJLzZ49W/v371dsbKwuvfRSzZo1S40aNSqyfQKUF9zrB0Cp1b9/f23cuFFbtmxx3RQAxYQxKgBKhePHjwe83rJlixYtWqQrr7zSTYMAlAgqKgBKhcTERA0dOlQXXHCBduzYoWnTpikjI0Nr1qxR06ZNXTcPQDFhjAqAUqFPnz564403tG/fPnm9XnXu3Fl/+tOfCClAGUdFBQAAhCzGqAAAgJBFUAEAACHL6RiVyZMn6+GHHw6Y1qxZs3xfFyErK0t79uxRdHS0PB5PcTQRAAAUMWOM0tLSVLt2bYWF5V4zcT6YtlWrVlq2bJn/dXh4/pu0Z88e1atXrziaBQAAitmuXbtUt27dXJdxHlTCw8OVkJBQqPdGR0dLshsaExNTlM0CAADFJDU1VfXq1fMfx3PjPKhs2bJFtWvXVqVKldS5c2dNmTJF9evXz3HZjIyMgJt6paWlSZJiYmIIKgAAlDL5GbbhdDBtp06dNGPGDL377ruaNm2atm3bpq5du/oDyNmmTJmi2NhY/4NuHwAAyraQuo7KkSNH1KBBAz399NO64447zpl/dkXFVzpKSUmhogIAQCmRmpqq2NjYfB2/nXf9nCkuLk4XXnihtm7dmuN8r9crr9dbwq0CAACuhFRQSU9P1w8//KBbb73VdVMAALnIysrSyZMnXTcDIapixYqqUKFCkazLaVAZN26c+vbtqwYNGmjPnj2aNGmSKlSooEGDBrlsFgAgFydPntS2bduUlZXluikIYXFxcUpISDjv65w5DSo//fSTBg0apIMHDyo+Pl5XXHGFPv/8c8XHx7tsFgAgCGOM9u7dqwoVKqhevXp5XqwL5Y8xRseOHVNycrIke+fz8+E0qMyePdvlxwMACuj06dM6duyYateuraioKNfNQYiKjIyUJCUnJ6tmzZrn1Q1EFAYA5FtmZqYkKSIiwnFLEOp8QfbUqVPntR6CCgCgwLi/GvJSVL8RggoAAAhZBBUAAAqhYcOGeuaZZ/K9/PLly+XxeHTkyJFia1NZRFABAJRpHo8n18fkyZMLtd5Vq1ZpxIgR+V6+S5cu2rt3r2JjYwv1eflV1gJRSF3wLVQcPSodOCB5vVIhb+wMAAgRe/fu9T//97//rYkTJ
2rz5s3+aVWqVPE/N8YoMzNT4eF5Hx4LeimNiIgIJXBQKTAqKjlYsEBq2FC65RbXLQEAnK+EhAT/IzY2Vh6Px//6u+++U3R0tBYvXqz27dvL6/Xq008/1Q8//KB+/fqpVq1aqlKlijp27Khly5YFrPfsrh+Px6NXXnlFAwYMUFRUlJo2baoFCxb4559d6ZgxY4bi4uL03nvvqUWLFqpSpYr69OkTEKxOnz6te+65R3Fxcapevbruu+8+DRkyRP379y/0/jh8+LBuu+02Va1aVVFRUUpKStKWLVv883fs2KG+ffuqatWqqly5slq1aqVFixb53zt48GDFx8crMjJSTZs21fTp0wvdlvwgqOTAd/2i/52FBwAIwhhbhXbxKMpb6t5///16/PHHtWnTJrVp00bp6em65ppr9P7772vNmjXq06eP+vbtq507d+a6nocfflg33nijvvnmG11zzTUaPHiwDh06FHT5Y8eO6amnntLMmTP18ccfa+fOnRo3bpx//tSpUzVr1ixNnz5dK1asUGpqqubPn39e2zp06FCtXr1aCxYs0MqVK2WM0TXXXOM/jXj06NHKyMjQxx9/rPXr12vq1Kn+qtOECRP07bffavHixdq0aZOmTZumGjVqnFd78mRKsZSUFCPJpKSkFOl633rLGMmYbt2KdLUAUOodP37cfPvtt+b48ePGGGPS0+3/L1080tML3v7p06eb2NhY/+sPP/zQSDLz58/P872tWrUyzz33nP91gwYNzF/+8hf/a0nmoYce8r9OT083kszixYsDPuvw4cP+tkgyW7du9b/nb3/7m6lVq5b/da1atcyTTz7pf3369GlTv359069fv6DtPPtzzvT9998bSWbFihX+aQcOHDCRkZHmzTffNMYYc9FFF5nJkyfnuO6+ffuaYcOGBf3sM539WzlTQY7fVFRy4KuocBsLACgfOnToEPA6PT1d48aNU4sWLRQXF6cqVapo06ZNeVZU2rRp439euXJlxcTE+C8ln5OoqCg1btzY/zoxMdG/fEpKivbv369LL73UP79ChQpq3759gbbtTJs2bVJ4eLg6derkn1a9enU1a9ZMmzZtkiTdc889+uMf/6jLL79ckyZN0jfffONfduTIkZo9e7Yuvvhi/eEPf9Bnn31W6LbkF0ElB3T9AED+REVJ6eluHkV5Bf/KlSsHvB43bpzmzZunP/3pT/rkk0+0du1aXXTRRXneMbpixYoBrz0eT643b8xpeVOUfVqF8Otf/1o//vijbr31Vq1fv14dOnTQc889J0lKSkrSjh079Nvf/lZ79uxRjx49ArqqigNBJQe+WxJQUQGA3Hk8UuXKbh7FeXHcFStWaOjQoRowYIAuuugiJSQkaPv27cX3gTmIjY1VrVq1tGrVKv+0zMxMff3114VeZ4sWLXT69Gl98cUX/mkHDx7U5s2b1bJlS/+0evXq6c4779TcuXN177336uWXX/bPi4+P15AhQ/T666/rmWee0UsvvVTo9uQHpyfngK4fACjfmjZtqrlz56pv377yeDyaMGFCrpWR4nL33XdrypQpatKkiZo3b67nnntOhw8fztfl6devX6/o6Gj/a4/Ho7Zt26pfv34aPny4XnzxRUVHR+v+++9XnTp11K9fP0nS2LFjlZSUpAsvvFCHDx/Whx9+qBYtWkiSJk6cqPbt26tVq1bKyMjQwoUL/fOKC0ElB3T9AED59vTTT+v2229Xly5dVKNGDd13331KTU0t8Xbcd9992rdvn2677TZVqFBBI0aMUO/evfN1N+Ju3boFvK5QoYJOnz6t6dOna8yYMbruuut08uRJdevWTYsWLfJ3Q2VmZmr06NH66aefFBMToz59+ugvf/mLJHstmPHjx2v79u2KjIxU165dNXv27KLf8DN4jOvOsPOQmpqq2NhYpaSkKCYmpsjWu2SJ1Lu3dPHF0po1RbZaACj1Tpw4oW3btqlRo0aqVKmS6+aUO1lZWWrRooVuvPFGP
froo66bk6vcfisFOX5TUckBFRUAQCjYsWOHlixZou7duysjI0PPP/+8tm3bpl/96leum1ZiGEybAwbTAgBCQVhYmGbMmKGOHTvq8ssv1/r167Vs2bJiHxcSSqio5IDBtACAUFCvXj2tWLHCdTOcoqKSA7p+AAAIDQSVHND1AwC5K8XnYaCEFNVvhKCSA7p+ACBnvtNi87pCK3Ds2DFJ5159t6AYo5IDun4AIGfh4eGKiorSzz//rIoVKyosjH/vIpAxRseOHVNycrLi4uLydc2X3BBUckDXDwDkzOPxKDExUdu2bdOOHTtcNwchLC4uTgkJCee9HoJKDqioAEBwERERatq0Kd0/CKpixYrnXUnxIajkgIoKAOQuLCyMK9OiRNC5mAMG0wIAEBoIKjmg6wcAgNBAUMkBXT8AAIQGgkoO6PoBACA0EFRyQNcPAAChgaCSA7p+AAAIDQSVHFBRAQAgNBBUckBFBQCA0EBQyQGDaQEACA0ElRzQ9QMAQGggqOTgzNsTGOOuHQAAlHcElRyceddyun8AAHCHoJKDM4MK3T8AALhDUMnBmV0/VFQAAHCHoJIDKioAAIQGgkoOqKgAABAaCCo5YDAtAAChgaCSA7p+AAAIDQSVHND1AwBAaCCo5MDjyX5OUAEAwB2CShBcRh8AAPcIKkFwB2UAANwjqARBRQUAAPcIKkFQUQEAwD2CShC+igpBBQAAdwgqQdD1AwCAewSVIOj6AQDAPYJKEHT9AADgHkElCLp+AABwj6ASBF0/AAC4R1AJgooKAADuEVSCoKICAIB7BJUgGEwLAIB7BJUg6PoBAMA9gkoQdP0AAOAeQSUIun4AAHCPoBIEXT8AALhHUAmCrh8AANwjqARBRQUAAPcIKkFQUQEAwD2CShAMpgUAwL2QCSqPP/64PB6Pxo4d67opkuj6AQAgFIREUFm1apVefPFFtWnTxnVT/Oj6AQDAPedBJT09XYMHD9bLL7+sqlWrum6OH10/AAC45zyojB49Wtdee6169uzpuikB6PoBAMC9cJcfPnv2bH399ddatWpVvpbPyMhQRkaG/3VqampxNY2uHwAAQoCzisquXbs0ZswYzZo1S5UqVcrXe6ZMmaLY2Fj/o169esXWPioqAAC45yyofPXVV0pOTtYll1yi8PBwhYeH66OPPtKzzz6r8PBwZeaQEMaPH6+UlBT/Y9euXcXWPioqAAC456zrp0ePHlq/fn3AtGHDhql58+a67777VMGXFM7g9Xrl9XpLpH0MpgUAwD1nQSU6OlqtW7cOmFa5cmVVr179nOku0PUDAIB7zs/6CVV0/QAA4J7Ts37Otnz5ctdN8KPrBwAA96ioBEHXDwAA7hFUgqDrBwAA9wgqQVBRAQDAPYJKEFRUAABwj6ASBINpAQBwj6ASBF0/AAC4R1AJgq4fAADcI6gEQdcPAADuEVSCoOsHAAD3CCpB0PUDAIB7BJUgqKgAAOAeQSUIKioAALhHUAmCwbQAALhHUAmCrh8AANwjqARB1w8AAO4RVIKg6wcAAPcIKkHQ9QMAgHsElSDo+gEAwD2CShBUVAAAcI+gEgQVFQAA3COoBMFgWgAA3COoBEHXDwAA7hFUgqDrBwAA9wgqQdD1AwCAewSVIOj6AQDAPYJKEHT9AADgHkElCCoqAAC4R1AJgooKAADuEVSCYDAtAADuEVSCoOsHAAD3CCpB0PUDAIB7BJUg6PoBAMA9gkoQdP0AAOAeQSUIun4AAHCPoBIEFRUAANwjqARBRQUAAPcIKkEwmBYAAPcIKkHQ9QMAgHsElSDo+gEAwD2CShB0/QAA4B5BJQi6fgAAcI+gEgRdPwAAuEdQCYKKCgAA7hFUgqCiAgCAewSVIBhMCwCAewSVIOj6AQDAPYJKEHT9AADgHkElCLp+AABwj6ASBF0/AAC4R1AJgq4fAADcI6gEQUUFAAD3CCpBUFEBAMA9gkoQDKYFAMA9gkoQdP0AAOAeQSUIun4AA
HCPoBIEXT8AALhHUAmCrh8AANwjqARB1w8AAO4RVIKgogIAgHsElSCoqAAA4B5BJQgG0wIA4B5BJQi6fgAAcI+gEgRdPwAAuEdQCYKuHwAA3COoBEHXDwAA7hFUgqDrBwAA9wgqQYSdsWcIKwAAuOE0qEybNk1t2rRRTEyMYmJi1LlzZy1evNhlk/x8FRWJoAIAgCtOg0rdunX1+OOP66uvvtLq1at11VVXqV+/ftq4caPLZkmiogIAQCjwGGOM60acqVq1anryySd1xx135LlsamqqYmNjlZKSopiYmCJtR3q6FB1tnx89KkVFFenqAQAotwpy/A4voTblKTMzU2+99ZaOHj2qzp0757hMRkaGMjIy/K9TU1OLrT0VK2Y/P3Wq2D4GAADkwvlg2vXr16tKlSryer268847NW/ePLVs2TLHZadMmaLY2Fj/o169esXWLoIKAADuOe/6OXnypHbu3KmUlBTNmTNHr7zyij766KMcw0pOFZV69eoVS9ePZAfUZmVJe/dKCQlFvnoAAMqlgnT9OA8qZ+vZs6caN26sF198Mc9li3OMiiR5vdLJk9LOnVIxFm8AAChXCnL8dt71c7asrKyAqolLvu4fun4AAHDD6WDa8ePHKykpSfXr11daWpr+9a9/afny5XrvvfdcNsuPoAIAgFtOg0pycrJuu+027d27V7GxsWrTpo3ee+89XX311S6b5UdQAQDALadB5dVXX3X58XkiqAAA4FbIjVEJJeH/i3GnT7ttBwAA5RVBJRdUVAAAcIugkguCCgAAbhFUckFQAQDALYJKLggqAAC4RVDJBYNpAQBwi6CSCyoqAAC4RVDJBUEFAAC3ChVUdu3apZ9++sn/+ssvv9TYsWP10ksvFVnDQgFBBQAAtwoVVH71q1/pww8/lCTt27dPV199tb788ks9+OCDeuSRR4q0gS4RVAAAcKtQQWXDhg269NJLJUlvvvmmWrdurc8++0yzZs3SjBkzirJ9TjGYFgAAtwoVVE6dOiWv1ytJWrZsmX75y19Kkpo3b669e/cWXesco6ICAIBbhQoqrVq10gsvvKBPPvlES5cuVZ8+fSRJe/bsUfXq1Yu0gS4RVAAAcKtQQWXq1Kl68cUXdeWVV2rQoEFq27atJGnBggX+LqGygKACAIBb4YV505VXXqkDBw4oNTVVVatW9U8fMWKEoqKiiqxxrhFUAABwq1AVlePHjysjI8MfUnbs2KFnnnlGmzdvVs2aNYu0gS4RVAAAcKtQQaVfv3567bXXJElHjhxRp06d9Oc//1n9+/fXtGnTirSBLnHWDwAAbhUqqHz99dfq2rWrJGnOnDmqVauWduzYoddee03PPvtskTbQJSoqAAC4VaigcuzYMUVHR0uSlixZouuvv15hYWG67LLLtGPHjiJtoEsEFQAA3CpUUGnSpInmz5+vXbt26b333lOvXr0kScnJyYqJiSnSBrpEUAEAwK1CBZWJEydq3LhxatiwoS699FJ17txZkq2utGvXrkgb6BJBBQAAtwp1evINN9ygK664Qnv37vVfQ0WSevTooQEDBhRZ41xjMC0AAG4VKqhIUkJCghISEvx3Ua5bt26ZutibREUFAADXCtX1k5WVpUceeUSxsbFq0KCBGjRooLi4OD366KPKysoq6jY6Q1ABAMCtQlVUHnzwQb366qt6/PHHdfnll0uSPv30U02ePFknTpzQY489VqSNdIWgAgCAW4UKKv/85z/1yiuv+O+aLElt2rRRnTp1NGrUKIIKAAAoEoXq+jl06JCaN29+zvTmzZvr0KFD592oUMFgWgAA3CpUUGnbtq2ef/75c6Y///zzatOmzXk3KlRQUQEAwK1Cdf088cQTuvbaa7Vs2TL/NVRWrlypXbt2adGiRUXaQJcIKgAAuFWoikr37t31/fffa8CAATpy5IiOHDmi66+/Xhs3btTMmTOLuo3OEFQAAHDLY4wxRbWydevW6ZJLLlFmZmZRrTJXqampi
o2NVUpKSrFcun/+fGnAAKlzZ+mzz4p89QAAlEsFOX4XqqJSXlBRAQDALYJKLjjrBwAAtwgquaCiAgCAWwU66+f666/Pdf6RI0fOpy0hh6ACAIBbBQoqsbGxec6/7bbbzqtBoYSgAgCAWwUKKtOnTy+udoQkggoAAG4xRiUXDKYFAMAtgkouqKgAAOAWQSUXBBUAANwiqOSCoAIAgFsElVwQVAAAcIugkoszB9MW3R2RAABAfhFUcuGrqEhSCd1nEQAAnIGgkoszgwrdPwAAlDyCSi4IKgAAuEVQyQVBBQAAtwgquahQQfJ47HOCCgAAJY+gkgcuow8AgDsElTxwLRUAANwhqOSBoAIAgDsElTwQVAAAcIegkgeCCgAA7hBU8sBgWgAA3CGo5IGKCgAA7hBU8kBQAQDAHYJKHggqAAC4Q1DJA0EFAAB3CCp5YDAtAADuEFTy4KuonDzpth0AAJRHBJU8eL32b0aG23YAAFAeEVTyEBlp/5444bYdAACURwSVPPiCyvHjbtsBAEB5RFDJQ6VK9i9BBQCAkkdQyQNdPwAAuENQyQNdPwAAuOM0qEyZMkUdO3ZUdHS0atasqf79+2vz5s0um3QOX9cPFRUAAEqe06Dy0UcfafTo0fr888+1dOlSnTp1Sr169dLRo0ddNisAFRUAANwJd/nh7777bsDrGTNmqGbNmvrqq6/UrVs3R60KRFABAMCdkBqjkpKSIkmqVq2a45Zko+sHAAB3nFZUzpSVlaWxY8fq8ssvV+vWrXNcJiMjQxlnXCI2NTW12NtFRQUAAHdCpqIyevRobdiwQbNnzw66zJQpUxQbG+t/1KtXr9jbRVABAMCdkAgqd911lxYuXKgPP/xQdevWDbrc+PHjlZKS4n/s2rWr2NtG1w8AAO447foxxujuu+/WvHnztHz5cjVq1CjX5b1er7y+uwSWECoqAAC44zSojB49Wv/617/09ttvKzo6Wvv27ZMkxcbGKtKXEBwjqAAA4I7Trp9p06YpJSVFV155pRITE/2Pf//73y6bFYCuHwAA3HHe9RPqqKgAAOBOSAymDWUEFQAA3CGo5IGuHwAA3CGo5OHMikop6KkCAKBMIajkwRdUsrKk06fdtgUAgPKGoJIHX9ePxDgVAABKGkElD16v5PHY5wQVAABKFkElDx4PA2oBAHCFoJIPvqBCRQUAgJJFUMkHrqUCAIAbBJV88AUVun4AAChZBJV8oOsHAAA3CCr5QNcPAABuEFTyga4fAADcIKjkA10/AAC4QVDJB7p+AABwg6CSD3T9AADgBkElH+j6AQDADYJKPlBRAQDADYJKPjBGBQAANwgq+UDXDwAAbhBU8oGuHwAA3CCo5EPlyvZvWprbdgAAUN4QVPIhNtb+TUlx2w4AAMobgko+EFQAAHCDoJIPcXH2L0EFAICSRVDJByoqAAC4QVDJB19QOXLEaTMAACh3CCr54AsqaWlSVpbbtgAAUJ4QVPLBF1SM4RRlAABKEkElHypVkrxe+5xxKgAAlByCSj4xTgUAgJJHUMknzvwBAKDkEVTyiaACAEDJI6jkE0EFAICSR1DJJ65OCwBAySOo5BODaQEAKHkElXyi6wcAgJJHUMknggoAACWPoJJPBBUAAEoeQSWffINpGaMCAEDJIajkExUVAABKHkElnwgqAACUPIJKPnF6MgAAJY+gkk/x8fbvzz9LxrhtCwAA5QVBJZ9q1bJ/T52SDh922xYAAMoLgko+eb3ZZ/7s3++0KQAAlBsElQJISLB/9+1z2w4AAMoLgkoB+IIKFRUAAEoGQaUAfONUqKgAAFAyCCoFQNcPAAAli6BSAHT9AABQsggqBUDXDwAAJYugUgBUVAAAKFkElQJgjAoAACWLoFIAvq6f5GQpK8ttWwAAKA8IKgUQHy95PFJmpnTwoOvWAABQ9hFUCqBiRalGDft87163bQEAoDwgqBRQgwb2748/um0HAADlAUGlgJo2tX+3bHHbDgAAy
gOCSgERVAAAKDkElQIiqAAAUHIIKgVEUAEAoOQQVArIF1R275aOHnXbFgAAyjqCSgFVq2YfkrR1q9u2AABQ1hFUCoHuHwAASgZBpRAuvND+3bzZbTsAACjrCCqF0Lat/bt6tdt2AABQ1jkNKh9//LH69u2r2rVry+PxaP78+S6bk2+dOtm/X3whGeO2LQAAlGVOg8rRo0fVtm1b/e1vf3PZjAK75BKpQgV7v5/du123BgCAsivc5YcnJSUpKSnJZRMKJSpKuugiae1aW1WpW9d1iwAAKJtK1RiVjIwMpaamBjxcObP7BwAAFI9SFVSmTJmi2NhY/6NevXrO2nLppfbvp586awIAAGVeqQoq48ePV0pKiv+xa9cuZ23p1UvyeKSVK6Xt2501AwCAMq1UBRWv16uYmJiAhyt160q/+IV9/q9/OWsGAABlWqkKKqHmllvs35kzOU0ZAIDi4DSopKena+3atVq7dq0kadu2bVq7dq127tzpsln5NnCgVKmS9N130tdfu24NAABlj9Ogsnr1arVr107t2rWTJP3ud79Tu3btNHHiRJfNyreYGKl/f/t85kynTQEAoEzyGFN6Oy1SU1MVGxurlJQUZ+NV3nlHuu46qWZNe/G3cKdXpgEAIPQV5PjNGJXz1KuXFB8vJSdLS5e6bg0AAGULQeU8Vawo3XyzfU73DwAARYugUgRuvdX+nT9fSktz2hQAAMoUgkoR6NBBatZMOn5cmjvXdWsAACg7CCpFwOPJrqo88ojk8BZEAACUKQSVInLXXVLDhtKPP0qjR7tuDQAAZQNBpYjExkqzZklhYdLrr9sHAAA4PwSVItSlizRpkn0+apS0Y4fb9gAAUNoRVIrYAw9Il19uz/4ZPZp7AAEAcD4IKkUsPFx6+WUpIsJetfaZZwgrAAAUFkGlGLRoIU2YYJ//7ne2Gygry22bAAAojQgqxeTBB6UnnrCDa194QbrjDunIEdetAgCgdCGoFBOPR/r976XXXrNhZcYM6YILpA8+cN0yAABKD4JKMRs8WFq4UGrZUjp8WPrlL6Vnn5V++sl1ywAACH0ElRKQlCR9/bW90/LRo9KYMVKrVtKqVa5bBgBAaAt33YDywuuV5s2TnnvOXhhu/Xrp6qul22+3l9yvWVO6/np73yBjbNcRAADlnceY0nvybGpqqmJjY5WSkqKYmBjXzcm3tDSpTx/ps88Cp3s8dtDtvHlSx47SxIm2u+iFF6Rx46Ru3ezZQ5s3S82bS+npUmamFBfnZDMAACiUghy/CSqOnDolzZ8vvfuuVKeOtG6dtGBB8OXr1ZM2bpRuucUud/fd0ttv2zs2r1snJSZKX31lB+62a1dimxHS0tNtGLz6aipUABBKCCqlkDH2zssvvGCvu7Jli62sHD0qRUfb7qFGjaRt28597623SjfcIPXvb9fTtav00kvShRfaUJOcLA0YIMXHS3Pm2JB000021EjSn/4kbdggvfKKFBVl52dl2e6qwmzHrl02WLkOB7/+tfTqq9K0adKdd7ptCwAgG0GljMjIkE6ftpWTwYPttLAwqU0bae1a+9wY+6hY0QYMn0qVpFq1su83VKmSPV360Uft6/bt7fiYX/zCDuw1xnY11aljbwMQGWlPqV6+3FZyKlSQLr7YDgT+4AOpbVsbZF5+WfriC2nEiOwzmsaMsV1VTz6Z83ZlZUmrV9uKx2WXZYejihWD7wvfuJ3Tp+3r8DxGV506ZYNZSop0xRXSsmV2G/J6HwCg+BFUyhhjpOeft908fftKVavasSzXXmtPc54yxS7Xo4etigwfbg/MklSliq3ErF+f87orV7ZVm/yKipKOHbNtiIqSdu+208PDpdmzpbvukvbts9NuvVWqVk2KiZG+/dYOFG7RQnr8cenzz+0yDRva7Zgxww40vuEGW905ccLeMyk5WRo2zIalm2+2laKwMLvcCy/YWxV8/LFd/qqrsoPIBx/Y/eHj9dr9sGSJ9Pe/SwMH2vZIN
szExmYv++OP0kcfSdddZ8NOQXz3nR0YXa1awd4HAOVJgY7fphRLSUkxkkxKSorrpji1a5cxK1YYc/y4fZ2ZaczXXxuzdKkxycl2eps2tvZSpYoxa9caM3WqMZGRvnqMMbVq2b8xMXbeRRfZ1926GfPss8b8+c/GxMdnL+97NGtmzLXXBk4LCzt3ubMflSsbU7Vq4LSKFY2Jjs5+HRNjTIUKwdcxcqQxCxcGLn/ZZcaMH29MixY5v6dmTfs3IsKYv/zFmFtuMcbjMWbKFGNOnDBm6NDsZVu3NubDD4154AFjrr7amDp1jBk0yJh77zWmZUtjFiyw+/vkSWP27zfm3/+262rRwq5rwwZjLrzQmD/+0S53+rT9m5VlzOrVdt0AUB4V5PhNRaWc+O47W4UZPlwaOtRO+/e/bZWiaVNblZg7154inZBgqzdbtkgXXZQ91mT3bjsAuHdvafJkO27m1VftWUfDhtnTriU75iUiwp6xlJEhHTggNW4s/fe/trvnyitt91JamtSpk11Py5a26iJJtWvbLp7kZPu6Uyd7xtObb0r33GPHv9x0k40TFSrYM5+8XvtZZ+vS5dyzq4Jp0MB2lYWF2a6vvCpNHo/0f/9n1//TT/a177+mP/7Rjg/68svsbfjmG9uFt2qVHQAtSffeK9WoYSs4J0/a5UaNsoOj//pXu08ef9wus2aN9OGHdtD00qW2u+3RR+24pQkT7Nlg/frZgdbvvGOrXt262apaTjIz7W+gZUvbreezfr1td9269to/rscaFbUjR+z3kJRkK4AASh4VFeTb118bs3v3+a8nK8uY55+3VY709Py/b/t2Ww1KSzNm0iRblcjMNObUKWNWrTLmp5/sus/217/aqohkTMeO9jPXrTPmtdeMGTzYVjK6d7cVpVGjjPnvf7MrOKNGGTNtmq0iRUUZM2BAdhUlNtaYJUtsm7xeW+UZNMguv2CBraTUqWPMTTflXLFp0iTvapKvohNsntdrTO3a2a9btDBm2DBbrTl72fvuM6Z588BpjRtnP2/QwJjhw41p186Ytm3tY+JEW9351a+ylxsxwu7z994L/Jzhw4156y1bpXr4YVs5ysiwn9url13H735nzBtvGHP4sDFffmnMXXcZ89ln9ns6fdqYTZuMef99u8yPP577XR47Zsznn9vvdN263H8vhw/n/HvIzQ8/2N+Uz9NPZ2/fihUFWxeAolGQ4zdBBaXW4cM2gBw5kr/l58835vbbjTl0yL7OyDAmJcUexObONeadd7LnGWNDUnJy4DqysrK7cFautAfwZ54xZs8e29V24oQNSr4D4TPPGNOnjzE9exrzz38aM2SInXbwoDEvvmhDxYAB9iD9yivGXHFF9nujo41JSAgMIddcY8z999vHmdPr1DHm5psDA9eZYefsR5cu9u+ZXWsDBmR377Vtm3MwiooyplOnnNcZF2dMpUrZrzt3NuaCCwKXiYy02//QQ8aMHWsD5dndhnffbb/TEyeMefllY2691Zjf/taYp54yJjzcbuezzxqTlGTM99/bkDtihO2e++QTG5g3bbLf0VNP2fX26GHM0aN2Wteu2Z/XtKntuktPN+aXvzTmuuvsOs524oQxM2bY0BPMrl3GjBljfwdny8qyv7877jBm69bAeRkZwdfp+2ygrCGoAI4dPpz7QS2YrCw7tuWjj4z5+Wdjdu60B/UxY4x5993AZf/xDztGKDbWhqbUVPs6MtK+PyXFVjeGDbNB7L337AHdd5AODzfmP/+xjzPHFbVta6sc//mPMR062GAybJitXPmWqVDBjmV66ikbLM4MHK1bBwagqChb9Tk7lJz5iImxwebMkJaYmHdlqlYtYxo2PHd67do2cJxZuQoPt2O0zl524UJbOTpzWrdudvxSz57G3HCDMb172+lVq9rxWsOG2fbOmWO/izlzsit2lSrZitPAgbZiOH68DUS+dV9xRXZV6D//seO1br3VVrTONmGCDYxDhtjfw9l27
TJm48bs16tXZ1ezcvptLV0aWEHNrTq1cqWtcvK/VxQHggpQjvgqPMbYgHHgQPBljx+33UAREfYg6TNzpjG/+IUNRWdXkXyysmz3V//+ge/1tWH2bGP+9jf7fPdu+/yVV7K7AjMybCBo1cqYX//aVlRmzrTVJd8Bc+lSG7Z8B/WEBGMefDC7O6tt2+x5Zw7GTkzMrpSc3a122WXnDtzu0MGGP19VRbKB4P/+zwaavAKS7+HxZFenfKHszPlnfq7Xax+S7U7btCkwOF1xhR2ofcMNxkyfbsPmmQHy0kttOGzTxla/+vWzbQ0LsyH0gw9sQPR4jHnpJWMee8x2xW3YYMyiRcb84Q/Z+3T/fhtoqla1gergwezvMiUlsGI3dqyd/sILdlsfftiYvXsDv/tXXrEVwiVLbCXS93sBgiGoAAgqIyPwwBRqTp2yY6feecd26xhjw84XX9iD4sMPG/Pkk7bC8MQTtppx5Ig9MO7YYcynn2Z3W3XsaMy+fbbbZ+tWe3CX7EH1yy8DQ8X999vP2r7dVk2efNJ29/z613ac0Ny5dkzOVVfZM8Fuuy0wsNx/vz2At2xpg1Plytnzn3nGditOmpRdMfKdade6tR0LFSwM9eplq2a5Bab4eGOqVct/wOrZM7AL7MIL7T7csuXcSlb16nZfnRngqlSxQejYMTvm6ezwNnu2DVTdutluUd93OHas7bp7+20bfHbsMObvf7ftmTXLmG++sd/VZ58Zc889trssI8OG748+st//yZP2cbaUFPtdZ2UZs3x5dqUpI8Pu/1mzAsfPvf++HRuVmWk/98xxTOcrLS3n8VjFITMz+D8uQhlBBUC5tmSJrfqcffDJyrLdaVlZ9uHrjurYMeeDX258FaZXXzXm22+zp586ZefNmGEP2qNHZ887etSYK6/MPqi3aWMPrps32wP/yJG2glSnjp1fr54NPy++mP2e3/zGmOees6fXf/BB4Kn4l1ySvf7Wre3nh4cb06iRfX733edWfXyhLikp+7IEjRvb/edrx5ldYmd2AdaokV31ufJKO3hbCuz6q13bdkHmNNC8bt1zL2cQFRU4rX797MpU3brZ46Dq17cVqKFD7XPf8o0a2b+RkfYSAFddFRi6HnvMVs4k+znt29vn111nfwNbtxrz6KO2zYMG2e7NzEwbPtauDRzHdqYdO2xA27vX7qMKFc7thjtwwFa4UlPPfb+vKjl8uB17depU3r/JnTuzu0xHjsweh3W2776z3Yg5fa4rBBUAyIdFi2w3SnH96/fAgXO7QHxnTY0Ykfv4j5Mns4NWZqYx48bZ7puzw9cPP9hqzuuv2wPVyZO2MpKVZUOQb2yL7zpLCxdmV3CGD7fVkjMrOrVqZY9jmTAh8CC/Z4/9/Ndft6HBN+/WW+3yycmBlSTfdYt8j8REO9i8cWMbcnzT27e3XXBnVoWuvjq76nR2+Cnoo0qV7ACT26N58+zuuTMfHTpkf36VKvaMwshIG4JeeSU7+Pjm+5736ZP9Xf7619nTw8Jsde/zz+30lSttVenMClmNGjYoLlliA09Ghu3SGz7cVoZSU22QPbOdffoYs369rR75BmEfOZL9nYwfb0Px739vvzNfJSYrywaxFSts0PrTn2ygK86B3FxHBQAQ1HvvSa+/Lj31lL3Vxrvv2ntinTolTZpkr+cjSYcO2WsXNWhgr+9Tp072OjIy7K0wfvrJ3mfMd2+w8ePttX+6dLFXgp47V1q50t62Y+DA7Lu9r1tnb29RubK9dk98vD3crl5trzTdtau9ntPMmfaK0wMH2us9NW5sr/68dKl91K0rde5sr2SdliYtXGhv8fF//2fb1q6d9I9/SK1b2ytbv/OOVL++vYXI++/bx3XXSX/4g72WkWSv9dSli72e01NP2dt+SPaaROnpOe/TsDB7X7aUlMDpffrYa1L98IN9HRt77jJhYfYzKle215HasuXc9bduba/aLdnrL4WHS1On2qt7P/KIva7SiRPZ1
3Nq1Mi+Z+tWadMm+77Kle1f3zWi2rWTDh603+XJk/baV2e6+GK7v2rXznmbzweX0AcAOHHypDR9ur33V2Ji7svu22cPuDVqFH079u2zF7rs1i37Bqy52b3bXowxNtYGFd+FDufNs4+RI22AmzNH2r7drveDD6Q33rDb8MorNvTddpsNdMZkXwRTsheR/Pe/7W1QPv3UBpijRwMvVjlrlp0+ZYq93cjSpTboGRN4L7ewMNu+zEx7Ic3rrrMh7De/sfN9tzoJpk0be6HItLTA6dHRdlqjRvbvgQPSJZfYgOgLOUWFoAIAgEOHDtmbtsbF2apHu3a2EuSzbp30ySe2GjVmjJ3/0EPnricry96Etn9/W02Kj5feesvOGzzYVpx8VZRXX7WB6Je/tJWQtDR7r7VLL7U3iZ02zYaqdets2Bo71laeeve21aPu3W2FpVo1aedOG8wOHLCf/Z//5C/w5RdBBQCAMiQz0waFjAzbpXbBBfbO9/m9xcWWLdI119guot//Pn/vWbFCuvpqe0uQ3/62aG+nQVABAADnbe/evLvwCqMgx+8iLOQAAICypDhCSkERVAAAQMgiqAAAgJBFUAEAACGLoAIAAEIWQQUAAIQsggoAAAhZBBUAABCyCCoAACBkEVQAAEDIIqgAAICQRVABAAAhi6ACAABCFkEFAACErHDXDTgfxhhJ9nbRAACgdPAdt33H8dyU6qCSlpYmSapXr57jlgAAgIJKS0tTbGxsrst4TH7iTIjKysrSnj17FB0dLY/HUyTrTE1NVb169bRr1y7FxMQUyTpLG/YB+6C8b7/EPijv2y+xD6Ti2wfGGKWlpal27doKC8t9FEqprqiEhYWpbt26xbLumJiYcvvD9GEfsA/K+/ZL7IPyvv0S+0Aqnn2QVyXFh8G0AAAgZBFUAABAyCKonMXr9WrSpEnyer2um+IM+4B9UN63X2IflPftl9gHUmjsg1I9mBYAAJRtVFQAAEDIIqgAAICQRVABAAAhi6ACAABCFkHlLH/729/UsGFDVapUSZ06ddKXX37puknFYvLkyfJ4PAGP5s2b++efOHFCo0ePVvXq1VWlShUNHDhQ+/fvd9ji8/fxxx+rb9++ql27tjwej+bPnx8w3xijiRMnKjExUZGRkerZs6e2bNkSsMyhQ4c0ePBgxcTEKC4uTnfccYfS09NLcCvOT177YOjQoef8Lvr06ROwTGneB1OmTFHHjh0VHR2tmjVrqn///tq8eXPAMvn57e/cuVPXXnutoqKiVLNmTf3+97/X6dOnS3JTCiU/23/llVee8xu48847A5YprdsvSdOmTVObNm38FzDr3LmzFi9e7J9flr9/n7z2Qcj9Bgz8Zs+ebSIiIsw//vEPs3HjRjN8+HATFxdn9u/f77ppRW7SpEmmVatWZu/evf7Hzz//7J9/5513mnr16pn333/frF692lx22WWmS5cuDlt8/hYtWmQefPBBM3fuXCPJzJs3L2D+448/bmJjY838+fPNunXrzC9/+UvTqFEjc/z4cf8yffr0MW3btjWff/65+eSTT0yTJk3MoEGDSnhLCi+vfTBkyBDTp0+fgN/FoUOHApYpzfugd+/eZvr06WbDhg1m7dq15pprrjH169c36enp/mXy+u2fPn3atG7d2vTs2dOsWbPGLFq0yNSoUcOMHz/exSYVSH62v3v37mb48OEBv4GUlBT//NK8/cYYs2DBAvPOO++Y77//3mzevNk88MADpmLFimbDhg3GmLL9/fvktQ9C7TdAUDnDpZdeakaPHu1/nZmZaWrXrm2mTJnisFXFY9KkSaZt27Y5zjty5IipWLGieeutt/zTNm3aZCSZlStXllALi9fZB+msrCyTkJBgnnzySf+0I0eOGK/Xa9544w1jjDHffvutkWRWrVrlX2bx4sXG4/GY3bt3l1jbi0qwoNKvX7+g7ylr+yA5OdlIMh999JExJn+//UWLFpmwsDCzb98+/zLTpk0zMTExJ
iMjo2Q34Dydvf3G2IPUmDFjgr6nLG2/T9WqVc0rr7xS7r7/M/n2gTGh9xug6+d/Tp48qa+++ko9e/b0TwsLC1PPnj21cuVKhy0rPlu2bFHt2rV1wQUXaPDgwdq5c6ck6auvvtKpU6cC9kXz5s1Vv379Mrsvtm3bpn379gVsc2xsrDp16uTf5pUrVyouLk4dOnTwL9OzZ0+FhYXpiy++KPE2F5fly5erZs2aatasmUaOHKmDBw/655W1fZCSkiJJqlatmqT8/fZXrlypiy66SLVq1fIv07t3b6Wmpmrjxo0l2Przd/b2+8yaNUs1atRQ69atNX78eB07dsw/ryxtf2ZmpmbPnq2jR4+qc+fO5e77l87dBz6h9Bso1TclLEoHDhxQZmZmwI6XpFq1aum7775z1Kri06lTJ82YMUPNmjXT3r179fDDD6tr167asGGD9u3bp4iICMXFxQW8p1atWtq3b5+bBhcz33bl9P375u3bt081a9YMmB8eHq5q1aqVmf3Sp08fXX/99WrUqJF++OEHPfDAA0pKStLKlStVoUKFMrUPsrKyNHbsWF1++eVq3bq1JOXrt79v374cfye+eaVFTtsvSb/61a/UoEED1a5dW998843uu+8+bd68WXPnzpVUNrZ//fr16ty5s06cOKEqVapo3rx5atmypdauXVtuvv9g+0AKvd8AQaWcSkpK8j9v06aNOnXqpAYNGujNN99UZGSkw5bBpZtvvtn//KKLLlKbNm3UuHFjLV++XD169HDYsqI3evRobdiwQZ9++qnrpjgRbPtHjBjhf37RRRcpMTFRPXr00A8//KDGjRuXdDOLRbNmzbR27VqlpKRozpw5GjJkiD766CPXzSpRwfZBy5YtQ+43QNfP/9SoUUMVKlQ4Z3T3/v37lZCQ4KhVJScuLk4XXnihtm7dqoSEBJ08eVJHjhwJWKYs7wvfduX2/SckJCg5OTlg/unTp3Xo0KEyu18uuOAC1ahRQ1u3bpVUdvbBXXfdpYULF+rDDz9U3bp1/dPz89tPSEjI8Xfim1caBNv+nHTq1EmSAn4DpX37IyIi1KRJE7Vv315TpkxR27Zt9de//rXcfP9S8H2QE9e/AYLK/0RERKh9+/Z6//33/dOysrL0/vvvB/TblVXp6en64YcflJiYqPbt26tixYoB+2Lz5s3auXNnmd0XjRo1UkJCQsA2p6am6osvvvBvc+fOnXXkyBF99dVX/mU++OADZWVl+f9DLmt++uknHTx4UImJiZJK/z4wxuiuu+7SvHnz9MEHH6hRo0YB8/Pz2+/cubPWr18fENiWLl2qmJgYf+k8VOW1/TlZu3atJAX8Bkrr9geTlZWljIyMMv/958a3D3Li/DdQ5MNzS7HZs2cbr9drZsyYYb799lszYsQIExcXFzCyuay49957zfLly822bdvMihUrTM+ePU2NGjVMcnKyMcaeole/fn3zwQcfmNWrV5vOnTubzp07O271+UlLSzNr1qwxa9asMZLM008/bdasWWN27NhhjLGnJ8fFxZm3337bfPPNN6Zfv345np7crl0788UXX5hPP/3UNG3atNScmmtM7vsgLS3NjBs3zqxcudJs27bNLFu2zFxyySWmadOm5sSJE/51lOZ9MHLkSBMbG2uWL18ecOrlsWPH/Mvk9dv3nZrZq1cvs3btWvPuu++a+Pj4UnF6al7bv3XrVvPII4+Y1atXm23btpm3337bXHDBBaZbt27+dZTm7TfGmPvvv9989NFHZtu2beabb74x999/v/F4PGbJkiXGmLL9/fvktg9C8TdAUDnLc889Z+rXr28iIiLMpZdeaj7//HPXTSoWN910k0lMTDQRERGmTp065qabbjJbt271zz9+/LgZNWqUqVq1qomKijIDBgwwe/fuddji8/fhhx8aSec8hgwZYoyxpyhPmDDB1KpVy3i9XtOjRw+zefPmgHUcPHjQDBo0yFSpUsXExMSYYcOGmbS0NAdbUzi57
YNjx46ZXr16mfj4eFOxYkXToEEDM3z48HOCemneBzltuyQzffp0/zL5+e1v377dJCUlmcjISFOjRg1z7733mlOnTpXw1hRcXtu/c+dO061bN1OtWjXj9XpNkyZNzO9///uAa2gYU3q33xhjbr/9dtOgQQMTERFh4uPjTY8ePfwhxZiy/f375LYPQvE34DHGmKKv0wAAAJw/xqgAAICQRVABAAAhi6ACAABCFkEFAACELIIKAAAIWQQVAAAQsggqAAAgZBFUAJR6Ho9H8+fPd90MAMWAoALgvAwdOlQej+ecR58+fVw3DUAZEO66AQBKvz59+mj69OkB07xer6PWAChLqKgAOG9er1cJCQkBj6pVq0qy3TLTpk1TUlKSIiMjdcEFF2jOnDkB71+/fr2uuuoqRUZGqnr16hoxYoTS09MDlvnHP/6hVq1ayev1KjExUXfddVfA/AMHDmjAgAGKiopS06ZNtWDBAv+8w4cPa/DgwYqPj1dkZKSaNm16TrACEJoIKgCK3YQJEzRw4ECtW7dOgwcP1s0336xNmzZJko4eParevXuratWqWrVqld566y0tW7YsIIhMmzZNo0eP1ogRI7R+/XotWLBATZo0CfiMhx9+WDfeeKO++eYbXXPNNRo8eLAOHTrk//xvv/1Wixcv1qZNmzRt2jTVqFGj5HYAgMIrllsdAig3hgwZYipUqGAqV64c8HjssceMMfaOvXfeeWfAezp16mRGjhxpjDHmpZdeMlWrVjXp6en++e+8844JCwvz37m5du3a5sEHHwzaBknmoYce8r9OT083kszixYuNMcb07dvXDBs2rGg2GECJYowKgPP2i1/8QtOmTQuYVq1aNf/zzp07B8zr3Lmz1q5dK0natGmT2rZtq8qVK/vnX3755crKytLmzZvl8Xi0Z88e9ejRI9c2tGnTxv+8cuXKiomJUXJysiRp5MiRGjhwoL7++mv16tVL/fv3V5cuXQq1rQBKFkEFwHmrXLnyOV0xRSUyMjJfy1WsWDHgtcfjUVZWliQpKSlJO3bs0KJFi7R06VL16NFDo0eP1lNPPVXk7QVQtBijAqDYff755+e8btGihSSpRYsWWrdunY4ePeqfv2LFCoWFhalZs2aKjo5Ww4YN9f77759XG+Lj4zVkyBC9/vrreuaZZ/TSSy+d1/oAlAwqKgDOW0ZGhvbt2xcwLTw83D9g9a233lKHDh10xRVXaNasWfryyy/16quvSpIGDx6sSZMmaciQIZo8ebJ+/vln3X333br11ltVq1YtSdLkyZN15513qmbNmkpKSlJaWppWrFihu+++O1/tmzhxotq3b69WrVopIyNDCxcu9AclAKGNoALgvL377rtKTEwMmNasWTN99913kuwZObNnz9aoUaOUmJioN954Qy1btpQkRUVF6b333tOYMWPUsWNHRUVFaeDAgXr66af96xoyZIhOnDihv/zlLxo3bpxq1KihG264Id/ti4iI0Pjx47V9+3ZFRkaqa9eumj17dhFsOYDi5jHGGNeNAFB2eTwezZs3T/3793fdFAClEGNUAABAyCKoAACAkMUYFQDFit5lAOeDigoAAAhZBBUAABCyCCoAACBkEVQAAEDIIqgAAICQRVABAAAhi6ACAABCFkEFAACELIIKAAAIWf8P2HJWN2E5DGAAAAAASUVORK5CYII=\n" + }, + "metadata": {} + } + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "3SRXs6V6bNEX", + "outputId": "4ed1f452-e232-41a3-caa9-ea01e89369d0" + }, + "outputs": [ + { + "output_type": "stream", + "name": 
"stdout", + "text": [ + "313/313 [==============================] - 1s 3ms/step - loss: 1.5913 - accuracy: 0.7413\n" + ] + }, + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[1.5913232564926147, 0.7412999868392944]" + ] + }, + "metadata": {}, + "execution_count": 23 + } + ], + "source": [ + "model.evaluate(x_test, to_categorical(y_test, num_classes))" + ] + }, + { + "cell_type": "markdown", + "source": [ + "---" + ], + "metadata": { + "id": "XyPoVUwrRRj5" + } + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": { + "id": "vXA26MZUbOXO" + }, + "outputs": [], + "source": [ + "def cnn(input_shape, num_classes):\n", + " \"\"\"CNN Model from (McMahan et. al., 2017).\n", + "\n", + " Communication-efficient learning of deep networks from decentralized data\n", + " \"\"\"\n", + " input_shape = tuple(input_shape)\n", + "\n", + " weight_decay = 0.004\n", + " model = keras.Sequential(\n", + " [\n", + " keras.layers.Conv2D(\n", + " 64,\n", + " (5, 5),\n", + " padding=\"same\",\n", + " activation=\"relu\",\n", + " input_shape=input_shape,\n", + " ),\n", + " keras.layers.MaxPooling2D((3, 3), strides=(2, 2)),\n", + " keras.layers.BatchNormalization(),\n", + " keras.layers.Conv2D(\n", + " 64,\n", + " (5, 5),\n", + " padding=\"same\",\n", + " activation=\"relu\",\n", + " ),\n", + " keras.layers.BatchNormalization(),\n", + " keras.layers.MaxPooling2D((3, 3), strides=(2, 2)),\n", + " keras.layers.Flatten(),\n", + " keras.layers.Dense(\n", + " 384, activation=\"relu\", kernel_regularizer=l2(weight_decay)\n", + " ),\n", + " keras.layers.Dense(\n", + " 192, activation=\"relu\", kernel_regularizer=l2(weight_decay)\n", + " ),\n", + " keras.layers.Dense(num_classes, activation=\"softmax\"),\n", + " ]\n", + " )\n", + " optimizer = SGD(learning_rate=0.1)\n", + " model.compile(\n", + " loss=\"categorical_crossentropy\", optimizer=optimizer, metrics=[\"accuracy\"]\n", + " )\n", + "\n", + " return model" + ] + }, + { + "cell_type": "code", + "source": [ 
+ "model_cnn = cnn(input_shape, num_classes)" + ], + "metadata": { + "id": "t098yVNYRxPu" + }, + "execution_count": 25, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "history_cnn = model_cnn.fit(x_train, to_categorical(y_train, num_classes), epochs=350, batch_size=100)" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "JinRA8quR2mr", + "outputId": "edc6a49c-3fa4-498d-fbb4-21cb439c9b38" + }, + "execution_count": 26, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Epoch 1/350\n", + "500/500 [==============================] - 4s 7ms/step - loss: 4.1634 - accuracy: 0.4622\n", + "Epoch 2/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 2.3282 - accuracy: 0.6234\n", + "Epoch 3/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 1.5241 - accuracy: 0.6978\n", + "Epoch 4/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 1.1409 - accuracy: 0.7442\n", + "Epoch 5/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.9513 - accuracy: 0.7783\n", + "Epoch 6/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.8526 - accuracy: 0.8004\n", + "Epoch 7/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.7955 - accuracy: 0.8228\n", + "Epoch 8/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.7653 - accuracy: 0.8402\n", + "Epoch 9/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.7479 - accuracy: 0.8540\n", + "Epoch 10/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.7359 - accuracy: 0.8678\n", + "Epoch 11/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.7267 - accuracy: 0.8774\n", + "Epoch 12/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.7274 - accuracy: 0.8839\n", + "Epoch 13/350\n", + "500/500 [==============================] 
- 3s 7ms/step - loss: 0.7191 - accuracy: 0.8918\n", + "Epoch 14/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.7182 - accuracy: 0.8971\n", + "Epoch 15/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.7166 - accuracy: 0.9014\n", + "Epoch 16/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.7239 - accuracy: 0.9033\n", + "Epoch 17/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.7214 - accuracy: 0.9069\n", + "Epoch 18/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.7103 - accuracy: 0.9122\n", + "Epoch 19/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.7023 - accuracy: 0.9168\n", + "Epoch 20/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.7128 - accuracy: 0.9147\n", + "Epoch 21/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.7064 - accuracy: 0.9197\n", + "Epoch 22/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.7090 - accuracy: 0.9177\n", + "Epoch 23/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.7103 - accuracy: 0.9190\n", + "Epoch 24/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6981 - accuracy: 0.9232\n", + "Epoch 25/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.7015 - accuracy: 0.9234\n", + "Epoch 26/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.7026 - accuracy: 0.9253\n", + "Epoch 27/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6889 - accuracy: 0.9264\n", + "Epoch 28/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6924 - accuracy: 0.9275\n", + "Epoch 29/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6818 - accuracy: 0.9303\n", + "Epoch 30/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 
0.6961 - accuracy: 0.9273\n", + "Epoch 31/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6967 - accuracy: 0.9277\n", + "Epoch 32/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6932 - accuracy: 0.9318\n", + "Epoch 33/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6812 - accuracy: 0.9331\n", + "Epoch 34/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6779 - accuracy: 0.9321\n", + "Epoch 35/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6898 - accuracy: 0.9312\n", + "Epoch 36/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6800 - accuracy: 0.9328\n", + "Epoch 37/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6785 - accuracy: 0.9340\n", + "Epoch 38/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6713 - accuracy: 0.9370\n", + "Epoch 39/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6832 - accuracy: 0.9345\n", + "Epoch 40/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6778 - accuracy: 0.9349\n", + "Epoch 41/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6610 - accuracy: 0.9378\n", + "Epoch 42/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6612 - accuracy: 0.9385\n", + "Epoch 43/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6545 - accuracy: 0.9393\n", + "Epoch 44/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6609 - accuracy: 0.9369\n", + "Epoch 45/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6648 - accuracy: 0.9382\n", + "Epoch 46/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6587 - accuracy: 0.9385\n", + "Epoch 47/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6492 - accuracy: 
0.9420\n", + "Epoch 48/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6523 - accuracy: 0.9404\n", + "Epoch 49/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6648 - accuracy: 0.9378\n", + "Epoch 50/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6571 - accuracy: 0.9397\n", + "Epoch 51/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6493 - accuracy: 0.9413\n", + "Epoch 52/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6590 - accuracy: 0.9388\n", + "Epoch 53/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6542 - accuracy: 0.9412\n", + "Epoch 54/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6526 - accuracy: 0.9427\n", + "Epoch 55/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6311 - accuracy: 0.9462\n", + "Epoch 56/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6459 - accuracy: 0.9412\n", + "Epoch 57/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6436 - accuracy: 0.9438\n", + "Epoch 58/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6429 - accuracy: 0.9440\n", + "Epoch 59/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6459 - accuracy: 0.9421\n", + "Epoch 60/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6418 - accuracy: 0.9432\n", + "Epoch 61/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6357 - accuracy: 0.9444\n", + "Epoch 62/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6316 - accuracy: 0.9452\n", + "Epoch 63/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6348 - accuracy: 0.9451\n", + "Epoch 64/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6293 - accuracy: 0.9447\n", + "Epoch 
65/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6339 - accuracy: 0.9453\n", + "Epoch 66/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6223 - accuracy: 0.9482\n", + "Epoch 67/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6169 - accuracy: 0.9483\n", + "Epoch 68/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6217 - accuracy: 0.9456\n", + "Epoch 69/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6262 - accuracy: 0.9456\n", + "Epoch 70/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6168 - accuracy: 0.9488\n", + "Epoch 71/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6166 - accuracy: 0.9465\n", + "Epoch 72/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6248 - accuracy: 0.9458\n", + "Epoch 73/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6089 - accuracy: 0.9510\n", + "Epoch 74/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6155 - accuracy: 0.9472\n", + "Epoch 75/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6207 - accuracy: 0.9480\n", + "Epoch 76/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6123 - accuracy: 0.9502\n", + "Epoch 77/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6173 - accuracy: 0.9474\n", + "Epoch 78/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6020 - accuracy: 0.9510\n", + "Epoch 79/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5970 - accuracy: 0.9512\n", + "Epoch 80/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6211 - accuracy: 0.9454\n", + "Epoch 81/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5945 - accuracy: 0.9522\n", + "Epoch 82/350\n", + "500/500 
[==============================] - 3s 7ms/step - loss: 0.6178 - accuracy: 0.9460\n", + "Epoch 83/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6082 - accuracy: 0.9504\n", + "Epoch 84/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5934 - accuracy: 0.9522\n", + "Epoch 85/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5979 - accuracy: 0.9512\n", + "Epoch 86/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5985 - accuracy: 0.9506\n", + "Epoch 87/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5924 - accuracy: 0.9520\n", + "Epoch 88/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5885 - accuracy: 0.9514\n", + "Epoch 89/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5934 - accuracy: 0.9515\n", + "Epoch 90/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.6033 - accuracy: 0.9507\n", + "Epoch 91/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5958 - accuracy: 0.9523\n", + "Epoch 92/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5970 - accuracy: 0.9505\n", + "Epoch 93/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5900 - accuracy: 0.9536\n", + "Epoch 94/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5916 - accuracy: 0.9512\n", + "Epoch 95/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5955 - accuracy: 0.9519\n", + "Epoch 96/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5943 - accuracy: 0.9520\n", + "Epoch 97/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5852 - accuracy: 0.9523\n", + "Epoch 98/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5784 - accuracy: 0.9533\n", + "Epoch 99/350\n", + "500/500 
[==============================] - 3s 7ms/step - loss: 0.5800 - accuracy: 0.9535\n", + "Epoch 100/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5691 - accuracy: 0.9552\n", + "Epoch 101/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5720 - accuracy: 0.9531\n", + "Epoch 102/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5766 - accuracy: 0.9541\n", + "Epoch 103/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5696 - accuracy: 0.9543\n", + "Epoch 104/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5753 - accuracy: 0.9538\n", + "Epoch 105/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5765 - accuracy: 0.9540\n", + "Epoch 106/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5590 - accuracy: 0.9576\n", + "Epoch 107/350\n", + "500/500 [==============================] - 4s 7ms/step - loss: 0.5675 - accuracy: 0.9537\n", + "Epoch 108/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5797 - accuracy: 0.9523\n", + "Epoch 109/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5745 - accuracy: 0.9549\n", + "Epoch 110/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5634 - accuracy: 0.9565\n", + "Epoch 111/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5626 - accuracy: 0.9556\n", + "Epoch 112/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5731 - accuracy: 0.9542\n", + "Epoch 113/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5737 - accuracy: 0.9539\n", + "Epoch 114/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5690 - accuracy: 0.9557\n", + "Epoch 115/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5670 - accuracy: 0.9558\n", + "Epoch 116/350\n", + "500/500 
[==============================] - 3s 7ms/step - loss: 0.5583 - accuracy: 0.9550\n", + "Epoch 117/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5479 - accuracy: 0.9570\n", + "Epoch 118/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5639 - accuracy: 0.9541\n", + "Epoch 119/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5530 - accuracy: 0.9580\n", + "Epoch 120/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5579 - accuracy: 0.9562\n", + "Epoch 121/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5522 - accuracy: 0.9573\n", + "Epoch 122/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5641 - accuracy: 0.9542\n", + "Epoch 123/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5519 - accuracy: 0.9582\n", + "Epoch 124/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5387 - accuracy: 0.9588\n", + "Epoch 125/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5470 - accuracy: 0.9570\n", + "Epoch 126/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5583 - accuracy: 0.9545\n", + "Epoch 127/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5439 - accuracy: 0.9590\n", + "Epoch 128/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5541 - accuracy: 0.9557\n", + "Epoch 129/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5357 - accuracy: 0.9598\n", + "Epoch 130/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5512 - accuracy: 0.9564\n", + "Epoch 131/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5377 - accuracy: 0.9593\n", + "Epoch 132/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5414 - accuracy: 0.9568\n", + "Epoch 133/350\n", + "500/500 
[==============================] - 3s 7ms/step - loss: 0.5476 - accuracy: 0.9556\n", + "Epoch 134/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5348 - accuracy: 0.9583\n", + "Epoch 135/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5451 - accuracy: 0.9572\n", + "Epoch 136/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5413 - accuracy: 0.9579\n", + "Epoch 137/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5257 - accuracy: 0.9604\n", + "Epoch 138/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5314 - accuracy: 0.9585\n", + "Epoch 139/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5326 - accuracy: 0.9591\n", + "Epoch 140/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5399 - accuracy: 0.9575\n", + "Epoch 141/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5402 - accuracy: 0.9588\n", + "Epoch 142/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5342 - accuracy: 0.9576\n", + "Epoch 143/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5380 - accuracy: 0.9577\n", + "Epoch 144/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5313 - accuracy: 0.9587\n", + "Epoch 145/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5303 - accuracy: 0.9589\n", + "Epoch 146/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5219 - accuracy: 0.9595\n", + "Epoch 147/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5418 - accuracy: 0.9567\n", + "Epoch 148/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5300 - accuracy: 0.9600\n", + "Epoch 149/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5088 - accuracy: 0.9607\n", + "Epoch 150/350\n", + "500/500 
[==============================] - 3s 7ms/step - loss: 0.5319 - accuracy: 0.9577\n", + "Epoch 151/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5168 - accuracy: 0.9621\n", + "Epoch 152/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5161 - accuracy: 0.9606\n", + "Epoch 153/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5109 - accuracy: 0.9618\n", + "Epoch 154/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5190 - accuracy: 0.9593\n", + "Epoch 155/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5273 - accuracy: 0.9586\n", + "Epoch 156/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5093 - accuracy: 0.9630\n", + "Epoch 157/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5210 - accuracy: 0.9589\n", + "Epoch 158/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5046 - accuracy: 0.9636\n", + "Epoch 159/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5142 - accuracy: 0.9598\n", + "Epoch 160/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5294 - accuracy: 0.9588\n", + "Epoch 161/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5079 - accuracy: 0.9622\n", + "Epoch 162/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4972 - accuracy: 0.9635\n", + "Epoch 163/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5120 - accuracy: 0.9601\n", + "Epoch 164/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5091 - accuracy: 0.9624\n", + "Epoch 165/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5115 - accuracy: 0.9612\n", + "Epoch 166/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5071 - accuracy: 0.9614\n", + "Epoch 167/350\n", + "500/500 
[==============================] - 3s 7ms/step - loss: 0.5067 - accuracy: 0.9628\n", + "Epoch 168/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5002 - accuracy: 0.9623\n", + "Epoch 169/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5094 - accuracy: 0.9602\n", + "Epoch 170/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5032 - accuracy: 0.9618\n", + "Epoch 171/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5024 - accuracy: 0.9618\n", + "Epoch 172/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4839 - accuracy: 0.9649\n", + "Epoch 173/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4931 - accuracy: 0.9620\n", + "Epoch 174/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5061 - accuracy: 0.9614\n", + "Epoch 175/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5023 - accuracy: 0.9620\n", + "Epoch 176/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5021 - accuracy: 0.9625\n", + "Epoch 177/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4821 - accuracy: 0.9651\n", + "Epoch 178/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4813 - accuracy: 0.9626\n", + "Epoch 179/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4906 - accuracy: 0.9630\n", + "Epoch 180/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4973 - accuracy: 0.9611\n", + "Epoch 181/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4970 - accuracy: 0.9629\n", + "Epoch 182/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4841 - accuracy: 0.9644\n", + "Epoch 183/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4872 - accuracy: 0.9631\n", + "Epoch 184/350\n", + "500/500 
[==============================] - 3s 7ms/step - loss: 0.4845 - accuracy: 0.9647\n", + "Epoch 185/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4756 - accuracy: 0.9648\n", + "Epoch 186/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4821 - accuracy: 0.9626\n", + "Epoch 187/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4819 - accuracy: 0.9633\n", + "Epoch 188/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.5000 - accuracy: 0.9617\n", + "Epoch 189/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4783 - accuracy: 0.9652\n", + "Epoch 190/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4778 - accuracy: 0.9641\n", + "Epoch 191/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4815 - accuracy: 0.9623\n", + "Epoch 192/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4892 - accuracy: 0.9640\n", + "Epoch 193/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4850 - accuracy: 0.9637\n", + "Epoch 194/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4871 - accuracy: 0.9641\n", + "Epoch 195/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4739 - accuracy: 0.9651\n", + "Epoch 196/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4779 - accuracy: 0.9636\n", + "Epoch 197/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4658 - accuracy: 0.9663\n", + "Epoch 198/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4821 - accuracy: 0.9623\n", + "Epoch 199/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4826 - accuracy: 0.9635\n", + "Epoch 200/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4732 - accuracy: 0.9656\n", + "Epoch 201/350\n", + "500/500 
[==============================] - 3s 7ms/step - loss: 0.4790 - accuracy: 0.9648\n", + "Epoch 202/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4675 - accuracy: 0.9658\n", + "Epoch 203/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4743 - accuracy: 0.9633\n", + "Epoch 204/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4667 - accuracy: 0.9653\n", + "Epoch 205/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4760 - accuracy: 0.9624\n", + "Epoch 206/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4736 - accuracy: 0.9651\n", + "Epoch 207/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4744 - accuracy: 0.9636\n", + "Epoch 208/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4623 - accuracy: 0.9664\n", + "Epoch 209/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4591 - accuracy: 0.9670\n", + "Epoch 210/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4700 - accuracy: 0.9645\n", + "Epoch 211/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4690 - accuracy: 0.9653\n", + "Epoch 212/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4686 - accuracy: 0.9649\n", + "Epoch 213/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4546 - accuracy: 0.9667\n", + "Epoch 214/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4710 - accuracy: 0.9645\n", + "Epoch 215/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4677 - accuracy: 0.9653\n", + "Epoch 216/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4796 - accuracy: 0.9629\n", + "Epoch 217/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4601 - accuracy: 0.9673\n", + "Epoch 218/350\n", + "500/500 
[==============================] - 3s 7ms/step - loss: 0.4571 - accuracy: 0.9667\n", + "Epoch 219/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4652 - accuracy: 0.9648\n", + "Epoch 220/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4613 - accuracy: 0.9658\n", + "Epoch 221/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4510 - accuracy: 0.9679\n", + "Epoch 222/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4689 - accuracy: 0.9653\n", + "Epoch 223/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4490 - accuracy: 0.9677\n", + "Epoch 224/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4579 - accuracy: 0.9645\n", + "Epoch 225/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4465 - accuracy: 0.9682\n", + "Epoch 226/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4486 - accuracy: 0.9673\n", + "Epoch 227/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4628 - accuracy: 0.9638\n", + "Epoch 228/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4438 - accuracy: 0.9689\n", + "Epoch 229/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4528 - accuracy: 0.9650\n", + "Epoch 230/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4560 - accuracy: 0.9656\n", + "Epoch 231/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4532 - accuracy: 0.9670\n", + "Epoch 232/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4497 - accuracy: 0.9671\n", + "Epoch 233/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4474 - accuracy: 0.9675\n", + "Epoch 234/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4517 - accuracy: 0.9672\n", + "Epoch 235/350\n", + "500/500 
[==============================] - 3s 7ms/step - loss: 0.4531 - accuracy: 0.9660\n", + "Epoch 236/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4524 - accuracy: 0.9662\n", + "Epoch 237/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4423 - accuracy: 0.9669\n", + "Epoch 238/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4500 - accuracy: 0.9658\n", + "Epoch 239/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4564 - accuracy: 0.9653\n", + "Epoch 240/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4247 - accuracy: 0.9709\n", + "Epoch 241/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4395 - accuracy: 0.9670\n", + "Epoch 242/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4506 - accuracy: 0.9656\n", + "Epoch 243/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4332 - accuracy: 0.9697\n", + "Epoch 244/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4385 - accuracy: 0.9674\n", + "Epoch 245/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4414 - accuracy: 0.9672\n", + "Epoch 246/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4494 - accuracy: 0.9664\n", + "Epoch 247/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4429 - accuracy: 0.9677\n", + "Epoch 248/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4385 - accuracy: 0.9683\n", + "Epoch 249/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4283 - accuracy: 0.9697\n", + "Epoch 250/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4362 - accuracy: 0.9677\n", + "Epoch 251/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4360 - accuracy: 0.9678\n", + "Epoch 252/350\n", + "500/500 
[==============================] - 3s 7ms/step - loss: 0.4337 - accuracy: 0.9684\n", + "Epoch 253/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4485 - accuracy: 0.9664\n", + "Epoch 254/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4364 - accuracy: 0.9686\n", + "Epoch 255/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4394 - accuracy: 0.9681\n", + "Epoch 256/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4211 - accuracy: 0.9692\n", + "Epoch 257/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4226 - accuracy: 0.9694\n", + "Epoch 258/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4358 - accuracy: 0.9669\n", + "Epoch 259/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4260 - accuracy: 0.9696\n", + "Epoch 260/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4276 - accuracy: 0.9690\n", + "Epoch 261/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4286 - accuracy: 0.9683\n", + "Epoch 262/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4297 - accuracy: 0.9690\n", + "Epoch 263/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4210 - accuracy: 0.9696\n", + "Epoch 264/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4301 - accuracy: 0.9681\n", + "Epoch 265/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4283 - accuracy: 0.9687\n", + "Epoch 266/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4182 - accuracy: 0.9713\n", + "Epoch 267/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4202 - accuracy: 0.9681\n", + "Epoch 268/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4292 - accuracy: 0.9686\n", + "Epoch 269/350\n", + "500/500 
[==============================] - 3s 7ms/step - loss: 0.4213 - accuracy: 0.9699\n", + "Epoch 270/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4239 - accuracy: 0.9688\n", + "Epoch 271/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4213 - accuracy: 0.9686\n", + "Epoch 272/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4120 - accuracy: 0.9706\n", + "Epoch 273/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4196 - accuracy: 0.9697\n", + "Epoch 274/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4181 - accuracy: 0.9694\n", + "Epoch 275/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4176 - accuracy: 0.9692\n", + "Epoch 276/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4199 - accuracy: 0.9693\n", + "Epoch 277/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4298 - accuracy: 0.9686\n", + "Epoch 278/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4201 - accuracy: 0.9696\n", + "Epoch 279/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4046 - accuracy: 0.9715\n", + "Epoch 280/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4024 - accuracy: 0.9707\n", + "Epoch 281/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4200 - accuracy: 0.9685\n", + "Epoch 282/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4064 - accuracy: 0.9710\n", + "Epoch 283/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3937 - accuracy: 0.9725\n", + "Epoch 284/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4107 - accuracy: 0.9690\n", + "Epoch 285/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4096 - accuracy: 0.9709\n", + "Epoch 286/350\n", + "500/500 
[==============================] - 3s 7ms/step - loss: 0.4103 - accuracy: 0.9696\n", + "Epoch 287/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4256 - accuracy: 0.9673\n", + "Epoch 288/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4131 - accuracy: 0.9715\n", + "Epoch 289/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3974 - accuracy: 0.9726\n", + "Epoch 290/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4112 - accuracy: 0.9703\n", + "Epoch 291/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3966 - accuracy: 0.9720\n", + "Epoch 292/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4136 - accuracy: 0.9687\n", + "Epoch 293/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4141 - accuracy: 0.9709\n", + "Epoch 294/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4057 - accuracy: 0.9714\n", + "Epoch 295/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3946 - accuracy: 0.9728\n", + "Epoch 296/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4176 - accuracy: 0.9682\n", + "Epoch 297/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4128 - accuracy: 0.9701\n", + "Epoch 298/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4058 - accuracy: 0.9712\n", + "Epoch 299/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3820 - accuracy: 0.9740\n", + "Epoch 300/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4019 - accuracy: 0.9694\n", + "Epoch 301/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4020 - accuracy: 0.9713\n", + "Epoch 302/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4151 - accuracy: 0.9681\n", + "Epoch 303/350\n", + "500/500 
[==============================] - 3s 7ms/step - loss: 0.3961 - accuracy: 0.9724\n", + "Epoch 304/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3941 - accuracy: 0.9709\n", + "Epoch 305/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3992 - accuracy: 0.9710\n", + "Epoch 306/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3982 - accuracy: 0.9722\n", + "Epoch 307/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3842 - accuracy: 0.9727\n", + "Epoch 308/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4010 - accuracy: 0.9696\n", + "Epoch 309/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4085 - accuracy: 0.9690\n", + "Epoch 310/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3894 - accuracy: 0.9731\n", + "Epoch 311/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4014 - accuracy: 0.9696\n", + "Epoch 312/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3918 - accuracy: 0.9729\n", + "Epoch 313/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3906 - accuracy: 0.9708\n", + "Epoch 314/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3815 - accuracy: 0.9746\n", + "Epoch 315/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3944 - accuracy: 0.9701\n", + "Epoch 316/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.4006 - accuracy: 0.9704\n", + "Epoch 317/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3836 - accuracy: 0.9748\n", + "Epoch 318/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3836 - accuracy: 0.9722\n", + "Epoch 319/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3873 - accuracy: 0.9715\n", + "Epoch 320/350\n", + "500/500 
[==============================] - 3s 7ms/step - loss: 0.3858 - accuracy: 0.9728\n", + "Epoch 321/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3900 - accuracy: 0.9710\n", + "Epoch 322/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3927 - accuracy: 0.9719\n", + "Epoch 323/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3863 - accuracy: 0.9711\n", + "Epoch 324/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3857 - accuracy: 0.9726\n", + "Epoch 325/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3778 - accuracy: 0.9728\n", + "Epoch 326/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3951 - accuracy: 0.9698\n", + "Epoch 327/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3871 - accuracy: 0.9726\n", + "Epoch 328/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3910 - accuracy: 0.9707\n", + "Epoch 329/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3787 - accuracy: 0.9735\n", + "Epoch 330/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3874 - accuracy: 0.9707\n", + "Epoch 331/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3901 - accuracy: 0.9715\n", + "Epoch 332/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3710 - accuracy: 0.9741\n", + "Epoch 333/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3874 - accuracy: 0.9715\n", + "Epoch 334/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3874 - accuracy: 0.9722\n", + "Epoch 335/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3768 - accuracy: 0.9730\n", + "Epoch 336/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3739 - accuracy: 0.9738\n", + "Epoch 337/350\n", + "500/500 
[==============================] - 3s 7ms/step - loss: 0.3883 - accuracy: 0.9711\n", + "Epoch 338/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3715 - accuracy: 0.9732\n", + "Epoch 339/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3740 - accuracy: 0.9730\n", + "Epoch 340/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3902 - accuracy: 0.9715\n", + "Epoch 341/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3779 - accuracy: 0.9727\n", + "Epoch 342/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3883 - accuracy: 0.9708\n", + "Epoch 343/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3739 - accuracy: 0.9741\n", + "Epoch 344/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3823 - accuracy: 0.9714\n", + "Epoch 345/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3729 - accuracy: 0.9736\n", + "Epoch 346/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3730 - accuracy: 0.9731\n", + "Epoch 347/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3716 - accuracy: 0.9722\n", + "Epoch 348/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3841 - accuracy: 0.9722\n", + "Epoch 349/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3659 - accuracy: 0.9750\n", + "Epoch 350/350\n", + "500/500 [==============================] - 3s 7ms/step - loss: 0.3740 - accuracy: 0.9721\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "model_cnn.evaluate(x_test, to_categorical(y_test, num_classes))" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "eIzsv0QLR_tt", + "outputId": "0a7eb8e7-4d7f-40ef-e1b0-b3979cc65854" + }, + "execution_count": 27, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + 
"313/313 [==============================] - 1s 2ms/step - loss: 1.4919 - accuracy: 0.7581\n" + ] + }, + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[1.491928219795227, 0.7580999732017517]" + ] + }, + "metadata": {}, + "execution_count": 27 + } + ] + }, + { + "cell_type": "code", + "source": [ + "loss = history_cnn.history['loss']\n", + "epochs = range(1, len(loss) + 1)\n", + "\n", + "plt.plot(epochs, loss, 'b', label='Training Loss')\n", + "plt.title('Training Loss')\n", + "plt.xlabel('Epochs')\n", + "plt.ylabel('Loss')\n", + "plt.legend()\n", + "plt.show()\n" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 472 + }, + "id": "rOXE49XhSGBy", + "outputId": "7bf4879e-632d-4762-977f-522402eee644" + }, + "execution_count": 28, + "outputs": [ + { + "output_type": "display_data", + "data": { + "text/plain": [ + "
" + ], + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjcAAAHHCAYAAABDUnkqAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABOn0lEQVR4nO3dd3hUZeL28XsSkiGBJISSAoQisPQmNaCAgkJgWUB2VcQlYFsQXLDsKlZA3eBiF0VZlawFWeEFVASRIiC9CApIEUWCkgQRkhBKCMl5/3h+GRhSCTM5Kd/Pdc2VzKnPORmdm6cdh2VZlgAAAMoJH7sLAAAA4EmEGwAAUK4QbgAAQLlCuAEAAOUK4QYAAJQrhBsAAFCuEG4AAEC5QrgBAADlCuEGAACUK4QbAF43cuRINWjQoFj7Tpo0SQ6Hw7MFAlCuEW6ACszhcBTptWrVKruLaouRI0eqatWqdhcDwGVy8GwpoOL64IMP3N6/9957WrZsmd5//3235TfccIPCw8OLfZ7MzExlZ2fL6XRe9r7nz5/X+fPnVbly5WKfv7hGjhypefPmKT09vcTPDaD4KtldAAD2uf32293eb9y4UcuWLcu1/FKnT59WYGBgkc/j5+dXrPJJUqVKlVSpEv+rAlB0NEsBKFCvXr3UqlUrbdu2TT169FBgYKAeffRRSdInn3yiAQMGqHbt2nI6nWrUqJGefvppZWVluR3j0j43P//8sxwOh55//nnNnDlTjRo1ktPpVKdOnbRlyxa3ffPqc+NwODRu3DgtXLhQrVq1ktPpVMuWLfXFF1/kKv+qVavUsWNHVa5cWY0aNdJbb73l8X48c+fOVYcOHRQQEKCaNWvq9ttv16+//uq2TVJSkkaNGqW6devK6XQqMjJSgwYN0s8//+zaZuvWrerbt69q1qypgIAANWzYUHfccYfHyglUFPxzCEChfv/9d8XExOjWW2/V7bff7mqiio+PV9WqVfXAAw+oatWqWrlypZ588kmlpaVp2rRphR539uzZOnnypP72t7/J4XDo3//+t2666Sb99NNPhdb2rF27VvPnz9e9996roKAgvfrqqxo6dKgSEhJUo0YNSdL27dvVr18/RUZGavLkycrKytKUKVNUq1atK78p/yc+Pl6jRo1Sp06dFBcXp+TkZL3yyitat26dtm/frmrVqkmShg4dqt27d+u+++5TgwYNdPToUS1btkwJCQmu9zfeeKNq1aqlRx55RNWqVdPPP/+s+fPne6ysQIVhAcD/GTt2rHXp/xZ69uxpSbLefPPNXNufPn0617K//e1vVmBgoHX27FnXstjYWKt+/fqu9wcPHrQkWTVq1LCOHz/uWv7JJ59YkqzPPvvMteypp57KVSZJlr+/v3XgwAHXsm+//daSZL322muuZQMHDrQCAwOtX3/91bXshx9+sCpVqpTrmHmJjY21qlSpku/6c+fOWWFhYVarVq2sM2fOuJYvWrTIkmQ9+eSTlmVZ1okTJyxJ1rRp0/I91oIFCyxJ1pYtWwotF4CC0SwFoFBOp1OjRo3KtTwgIMD1+8mTJ3Xs2DFde+21On36tPbu3VvocW+55RaFhoa63l977bWSpJ9++qnQffv06aNGjRq53rdp00bBwcGufbOysrR8+XINHjxYtWvXdm3XuHFjxcTEFHr8oti6dauOHj2qe++9163D84ABA9SsWTN9/vnnksx98vf316pVq3TixIk8j5VTw7No0SJlZmZ6pHxARUW4AVCoOnXqyN/fP9fy3bt3a8iQIQoJCVFwcLBq1arl6oycmppa6HHr1avn9j4n6OQXAAraN2f/nH2PHj2qM2fOqHHjxrm2y2tZcRw6dEiS1LRp01zrmjVr5lrvdDr13HPPacmSJQoPD1ePHj3073//W0lJSa7te/bsqaFDh2ry5MmqWbOmBg0apFmzZikjI8MjZQUqEsINgEJdXEOTIyUlRT179tS3336rK
VOm6LPPPtOyZcv03HPPSZKys7MLPa6vr2+ey60izFBxJfvaYcKECdq/f7/i4uJUuXJlPfHEE2revLm2b98uyXSSnjdvnjZs2KBx48bp119/1R133KEOHTowFB24TIQbAMWyatUq/f7774qPj9f48eP1xz/+UX369HFrZrJTWFiYKleurAMHDuRal9ey4qhfv74kad++fbnW7du3z7U+R6NGjfTggw/qyy+/1K5du3Tu3Dm98MILbtt07dpVzz77rLZu3aoPP/xQu3fv1pw5czxSXqCiINwAKJacmpOLa0rOnTunN954w64iufH19VWfPn20cOFCHTlyxLX8wIEDWrJkiUfO0bFjR4WFhenNN990az5asmSJ9uzZowEDBkgy8wKdPXvWbd9GjRopKCjItd+JEydy1Tq1a9dOkmiaAi4TQ8EBFEu3bt0UGhqq2NhY/f3vf5fD4dD7779fqpqFJk2apC+//FLdu3fXmDFjlJWVpenTp6tVq1basWNHkY6RmZmpZ555Jtfy6tWr695779Vzzz2nUaNGqWfPnho2bJhrKHiDBg10//33S5L279+v3r176+abb1aLFi1UqVIlLViwQMnJybr11lslSf/973/1xhtvaMiQIWrUqJFOnjyp//znPwoODlb//v09dk+AioBwA6BYatSooUWLFunBBx/U448/rtDQUN1+++3q3bu3+vbta3fxJEkdOnTQkiVL9NBDD+mJJ55QVFSUpkyZoj179hRpNJdkaqOeeOKJXMsbNWqke++9VyNHjlRgYKCmTp2qhx9+WFWqVNGQIUP03HPPuUZARUVFadiwYVqxYoXef/99VapUSc2aNdPHH3+soUOHSjIdijdv3qw5c+YoOTlZISEh6ty5sz788EM1bNjQY/cEqAh4thSACmfw4MHavXu3fvjhB7uLAsAL6HMDoFw7c+aM2/sffvhBixcvVq9evewpEACvo+YGQLkWGRmpkSNH6qqrrtKhQ4c0Y8YMZWRkaPv27WrSpIndxQPgBfS5AVCu9evXTx999JGSkpLkdDoVHR2tf/3rXwQboByj5gYAAJQr9LkBAADlCuEGAACUKxWuz012draOHDmioKAgORwOu4sDAACKwLIsnTx5UrVr15aPT8F1MxUu3Bw5ckRRUVF2FwMAABTD4cOHVbdu3QK3qXDhJigoSJK5OcHBwTaXBgAAFEVaWpqioqJc3+MFqXDhJqcpKjg4mHADAEAZU5QuJXQoBgAA5QrhBgAAlCuEGwAAUK5UuD43AAB7ZGdn69y5c3YXA6WYv79/ocO8i4JwAwDwunPnzungwYPKzs62uygoxXx8fNSwYUP5+/tf0XEINwAAr7IsS4mJifL19VVUVJRH/mWO8idnkt3ExETVq1fviibaJdwAALzq/PnzOn36tGrXrq3AwEC7i4NSrFatWjpy5IjOnz8vPz+/Yh+H+AwA8KqsrCxJuuKmBpR/OZ+RnM9McRFuAAAlguf5oTCe+owQbgAAQLlCuAEAoIQ0aNBAL7/8cpG3X7VqlRwOh1JSUrxWpvKIcAMAwCUcDkeBr0mTJhXruFu2bNE999xT5O27deumxMREhYSEFOt8RVXeQhSjpTwkI0NKTpZ8fKRCnsQOACjlEhMTXb//73//05NPPql9+/a5llWtWtX1u2VZysrKUqVKhX+l1qpV67LK4e/vr4iIiMvaB9TceMw330j160u9etldEgDAlYqIiHC9QkJC5HA4XO/37t2roKAgLVmyRB06dJDT6dTatWv1448/atCgQQoPD1fVqlXVqVMnLV++3O24lzZLORwOvf322xoyZIgCAwPVpEkTffrpp671l9aoxMfHq1q1alq6dKmaN2+uqlWrql+/fm5h7Pz58/r73/+uatWqqUaNGnr44YcVGxurwYMHF/t+nDhxQiNGjFBoaKgCAwMVExOjH374wbX+0KFDGjhwoEJDQ1WlShW1bNlSixcvdu07fPhw1apVSwEBAWrSpIlmz
ZpV7LIUBeHGQ3LmpGLyTQAomGVJp07Z87Isz13HI488oqlTp2rPnj1q06aN0tPT1b9/f61YsULbt29Xv379NHDgQCUkJBR4nMmTJ+vmm2/Wd999p/79+2v48OE6fvx4vtufPn1azz//vN5//32tWbNGCQkJeuihh1zrn3vuOX344YeaNWuW1q1bp7S0NC1cuPCKrnXkyJHaunWrPv30U23YsEGWZal///7KzMyUJI0dO1YZGRlas2aNdu7cqeeee85Vu/XEE0/o+++/15IlS7Rnzx7NmDFDNWvWvKLyFMqqYFJTUy1JVmpqqkePu3mzZUmWVb++Rw8LAGXemTNnrO+//946c+aMZVmWlZ5u/n9pxys9/fLLP2vWLCskJMT1/quvvrIkWQsXLix035YtW1qvvfaa6339+vWtl156yfVekvX444+73qenp1uSrCVLlrid68SJE66ySLIOHDjg2uf111+3wsPDXe/Dw8OtadOmud6fP3/eqlevnjVo0KB8y3npeS62f/9+S5K1bt0617Jjx45ZAQEB1scff2xZlmW1bt3amjRpUp7HHjhwoDVq1Kh8z32xSz8rF7uc729qbjyEmhsAqFg6duzo9j49PV0PPfSQmjdvrmrVqqlq1aras2dPoTU3bdq0cf1epUoVBQcH6+jRo/luHxgYqEaNGrneR0ZGurZPTU1VcnKyOnfu7Frv6+urDh06XNa1XWzPnj2qVKmSunTp4lpWo0YNNW3aVHv27JEk/f3vf9czzzyj7t2766mnntJ3333n2nbMmDGaM2eO2rVrp3/+859av359sctSVIQbDyHcAEDRBAZK6en2vDz59IcqVaq4vX/ooYe0YMEC/etf/9LXX3+tHTt2qHXr1oU+Cf3Sxww4HI4CHzCa1/aWJ9vbiuGuu+7STz/9pL/+9a/auXOnOnbsqNdee02SFBMTo0OHDun+++/XkSNH1Lt3b7dmNG8g3HgI4QYAisbhkKpUseflzUmS161bp5EjR2rIkCFq3bq1IiIi9PPPP3vvhHkICQlReHi4tmzZ4lqWlZWlb775ptjHbN68uc6fP69Nmza5lv3+++/at2+fWrRo4VoWFRWl0aNHa/78+XrwwQf1n//8x7WuVq1aio2N1QcffKCXX35ZM2fOLHZ5ioKh4B5CuAGAiq1JkyaaP3++Bg4cKIfDoSeeeKLAGhhvue+++xQXF6fGjRurWbNmeu2113TixIkiPdpg586dCgoKcr13OBxq27atBg0apLvvvltvvfWWgoKC9Mgjj6hOnToaNGiQJGnChAmKiYnRH/7wB504cUJfffWVmjdvLkl68skn1aFDB7Vs2VIZGRlatGiRa523EG48hHADABXbiy++qDvuuEPdunVTzZo19fDDDystLa3Ey/Hwww8rKSlJI0aMkK+vr+655x717dtXvr6+he7bo0cPt/e+vr46f/68Zs2apfHjx+uPf/yjzp07px49emjx4sWuJrKsrCyNHTtWv/zyi4KDg9WvXz+99NJLksxcPRMnTtTPP/+sgIAAXXvttZozZ47nL/wiDsvuhroSlpaWppCQEKWmpio4ONhjx92zR2rRQqpRQzp2zGOHBYAy7+zZszp48KAaNmyoypUr212cCic7O1vNmzfXzTffrKefftru4hSooM/K5Xx/U3PjIdTcAABKg0OHDunLL79Uz549lZGRoenTp+vgwYO67bbb7C5aiaFDsYcQbgAApYGPj4/i4+PVqVMnde/eXTt37tTy5cu93s+lNCk14Wbq1KlyOByaMGFCgdvNnTtXzZo1U+XKldW6dWvX9M52I9wAAEqDqKgorVu3TqmpqUpLS9P69etz9aUp70pFuNmyZYveeustt4mM8rJ+/XoNGzZMd955p7Zv367Bgwdr8ODB2rVrVwmVNH+EGwAASgfbw016erqGDx+u//znPwoNDS1w21deeUX9+vXTP/7xDzVv3lxPP/20rr76ak2fPr2ESps/wg0AFKyCj
V9BMXjqM2J7uBk7dqwGDBigPn36FLrthg0bcm3Xt29fbdiwId99MjIylJaW5vbyBsINAOQtZwhyYTP1AjmfkaIMWy+IraOl5syZo2+++cZtJsWCJCUlKTw83G1ZeHi4kpKS8t0nLi5OkydPvqJyFgXhBgDyVqlSJQUGBuq3336Tn5+ffHxs/3c1SqHs7Gz99ttvCgwMVKVKVxZPbAs3hw8f1vjx47Vs2TKvznswceJEPfDAA673aWlpioqK8vh5CDcAkDeHw6HIyEgdPHhQhw4dsrs4KMV8fHxUr169Is2mXBDbws22bdt09OhRXX311a5lWVlZWrNmjaZPn66MjIxc1VIRERFKTk52W5acnKyIiIh8z+N0OuV0Oj1b+DwQbgAgf/7+/mrSpAlNUyiQv7+/R2r2bAs3vXv31s6dO92WjRo1Ss2aNdPDDz+cZ3tbdHS0VqxY4TZcfNmyZYqOjvZ2cQuV87ewLPPy5sPZAKAs8vHxYYZilAjbwk1QUJBatWrltqxKlSqqUaOGa/mIESNUp04dxcXFSZLGjx+vnj176oUXXtCAAQM0Z84cbd261etPFy2Ki4NmdrZ0hX2hAABAMZXqXl0JCQlKTEx0ve/WrZtmz56tmTNnqm3btpo3b54WLlyYKyTZ4dJwAwAA7MGDMz12XCkkxPx+9qxUAt18AACoMC7n+7tU19yUJdTcAABQOhBuPIRwAwBA6UC48RDCDQAApQPhxkMINwAAlA6EGw8h3AAAUDoQbjyEcAMAQOlAuPGQi2ckJtwAAGAfwo2HOBwXAg7hBgAA+xBuPIiHZwIAYD/CjQcRbgAAsB/hxoMINwAA2I9w40GEGwAA7Ee48SDCDQAA9iPceBDhBgAA+xFuPIhwAwCA/Qg3HkS4AQDAfoQbDyLcAABgP8KNBxFuAACwH+HGgwg3AADYj3DjQYQbAADsR7jxIMINAAD2I9x4EOEGAAD7EW48iHADAID9CDcelBNusrLsLQcAABUZ4caDqLkBAMB+hBsPItwAAGA/wo0H+fqan4QbAADsQ7jxIGpuAACwH+HGgwg3AADYj3DjQYQbAADsR7jxIMINAAD2I9x4EOEGAAD7EW48iHADAID9bA03M2bMUJs2bRQcHKzg4GBFR0dryZIl+W4fHx8vh8Ph9qpcuXIJlrhghBsAAOxXyc6T161bV1OnTlWTJk1kWZb++9//atCgQdq+fbtatmyZ5z7BwcHat2+f673D4Sip4haKcAMAgP1sDTcDBw50e//ss89qxowZ2rhxY77hxuFwKCIioiSKd9kINwAA2K/U9LnJysrSnDlzdOrUKUVHR+e7XXp6uurXr6+oqCgNGjRIu3fvLsFSFoxwAwCA/WytuZGknTt3Kjo6WmfPnlXVqlW1YMECtWjRIs9tmzZtqnfffVdt2rRRamqqnn/+eXXr1k27d+9W3bp189wnIyNDGRkZrvdpaWleuQ6JcAMAQGlge81N06ZNtWPHDm3atEljxoxRbGysvv/++zy3jY6O1ogRI9SuXTv17NlT8+fPV61atfTWW2/le/y4uDiFhIS4XlFRUd66FMINAAClgO3hxt/fX40bN1aHDh0UFxentm3b6pVXXinSvn5+fmrfvr0OHDiQ7zYTJ05Uamqq63X48GFPFT0Xwg0AAPazPdxcKjs7260ZqSBZWVnauXOnIiMj893G6XS6hprnvLyFcAMAgP1s7XMzceJExcTEqF69ejp58qRmz56tVatWaenSpZKkESNGqE6dOoqLi5MkTZkyRV27dlXjxo2VkpKiadOm6dChQ7rrrrvsvAwXwg0AAPazNdwcPXpUI0aMUGJiokJCQtSmTRstXbpUN9xwgyQpISFBPj4XKpdOnDihu+++W0lJSQoNDVWHDh20fv36fDsglzTCDQAA9nNYlmXZXYiSlJaWppCQEKWmpnq8iWrAAGnxYmnWLGnkSI8eGgCACu1yvr9LXZ+bsoyaGwAA7Ee48SDCDQAA9iPce
BDhBgAA+xFuPIhwAwCA/Qg3HkS4AQDAfoQbDyLcAABgP8KNB+WEm6wse8sBAEBFRrjxIGpuAACwH+HGg3x9zU/CDQAA9iHceBA1NwAA2I9w40GEGwAA7Ee48SDCDQAA9iPceBDhBgAA+xFuPIhwAwCA/Qg3HkS4AQDAfoQbDyLcAABgP8KNBxFuAACwH+HGgwg3AADYj3DjQYQbAADsR7jxIMINAAD2I9x4EOEGAAD7EW48iHADAID9CDceRLgBAMB+hBsPItwAAGA/wo0HEW4AALAf4caDCDcAANiPcONBhBsAAOxHuPEgwg0AAPYj3HgQ4QYAAPsRbjyIcAMAgP0INx5EuAEAwH6EGw8i3AAAYD/CjQflhJusLHvLAQBARUa48SBqbgAAsJ+t4WbGjBlq06aNgoODFRwcrOjoaC1ZsqTAfebOnatmzZqpcuXKat26tRYvXlxCpS0c4QYAAPvZGm7q1q2rqVOnatu2bdq6dauuv/56DRo0SLt3785z+/Xr12vYsGG68847tX37dg0ePFiDBw/Wrl27SrjkefP1NT8JNwAA2MdhWZZldyEuVr16dU2bNk133nlnrnW33HKLTp06pUWLFrmWde3aVe3atdObb75ZpOOnpaUpJCREqampCg4O9li5JWnGDOnee6WhQ6V58zx6aAAAKrTL+f4uNX1usrKyNGfOHJ06dUrR0dF5brNhwwb16dPHbVnfvn21YcOGfI+bkZGhtLQ0t5e30CwFAID9bA83O3fuVNWqVeV0OjV69GgtWLBALVq0yHPbpKQkhYeHuy0LDw9XUlJSvsePi4tTSEiI6xUVFeXR8l+McAMAgP1sDzdNmzbVjh07tGnTJo0ZM0axsbH6/vvvPXb8iRMnKjU11fU6fPiwx459KcINAAD2q2R3Afz9/dW4cWNJUocOHbRlyxa98soreuutt3JtGxERoeTkZLdlycnJioiIyPf4TqdTTqfTs4XOB+EGAAD72V5zc6ns7GxlZGTkuS46OlorVqxwW7Zs2bJ8++iUNMINAAD2s7XmZuLEiYqJiVG9evV08uRJzZ49W6tWrdLSpUslSSNGjFCdOnUUFxcnSRo/frx69uypF154QQMGDNCcOXO0detWzZw5087LcCHcAABgP1vDzdGjRzVixAglJiYqJCREbdq00dKlS3XDDTdIkhISEuTjc6FyqVu3bpo9e7Yef/xxPfroo2rSpIkWLlyoVq1a2XUJbgg3AADYr9TNc+Nt3pzn5qOPpNtuk3r3lpYv9+ihAQCo0MrkPDflATU3AADYj3DjQYQbAADsR7jxIMINAAD2I9x4EOEGAAD7EW48iHADAID9CDceRLgBAMB+hBsPItwAAGA/wo0HEW4AALAf4caDCDcAANiPcONBhBsAAOxHuPEgwg0AAPYj3HgQ4QYAAPsRbjyIcAMAgP0INx5EuAEAwH6EGw/KCTdZWfaWAwCAioxw40HU3AAAYD/CjQf5+pqfhBsAAOxDuPEgam4AALAf4caDCDcAANiPcONBhBsAAOxHuPEgwg0AAPYj3HgQ4QYAAPsRbjyIcAMAgP0INx5EuAEAwH6EGw8i3AAAYD/CjQcRbgAAsB/hxoMINwAA2I9w40GEGwAA7Ee48SDCDQAA9iPceBDhBgAA+xFuPIhwAwCA/Qg3HkS4AQDAfoQbDyLcAABgP1vDTVxcnDp16qSgoCCFhYVp8ODB2rdvX4H7xMfHy+FwuL0qV65cQiUumM9Fd9Oy7CsHAAAVma3hZvXq1Ro7dqw2btyoZcuWKTMzUzfeeKNOnTpV4H7BwcFKTEx0vQ4dOlRCJS7YxeGG2hsAAOxRyc6Tf/HFF27v4+PjFRYWpm3btqlHjx757udwOBQREeHt4l22S8ONr699ZQEAoKIqVX1uUlNTJUnVq1cvcLv09HTVr19fUVFRGjRokHbv3p3vthkZGUpLS3N7eQs1NwAA2K/UhJvs7GxNmDBB3bt3V6tWrfLdrmnTpnr33
Xf1ySef6IMPPlB2dra6deumX375Jc/t4+LiFBIS4npFRUV56xIINwAAlAIOyyodXV/HjBmjJUuWaO3atapbt26R98vMzFTz5s01bNgwPf3007nWZ2RkKCMjw/U+LS1NUVFRSk1NVXBwsEfKnuP0aalKFfN7evqF3wEAwJVJS0tTSEhIkb6/be1zk2PcuHFatGiR1qxZc1nBRpL8/PzUvn17HThwIM/1TqdTTqfTE8UsFDU3AADYz9ZmKcuyNG7cOC1YsEArV65Uw4YNL/sYWVlZ2rlzpyIjI71QwstzcbjJyrKvHAAAVGS21tyMHTtWs2fP1ieffKKgoCAlJSVJkkJCQhQQECBJGjFihOrUqaO4uDhJ0pQpU9S1a1c1btxYKSkpmjZtmg4dOqS77rrLtuvIQc0NAAD2szXczJgxQ5LUq1cvt+WzZs3SyJEjJUkJCQnyuSg1nDhxQnfffbeSkpIUGhqqDh06aP369WrRokVJFTtfhBsAAOxXajoUl5TL6ZBUHA6H+ZmcLIWFefzwAABUSJfz/V1qhoKXFzxfCgAAexFuPIxwAwCAvYoVbg4fPuw2ad7mzZs1YcIEzZw502MFK6sINwAA2KtY4ea2227TV199JUlKSkrSDTfcoM2bN+uxxx7TlClTPFrAsoZwAwCAvYoVbnbt2qXOnTtLkj7++GO1atVK69ev14cffqj4+HhPlq/MIdwAAGCvYoWbzMxM16y/y5cv15/+9CdJUrNmzZSYmOi50pVBhBsAAOxVrHDTsmVLvfnmm/r666+1bNky9evXT5J05MgR1ahRw6MFLGsINwAA2KtY4ea5557TW2+9pV69emnYsGFq27atJOnTTz91NVdVVIQbAADsVawZinv16qVjx44pLS1NoaGhruX33HOPAgMDPVa4sign3PBsKQAA7FGsmpszZ84oIyPDFWwOHTqkl19+Wfv27VNYBZ+W18/P/Dx/3t5yAABQURUr3AwaNEjvvfeeJCklJUVdunTRCy+8oMGDB7ueF1VRVfq/urDMTHvLAQBARVWscPPNN9/o2muvlSTNmzdP4eHhOnTokN577z29+uqrHi1gWZNTc0O4AQDAHsUKN6dPn1ZQUJAk6csvv9RNN90kHx8fde3aVYcOHfJoAcsamqUAALBXscJN48aNtXDhQh0+fFhLly7VjTfeKEk6evSoV560XZbQLAUAgL2KFW6efPJJPfTQQ2rQoIE6d+6s6OhoSaYWp3379h4tYFlDsxQAAPYq1lDwP//5z7rmmmuUmJjomuNGknr37q0hQ4Z4rHBlEc1SAADYq1jhRpIiIiIUERHhejp43bp1K/wEfhLNUgAA2K1YzVLZ2dmaMmWKQkJCVL9+fdWvX1/VqlXT008/rewKPjUvzVIAANirWDU3jz32mN555x1NnTpV3bt3lyStXbtWkyZN0tmzZ/Xss896tJBlCc1SAADYq1jh5r///a/efvtt19PAJalNmzaqU6eO7r333godbmiWAgDAXsVqljp+/LiaNWuWa3mzZs10/PjxKy5UWUazFAAA9ipWuGnbtq2mT5+ea/n06dPVpk2bKy5UWUazFAAA9ipWs9S///1vDRgwQMuXL3fNcbNhwwYdPnxYixcv9mgByxqapQAAsFexam569uyp/fv3a8iQIUpJSVFKSopuuukm7d69W++//76ny1im0CwFAIC9ij3PTe3atXN1HP7222/1zjvvaObMmVdcsLIqp+aGZikAAOxRrJob5I+aGwAA7EW48TDCDQAA9iLceBjNUgAA2Ouy+tzcdNNNBa5PSUm5krKUC9TcAABgr8sKNyEhIYWuHzFixBUVqKwj3AAAYK/LCjezZs3yVjnKDZqlAACwF31uPIyaGwAA7EW48TDCDQAA9rI13MTFxalTp04KCgpSWFiYBg8erH379hW639y5c9WsWTNVrlxZrVu3LlWPfKBZCgAAe9kablavXq2xY8dq48aNWrZsmTIzM3XjjTfq1KlT+
e6zfv16DRs2THfeeae2b9+uwYMHa/Dgwdq1a1cJljx/1NwAAGAvh2VZlt2FyPHbb78pLCxMq1evVo8ePfLc5pZbbtGpU6e0aNEi17KuXbuqXbt2evPNNws9R1pamkJCQpSamqrg4GCPlT3HK69IEyZIw4ZJs2d7/PAAAFRIl/P9Xar63KSmpkqSqlevnu82GzZsUJ8+fdyW9e3bVxs2bMhz+4yMDKWlpbm9vImnggMAYK9SE26ys7M1YcIEde/eXa1atcp3u6SkJIWHh7stCw8PV1JSUp7bx8XFKSQkxPWKioryaLkvRbMUAAD2KjXhZuzYsdq1a5fmzJnj0eNOnDhRqamprtfhw4c9evxL5YQbOhQDAGCPy5rEz1vGjRunRYsWac2aNapbt26B20ZERCg5OdltWXJysiIiIvLc3ul0yul0eqyshaFZCgAAe9lac2NZlsaNG6cFCxZo5cqVatiwYaH7REdHa8WKFW7Lli1bpujoaG8V87LQLAUAgL1srbkZO3asZs+erU8++URBQUGufjMhISEKCAiQJI0YMUJ16tRRXFycJGn8+PHq2bOnXnjhBQ0YMEBz5szR1q1bNXPmTNuu42I0SwEAYC9ba25mzJih1NRU9erVS5GRka7X//73P9c2CQkJSkxMdL3v1q2bZs+erZkzZ6pt27aaN2+eFi5cWGAn5JJEsxQAAPayteamKFPsrFq1Kteyv/zlL/rLX/7ihRJdOZqlAACwV6kZLVVe8PgFAADsRbjxMGpuAACwF+HGwwg3AADYi3DjYTRLAQBgL8KNh1FzAwCAvQg3Hka4AQDAXoQbD6NZCgAAexFuPIyaGwAA7EW48TDCDQAA9iLceBjNUgAA2Itw42EX19wU4ekSAADAwwg3HpYTbiQpK8u+cgAAUFERbjys0kWPIqVpCgCAkke48bCLa27oVAwAQMkj3HjYxeGGmhsAAEoe4cbDfH0v/E7NDQAAJY9w42EOx4V+N4QbAABKHuHGC3KapmiWAgCg5BFuvICaGwAA7EO48QIewQAAgH0IN15AsxQAAPYh3HgBzVIAANiHcOMFNEsBAGAfwo0X8GRwAADsQ7jxAmpuAACwD+HGCwg3AADYh3DjBTRLAQBgH8KNF1BzAwCAfQg3XkC4AQDAPoQbL6BZCgAA+xBuvICaGwAA7EO48QLCDQAA9iHceAHNUgAA2MfWcLNmzRoNHDhQtWvXlsPh0MKFCwvcftWqVXI4HLleSUlJJVPgIqLmBgAA+9gabk6dOqW2bdvq9ddfv6z99u3bp8TERNcrLCzMSyUsHsINAAD2qWTnyWNiYhQTE3PZ+4WFhalatWqeL5CH5ISbc+fsLQcAABVRmexz065dO0VGRuqGG27QunXr7C5OLgEB5ufZs/aWAwCAisjWmpvLFRkZqTfffFMdO3ZURkaG3n77bfXq1UubNm3S1Vdfnec+GRkZysjIcL1PS0vzejkDA83P06e9fioAAHCJMhVumjZtqqZNm7red+vWTT/++KNeeuklvf/++3nuExcXp8mTJ5dUESURbgAAsFOZbJa6WOfOnXXgwIF810+cOFGpqamu1+HDh71eppxwc+aM108FAAAuUaZqbvKyY8cORUZG5rve6XTK6XSWYIku9Lmh5gYAgJJna7hJT093q3U5ePCgduzYoerVq6tevXqaOHGifv31V7333nuSpJdfflkNGzZUy5YtdfbsWb399ttauXKlvvzyS7suIU80SwEAYB9bw83WrVt13XXXud4/8MADkqTY2FjFx8crMTFRCQkJrvXnzp3Tgw8+qF9//VWBgYFq06aNli9f7naM0oBwAwCAfRyWZVl2F6IkpaWlKSQkRKmpqQoODvbKOebNk/7yF6lHD2n1aq+cAgCACuVyvr/LfIfi0og+NwAA2Idw4wU0SwEAYB/CjRcQbgAAsA/hxguY5wYAAPsQbryAPjcAANiHcOMFFzdLVayxaAAA2I9w4wU54SYrS8rMtLcsA
ABUNIQbL8gJNxL9bgAAKGmEGy/w85N8/u/O0u8GAICSRbjxAoeD4eAAANiFcOMlhBsAAOxBuPES5roBAMAehBsvYa4bAADsQbjxEpqlAACwB+HGSwg3AADYg3DjJfS5AQDAHoQbL6HPDQAA9iDceAnNUgAA2INw4yU0SwEAYA/CjZdQcwMAgD0IN15CnxsAAOxBuPESam4AALAH4cZL6HMDAIA9CDdeQs0NAAD2INx4CX1uAACwB+HGS6pWNT9PnrS3HAAAVDSEGy+pUcP8/P13e8sBAEBFQ7jxkpo1zU/CDQAAJYtw4yU5NTfHjkmWZW9ZAACoSAg3XpITbrKypNRUe8sCAEBFQrjxksqVL3QqpmkKAICSQ7jxooubpgAAQMkg3HhRTqdiwg0AACWHcONFhBsAAEqereFmzZo1GjhwoGrXri2Hw6GFCxcWus+qVat09dVXy+l0qnHjxoqPj/d6OYuL4eAAAJQ8W8PNqVOn1LZtW73++utF2v7gwYMaMGCArrvuOu3YsUMTJkzQXXfdpaVLl3q5pMVDnxsAAEpeJTtPHhMTo5iYmCJv/+abb6phw4Z64YUXJEnNmzfX2rVr9dJLL6lv377eKmax0SwFAEDJK1N9bjZs2KA+ffq4Levbt682bNiQ7z4ZGRlKS0tze5UUwg0AACWvTIWbpKQkhYeHuy0LDw9XWlqazpw5k+c+cXFxCgkJcb2ioqJKoqiS6HMDAIAdylS4KY6JEycqNTXV9Tp8+HCJnZs+NwAAlDxb+9xcroiICCUnJ7stS05OVnBwsAICAvLcx+l0yul0lkTxcqFZCgCAklemam6io6O1YsUKt2XLli1TdHS0TSUq2MXNUjw8EwCAkmFruElPT9eOHTu0Y8cOSWao944dO5SQkCDJNCmNGDHCtf3o0aP1008/6Z///Kf27t2rN954Qx9//LHuv/9+O4pfqFq1JB8f8/DMxES7SwMAQMVga7jZunWr2rdvr/bt20uSHnjgAbVv315PPvmkJCkxMdEVdCSpYcOG+vzzz7Vs2TK1bdtWL7zwgt5+++1SOQxckvz8pIYNze8//GBvWQAAqCgcllWxGkzS0tIUEhKi1NRUBQcHe/18/ftLS5ZIM2dKd9/t9dMBAFAuXc73d5nqc1MW/eEP5uf+/faWAwCAioJw42U54YZmKQAASgbhxsuaNDE/qbkBAKBkEG68LKfm5sABM2oKAAB4F+HGy6KiJKdTysyUDh2yuzQAAJR/hBsv8/G50DS1d6+9ZQEAoCIg3JSA/5vGR+vW2VsOAAAqAsJNCbj+evNz5Up7ywEAQEVAuCkB111nfm7ZIp08aW9ZAAAo7wg3JaB+femqq8xoqa+/trs0AACUb4SbEpJTe7N0qb3lAACgvCPclJAhQ8zPd9+Vjh+3tywAAJRnhJsS0r+/1LatlJ4uvfqq3aUBAKD8ItyUEIdDeuwx8/u//y1t22ZveQAAKK8INyVo6FCpXz/pzBnpT3+Sdu2yu0QAAJQ/hJsS5OMjzZkjtWwpHTkide0qvfOO9Ntv0rlzdpcOAIDyoZLdBahoQkKk1aulW26RVqyQ7rrLLK9eXXr4YaldO7PN//4nffut1KKFNGGC1KiRnaUGAKDscFiWZdldiJKUlpamkJAQpaamKjg42LZynD8vvfKKNHly4RP7BQdL06dLqanSvHnSTTdJlSqZB3GGhEh//7tUtWrJlBsAADtczvc34cZmWVnmFR8vzZ0rJSebZqrmzU3tznvvSevXF3yM5s2lGjWkX36RGjaUZs0yEwdKJhAtXWr6+bRqZWqGfH29fVUAAHgW4aYApS3cFCYzU3r+eelf/zK//+1v0tatphnrqqtM81Vysvs+1aqZ/j19+phHPhw8eGFd27bSggVSdrZpBuvYUXrwQcnPz6w/dszUClWrVlJXCABA4Qg3BShr4SZHaqrpdFyrlvvyX36RPvxQCg83tTX33ivt3eu+TZ06UtOm0ubNZp6dgAATY
HKawyIiTCfnHj2kqVNNk9nQodJbb5kmMQAA7Ea4KUBZDTdFlZIiffmlFBRkamb8/KRPPzUB59dfTWjZtMls27atdPhw/jMm33CD9MQTpjkrMNB0gN6yxTRt/fGPpnkrM1N64w0TkP7yF1NjBACApxFuClDew82lLMtMIJgjO1vas0dKSpKuucbUBu3aJS1eLL3/vnTbbVLfvtKAAdKpU2afwEDTYfno0QvHadrUBJ9Zs0zokaQ2bcw8Pu+8Y5rN+vSROnSQYmNNTREAAMVFuClARQs3xbV8uTRxoglBv/xiltWuLV17rVn3++8Xtq1SxdTipKXlfazRo02gSk2VGjQwYWvvXqluXTOZ4e2308cHAFAwwk0BCDeXx7KktWtNX50+fUwzV0qKeZTEmjWmyerRR03wee456YsvpPvuM0PU16yRXn+98HNUrix162aCVM2a5pjbt0v332+aunKkpZnmtpyaqEtrpQAA5RfhpgCEm5J1++2mw3NUlDRtmqnxOX9eatzY1ObMmiXt3p3//sOHm+HtCxea5rP27U1fotWrpQ8+MMFq9Gjp44+l77+Xbr7ZNL0FBZk+RefPS5MmmWMMH26GxV93HR2lAaCsIdwUgHBTss6cMcPVb7zR1O5cyrJMaFm3znR63rvXTE5oWaajclEEBJjzXMzXV/p//09KSDCTHDocpnZo3ToTtP77XxNy8nPy5IVRZQAA+xFuCkC4KTs2bjQ1O2lpUv/+UufO0osvmjAUGmr67Lz1ltm2ZUszmeH/+3+m6ezcOfOzUqXcwUeSnE4zv89PP0kZGVKvXtJf/2o6T7/6quksHREhDRtmjnHbbSYUbdok7d9vht737Wv6GwEAvI9wUwDCTflhWWZCwmrVTC2Mw2FqXCpVku64wzykVDLP59q/3zRR3XefGf6+cGHu4/n4mHCTnl6080dFmQkWhw41s0z7+xe93BL9hQDgchBuCkC4qRgsy3RKXr3a9MP58ktT6/Liiyb8/P3vptbm+utNqHn/fVMjJJnmsaeeks6eNf2BEhPNXEGSaVq7+mrzUNPDh80yX1/zmjDBHG/pUrNu0CBpxw7T1Fatmulcffy4eTZYQoJ5vMakSVJYmDnP55+bGqdrrjFNaEUNSwBQERBuCkC4QX4SEkz4aNMm92SEx4+bUJTzkTl92nSQfvXV/CdBvFTnziZQHTt2YVnt2mZOofh4MyFiDh8f03n6uedMYHr9dRPYxowxASggwHSo3r/fDNkPDDT7WZZpxgsJKfZtAIBSiXBTAMINPOnsWfNsr82bpUceMTU43bqZB5l+9pkZsdW9uxnVldP35+qrpccfN8Pp9+y5cKzrrjP9fJYudQ9MnTub40umlufoUdPfKGf+oU6dzLmyskxt0IYN0rPPSgcOmFFid91lnib/5z+b/TMzzbD7qKiSuUcA4AmEmwIQbmCHzz4zTVC33iqNH2+anNLTTYfpw4fNTM4332z64ViWCS7//veFGhsp96gwHx8zc3Ramgkq6enSiRO5z12zpqktuuoq0z/ogw9MU9tdd0k7d5p1/fubh7NWrWr22bHD9Es6e9aEs3r1zHY9elx4yOru3WZE29/+Zmq7AMCbyly4ef311zVt2jQlJSWpbdu2eu2119S5c+c8t42Pj9eoUaPcljmdTp09e7ZI5yLcoCxZudL0/7nlFvM8r40bTU3O6tXmERg1a0oxMaa5SzJNWV27SjNmmCavI0eKfq5rrjEPZt2/P/+5h+rWNc8Wu/ZaE7wSEkyT2EcfmfXx8SYkNWt2YZ+sLDO5Y6tW5uGuF9u40fSFGj3ajGADgPyUqXDzv//9TyNGjNCbb76pLl266OWXX9bcuXO1b98+hYWF5do+Pj5e48eP1759+1zLHA6HwsPDi3Q+wg3Km99+k55+2jSBjRxpmsYOHTIdox9+WPr6a9M/6I03TBCJiTE1Mg88YOYfGjxYuvNO91FiDoc0ZIhpXps/39QYBQS4P3ZDMv2Qz
p+/MCT+1CkpMtKct3Vrs+6hh0wNUViY6ci9cqUZZXbsmAls585J//iHqamyLPPiAawALlWmwk2XLl3UqVMnTZ8+XZKUnZ2tqKgo3XfffXrkkUdybR8fH68JEyYoJSWlWOcj3ADGxY+v+Ppr0w+oWzfT96dZM9MUlbNddrbpq7N8uanZeeEF0xy2fLn0z3+aR21cjtBQE6ZyOlE7HGbo/quvmifP169vmub+8AfTp6hWLTMX0euvm07dt90mNWpk1ktmdupPP5U6djRzEDVrdqH5DED5UGbCzblz5xQYGKh58+Zp8ODBruWxsbFKSUnRJ598kmuf+Ph43XXXXapTp46ys7N19dVX61//+pdatmyZ5zkyMjKUkZHhep+WlqaoqCjCDXAFMjNNbU5wsHTwoGmqysgwnaEXLTLLVq408w6NHSuNGGGeGH/8uOkf9PPP5jh/+Yt5ttj77xevHJMmmUA2dKg5V47mzc1s1KGhpsnu1VfNUP+aNc0+FzebXWz6dOmZZ8wM1n37Fq9MALzjcsKNrZPLHzt2TFlZWbmalMLDw7V3794892natKneffddtWnTRqmpqXr++efVrVs37d69W3Xr1s21fVxcnCZPnuyV8gMVlZ/fhZqRhg3NvD+ZmVKTJlLPnmZ5Zqap8cnpS7NnjxlC7+dnmtF69jTh5uxZ07w1a5ZpBvvoI9MsFRkpffONCUKbNpl5gAYMMM1b69eb+YMmTbpQphYtzLESE825hg83/Xxeesk0j+X48ssL4SY62sxP9M03pp/R44+bmqrYWLPs2DHTf+nDD811BgdL1aubsJTTf+j4cTOZ5Nmzpnx165rr9NS/nX77zYQyJn0Eis7WmpsjR46oTp06Wr9+vaKjo13L//nPf2r16tXatGlTocfIzMxU8+bNNWzYMD399NO51lNzA5QNmzebWp3IyLzXnz1ranlyTJggvfKK+f2uu0yti9Mpbd1qanMunjeob1/zKI0ZM0xQKoifn/u+ealWzdRU9e5tmun27zfLAwJMqDlxQnrySTPkvm5dc7wFC8y1+fiY6QAefdSMmtu2Tfr1V/Ow18hIE7w2bTKj6L76yvSVuusuaebM3AHHssy+efy7Dih3ykzNTc2aNeXr66vk5GS35cnJyYqIiCjSMfz8/NS+fXsdOHAgz/VOp1NOhmEApV4+AyRdLg42kukk3bKlqbHp3v3C8o4dzTD2t94yNTZ33206TUumpujjjy/M/vzSSyYMhYeb87dqJQ0caDpdp6aajtK9epnHdqSkmM7Pzz13YTTZokXmZ+3apgP3li0Xhus//nj+1/LZZ9KyZVLjxtJ777mvGzhQWrzYjDLL8fbbpg9UTkhq1swEnldflZYsMQGoXz8TrsLDTRPd009LXbqYJjvJ9HEKDCxaZ+3sbBPeAgIK3xYojUpFh+LOnTvrtddek2Q6FNerV0/jxo3Ls0PxpbKystSyZUv1799fL774YqHb06EYQI6sLDOnT8uW7uEpK8s0LeX1ZPjUVBNqQkKkceNMCFi50gSO+fPNaLWcTtfXX28eA5Kaaob05zz/bMoU9z5CV19tQtHevRfmNapb19TmtGt3Yah9fvz8TJkDA81Q/NmzzTB7Hx8zr9Lnn5syVa1qpgu4+moTrDZulH780VxrzZpmZNuxY+YYP/0krVhhpgj45BPTafuBBy40M+7YYcJc374XOp9f6vffzb0ZMODCLNpAcZWZDsWSGQoeGxurt956S507d9bLL7+sjz/+WHv37lV4eLhGjBihOnXqKC4uTpI0ZcoUde3aVY0bN1ZKSoqmTZumhQsXatu2bWrRokWh5yPcAPCUrCxTm3JprdLF8hre/sMPptNyQoJ5Gv0NN5jls2ebGpkbb5TmzjWhJTvbDJPPGV9xzz2mCe/NN8371q3NUHtv6NXLhLacc912m5lfae1aU/MkmTB3/fVmGoLBg80IunPnTNPdvfeaa+3QwTThNWpkyi+ZsBQamn8zJHCpM
hVuJGn69OmuSfzatWunV199VV26dJEk9erVSw0aNFB8fLwk6f7779f8+fOVlJSk0NBQdejQQc8884zat29fpHMRbgCUZpf2LcrP5s1m2zZtTPNat27m0RtLl5rOz/ffb15795omqj//2Twq5JtvzGv/flMr1LGjqSH69FPTb6lWLRNApk690DTmcJhXdrZ7GS4NVlWrus+XlJcZM0wt1QMPmNqcMWPMeX77zTxapEEDE6ri403zYb9+ZpuDB02NUtOm5lqOHTMd2KtXz30OyzIj8ObNM3Mr9elT+P2UzDE3bjTnvLTGDvYrc+GmJBFuAFQUOWGkqJMiHj5smqcCAqTbbzejxCpVMv2UfvvNTLZ4zTXmcR29epnmvP37zSM9pk41tVh16phwdeiQabp79VVTI/XLL2am6uKoX9+Mgjt3Lve6m24yAWftWhPSqlUznay///7CNo8+aob4r1kjPfigNGqUmaJAMs2DAQHm+D17mhDVt6+55qAgE8QCA03frD17TBNcjx5m33/9ywSioUNNv6+LO3yfO2fCVffuuWfmRvEQbgpAuAGAwh06ZGp+7rzT9JkpzJ49pn/RTTflXfOUnW2atf73PxMaHn/c1BKtW2emAAgLM81Us2aZ2pP77jOdo5955kJtUK9epikvIcHsm5iYf3n8/U1I+ewz8/7OO02fqJznrz34oAkteQyylWQeY+JwSN99Z0bavf32hXV9+pjJLh977MKyzp1NbVNkpHk8yhtvmGkHatc29yUszAQlPz8TGLOyTO3SjBnSn/5k+jv98IMJjJeOiktKMqGtKDV65RnhpgCEGwCwh2WZR3RUqVLwvD0XN80lJppwcNVVpklKMsGgUiXTJPbAAyYwjBljjp+SYsJShw5SRISZLmDChAvHrlfPhKO8NG8uPfusCUJ5PYS2a1dTlotmF1H37mY4/9mzZuReSkruZ7o1bWrCzdq1powDB5opAz78MHe5BgwwtWZdu5owNH686d/Urp0JTcHBF2YXP3rUzPn088+mT1Pr1uZYv/9upiJo2dLURiUnmzCWnS0dOGBq8po0Me/Pnzf3L+fvcf68+d3X17w/eNDU5gUFFb3J1FsINwUg3ABAxTJ3rhnhdu6c6Z/05ZcmwJw/b2qQxo83X+YhIeaLf+NGU+sTHGxGl332mWnKeucd82V/221mLqK2bc2IseRk8/vx4+Z84eGmL9OwYSZ0XTwy7mK+vqYWaOnS3Ov8/EyQuHjfVq1M7dbmzWbUWlqa+z6RkabTdmZm7vmc+vUzk20mJpprfOYZU7v0yy9m5NysWSbADBhgwuHy5SZU3X67CYnt2pn3U6eacOrvbya79PU1YevVV80Iu6lTTfDMzPT8CDnCTQEINwCArVtNM9Ctt+Zdi5SWZgKEv78ZLt+o0YXtzp0zcxFde60JApL07rsmMAUEmMCT80SgX34xw+FPnzaBKTHR1NwcP26aumJjpb/9zdS23Hefmfdo925TGySZjuL//KcJSjlzKF2sdWsTapYvd+/wHRRkwkVKiqnpypml29fXfQ6li+U8CFcyNWUJCe6ze1+qWTNz7jNnTCCUTIg6eNCMnJs6Nf99i4NwUwDCDQDA0yzLBJw//MGEnoIkJ5umq/wG+VqWqWn6/nsz2isgwDQnrVxpanM6dTJBKzTU1BJJponql19MU9eyZaaG6vrrTThZt06aPNlMTjlmjFm+dauZ72j+fPMYk//+15y3QwcT5nKeTX3LLSYQ/fKLCVKvv25qck6dcq9VqlTJ7J8TnKKiTHj05By6hJsCEG4AABXZsWMm1Pz5zxeG0qemSvv2mea1AwekVasu9Nm5uGZr714TXE6eNKPPsrJMLdQ115gaqylTzHPdHnnENPN5EuGmAIQbAADKnsv5/i7i7AcAAABlA+EGAACUK4QbAABQrhBuAABAuUK4AQAA5QrhBgAAlCuEGwAAUK4QbgAAQLlCuAEAAOUK4QYAAJQrhBsAAFCuEG4AA
EC5QrgBAADlCuEGAACUK5XsLkBJsyxLknl0OgAAKBtyvrdzvscLUuHCzcmTJyVJUVFRNpcEAABcrpMnTyokJKTAbRxWUSJQOZKdna0jR44oKChIDofDY8dNS0tTVFSUDh8+rODgYI8dt6yo6NcvcQ8q+vVL3AOJe1DRr1/y3j2wLEsnT55U7dq15eNTcK+aCldz4+Pjo7p163rt+MHBwRX2Ay1x/RL3oKJfv8Q9kLgHFf36Je/cg8JqbHLQoRgAAJQrhBsAAFCuEG48xOl06qmnnpLT6bS7KLao6NcvcQ8q+vVL3AOJe1DRr18qHfegwnUoBgAA5Rs1NwAAoFwh3AAAgHKFcAMAAMoVwg0AAChXCDce8Prrr6tBgwaqXLmyunTpos2bN9tdJK+YNGmSHA6H26tZs2au9WfPntXYsWNVo0YNVa1aVUOHDlVycrKNJb5ya9as0cCBA1W7dm05HA4tXLjQbb1lWXryyScVGRmpgIAA9enTRz/88IPbNsePH9fw4cMVHBysatWq6c4771R6enoJXsWVKewejBw5Mtfnol+/fm7blOV7EBcXp06dOikoKEhhYWEaPHiw9u3b57ZNUT77CQkJGjBggAIDAxUWFqZ//OMfOn/+fEleSrEU5fp79eqV6zMwevRot23K6vVL0owZM9SmTRvXpHTR0dFasmSJa315/vvnKOwelLrPgIUrMmfOHMvf39969913rd27d1t33323Va1aNSs5OdnuonncU089ZbVs2dJKTEx0vX777TfX+tGjR1tRUVHWihUrrK1bt1pdu3a1unXrZmOJr9zixYutxx57zJo/f74lyVqwYIHb+qlTp1ohISHWwoULrW+//db605/+ZDVs2NA6c+aMa5t+/fpZbdu2tTZu3Gh9/fXXVuPGja1hw4aV8JUUX2H3IDY21urXr5/b5+L48eNu25Tle9C3b19r1qxZ1q5du6wdO3ZY/fv3t+rVq2elp6e7tinss3/+/HmrVatWVp8+fazt27dbixcvtmrWrGlNnDjRjku6LEW5/p49e1p3332322cgNTXVtb4sX79lWdann35qff7559b+/futffv2WY8++qjl5+dn7dq1y7Ks8v33z1HYPShtnwHCzRXq3LmzNXbsWNf7rKwsq3bt2lZcXJyNpfKOp556ymrbtm2e61JSUiw/Pz9r7ty5rmV79uyxJFkbNmwooRJ616Vf7NnZ2VZERIQ1bdo017KUlBTL6XRaH330kWVZlvX9999bkqwtW7a4tlmyZInlcDisX3/9tcTK7in5hZtBgwblu095uwdHjx61JFmrV6+2LKton/3FixdbPj4+VlJSkmubGTNmWMHBwVZGRkbJXsAVuvT6Lct8sY0fPz7ffcrT9ecIDQ213n777Qr3979Yzj2wrNL3GaBZ6gqcO3dO27ZtU58+fVzLfHx81KdPH23YsMHGknnPDz/8oNq1a+uqq67S8OHDlZCQIEnatm2bMjMz3e5Fs2bNVK9evXJ7Lw4ePKikpCS3aw4JCVGXLl1c17xhwwZVq1ZNHTt2dG3Tp08f+fj4aNOmTSVeZm9ZtWqVwsLC1LRpU40ZM0a///67a115uwepqamSpOrVq0sq2md/w4YNat26tcLDw13b9O3bV2lpadq9e3cJlv7KXXr9OT788EPVrFlTrVq10sSJE3X69GnXuvJ0/VlZWZozZ45OnTql6OjoCvf3l3Lfgxyl6TNQ4R6c6UnHjh1TVlaW2x9LksLDw7V3716bSuU9Xbp0UXx8vJo2barExERNnjxZ1157rXbt2qWkpCT5+/urWrVqbvuEh4crKSnJngJ7Wc515fX3z1mXlJSksLAwt/WVKlVS9erVy8196devn2666SY1bNhQP/74ox599FHFxMRow4YN8vX1LVf3IDs7WxMmTFD37t3VqlUrSSrSZz8pKSnPz0nOurIir+uXpNtuu03169dX7dq19d133+nhhx/Wvn37NH/+fEnl4/p37typ6
OhonT17VlWrVtWCBQvUokUL7dixo8L8/fO7B1Lp+wwQblBkMTExrt/btGmjLl26qH79+vr4448VEBBgY8lgp1tvvdX1e+vWrdWmTRs1atRIq1atUu/evW0smeeNHTtWu3bt0tq1a+0uii3yu/577rnH9Xvr1q0VGRmp3r1768cff1SjRo1Kuphe0bRpU+3YsUOpqamaN2+eYmNjtXr1aruLVaLyuwctWrQodZ8BmqWuQM2aNeXr65urV3xycrIiIiJsKlXJqVatmv7whz/owIEDioiI0Llz55SSkuK2TXm+FznXVdDfPyIiQkePHnVbf/78eR0/frzc3perrrpKNWvW1IEDBySVn3swbtw4LVq0SF999ZXq1q3rWl6Uz35ERESen5OcdWVBftefly5dukiS22egrF+/v7+/GjdurA4dOiguLk5t27bVK6+8UmH+/lL+9yAvdn8GCDdXwN/fXx06dNCKFStcy7Kzs7VixQq3dsjyKj09XT/++KMiIyPVoUMH+fn5ud2Lffv2KSEhodzei4YNGyoiIsLtmtPS0rRp0ybXNUdHRyslJUXbtm1zbbNy5UplZ2e7/uMvb3755Rf9/vvvioyMlFT274FlWRo3bpwWLFiglStXqmHDhm7ri/LZj46O1s6dO91C3rJlyxQcHOyq1i+tCrv+vOzYsUOS3D4DZfX685Odna2MjIxy//cvSM49yIvtnwGPd1GuYObMmWM5nU4rPj7e+v7776177rnHqlatmluP8PLiwQcftFatWmUdPHjQWrdundWnTx+rZs2a1tGjRy3LMsMh69WrZ61cudLaunWrFR0dbUVHR9tc6itz8uRJa/v27db27dstSdaLL75obd++3Tp06JBlWWYoeLVq1axPPvnE+u6776xBgwblORS8ffv21qZNm6y1a9daTZo0KTPDoC2r4Htw8uRJ66GHHrI2bNhgHTx40Fq+fLl19dVXW02aNLHOnj3rOkZZvgdjxoyxQkJCrFWrVrkNcz19+rRrm8I++znDYG+88UZrx44d1hdffGHVqlWrTAwFLuz6Dxw4YE2ZMsXaunWrdfDgQeuTTz6xrrrqKqtHjx6uY5Tl67csy3rkkUes1atXWwcPHrS+++4765FHHrEcDof15ZdfWpZVvv/+OQq6B6XxM0C48YDXXnvNqlevnuXv72917tzZ2rhxo91F8opbbrnFioyMtPz9/a06depYt9xyi3XgwAHX+jNnzlj33nuvFRoaagUGBlpDhgyxEhMTbSzxlfvqq68sSblesbGxlmWZ4eBPPPGEFR4ebjmdTqt3797Wvn373I7x+++/W8OGDbOqVq1qBQcHW6NGjbJOnjxpw9UUT0H34PTp09aNN95o1apVy/Lz87Pq169v3X333bnCfVm+B3lduyRr1qxZrm2K8tn/+eefrZiYGCsgIMCqWbOm9eCDD1qZmZklfDWXr7DrT0hIsHr06GFVr17dcjqdVuPGja1//OMfbnOcWFbZvX7Lsqw77rjDql+/vuXv72/VqlXL6t27tyvYWFb5/vvnKOgelMbPgMOyLMvz9UEAAAD2oM8NAAAoVwg3AACgXCHcAACAcoVwAwAAyhXCDQAAKFcINwAAoFwh3AAAgHKFcAOgQnI4HFq4cKHdxQDgBYQbACVu5MiRcjgcuV79+vWzu2gAyoFKdhcAQMXUr18/zZo1y22Z0+m0qTQAyhNqbgDYwul0KiIiwu0VGhoqyTQZzZgxQzExMQoICNBVV12lefPmue2/c+dOXX/99QoICFCNGjV0zz33KD093W2bd999Vy1btpTT6VRkZKTGjRvntv7YsWMaMmSIAgMD1aRJE3366aeudSdOnNDw4cNVq1YtBQQEqEmTJrnCGIDSiXADoFR64oknNHToUH377bcaPny4br31Vu3Zs0eSdOrUKfXt21ehoaHasmWL5s6dq+XLl7uFlxkzZmjs2LG65557tHPnTn366adq3Lix2zkmT56sm2++Wd999
5369++v4cOH6/jx467zf//991qyZIn27NmjGTNmqGbNmiV3AwAUn1cexwkABYiNjbV8fX2tKlWquL2effZZy7LMk6hHjx7ttk+XLl2sMWPGWJZlWTNnzrRCQ0Ot9PR01/rPP//c8vHxcT2RvHbt2tZjjz2WbxkkWY8//rjrfXp6uiXJWrJkiWVZljVw4EBr1KhRnrlgACWKPjcAbHHddddpxowZbsuqV6/u+j06OtptXXR0tHbs2CFJ2rNnj9q2basqVaq41nfv3l3Z2dnat2+fHA6Hjhw5ot69exdYhjZt2rh+r1KlioKDg3X06FFJ0pgxYzR06FB98803uvHGGzV48GB169atWNcKoGQRbgDYokqVKrmaiTwlICCgSNv5+fm5vXc4HMrOzpYkxcTE6NChQ1q8eLGWLVum3r17a+zYsXr++ec9Xl4AnkWfGwCl0saNG3O9b968uSSpefPm+vbbb3Xq1CnX+nXr1snHx0dNmzZVUFCQGjRooBUrVlxRGWrVqqXY2Fh98MEHevnllzVz5swrOh6AkkHNDQBbZGRkKCkpyW1ZpUqVXJ12586dq44dO+qaa67Rhx9+qM2bN+udd96RJA0fPlxPPfWUYmNjNWnSJP3222+677779Ne//lXh4eGSpEmTJmn06NEKCwtTTEyMTp48qXXr1um+++4rUvmefPJJdejQQS1btlRGRoYWLVrkClcASjfCDQBbfPHFF4qMjHRb1rRpU+3du1eSGck0Z84c3XvvvYqMjNRHH32kFi1aSJICAwO1dOlSjR8/Xp06dVJgYKCGDh2qF1980XWs2NhYnT17Vi+99JIeeugh1axZU3/+85+LXD5/f39NnDhRP//8swICAnTttddqzpw5HrhyAN7msCzLsrsQAHAxh8OhBQsWaPDgwXYXBUAZRJ8bAABQrhBuAABAuUKfGwClDq3lAK4ENTcAAKBcIdwAAIByhXADAADKFcINAAAoVwg3AACgXCHcAACAcoVwAwAAyhXCDQAAKFcINwAAoFz5/yAGBPoE3TruAAAAAElFTkSuQmCC\n" + }, + "metadata": {} + } + ] + }, + { + "cell_type": "code", + "source": [], + "metadata": { + "id": "dXPARCwlSpqV" + }, + "execution_count": 28, + "outputs": [] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "machine_shape": "hm", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/baselines/fedavgm/_static/Figure6_cifar10_num-rounds=1000_concentration=1.png b/baselines/fedavgm/_static/Figure6_cifar10_num-rounds=1000_concentration=1.png new file mode 100644 index 0000000000000000000000000000000000000000..1668474caed6d296d70aa3136051275ed1ac70de GIT binary patch literal 46747 zcmd431yGgk*FL%drI8jvQc^$=M5Rka326!GmhP4k5D*mv5kWvgQY1vWK}wL8?(UH8 zvmX6@-}(M$=FFTqbLKy1&-lKGdq4Ym?t8_xu63ALZJu^kw0jeVi^`FR57-Ul!Tgl{OXv8 
zrrPgwtj+xdL&~B1Cd9-_qxSdhllktFmAvFFQTw5Dlr~#`x3H1#7xS&xvQh0-qbgyt zuhn!M>j-W;JtA;JBPS*rGV9m~cVKy8Na<`K?Ol@?{9s}9)A~=|NfTJz)#`dFHJD7B$58-Q)vI^TmAmGyj3wRjDmam z*WC%Jwc(;;9*a1FT9gp zhNL52K-HQCp3(VdesQsMmFMnFk3S}}v$I#6se>=k)MqNE5?#51Ehi`UFjCF(j9Kn| zl3~LOB{s55?Gmo)vm>j3y?d|*>E?#ZxJ1Onlq2)MK6Ym)(rIdIGYSh+zrK1`T2GI5 zZ+$Ebhm>WqFH2SP`%~r6&W)Zly!wU)m!533tdwo{)1%$tTJPW4k3Wcddlr~#X=@vA zO*f+ay!O|pV)T9LSU){`4i9Mr?=Yopdw)Ya-+jx7&t{mc$ZDWT?EFO4Xxy&m^z!+> z?|JZel@}vBJFeH>YTXAOx0*hXjHCLI)o1QyQ+dqlb^qqdUo1o?5;@aHX zleV_zJUiXjuM&oxAIvwz`1bAFcMC~u;r&r3e)mm!+wn@AyLU@e_M;*r(Xp_w)Us4A zzJ2>vGv5I1c5q!E4Ha@*H|Wb$7Fu=jyT3hSZMoL`H9G#qsMh0@UFtoL`cb0$S!7WNA2)f`e=jJ*r z^)RDuT88Zv7&oI(5)u-EOaA`;5v?Y}Yj9nYYQ!(PRT&u>F$VJVA4IV{qWuz&7?J1h zvY@Y3e@?fpevu`)p3kAFO`B_t&*|n&xL8V3QqtfbK8dr&FbW>Gbsg)`QewoAz)I-8 zBnFLDdXPGnQRn973NERut7oczrLr9@HT;e&nsVbLlY^AhPG3t)3ko$jG9sofz+H2~ zdySFNrHAUacUFf~7d-YiCLZiA|90sa zF186H=TLtq!06>6?7oSDZ)&tvWg{DoT?q{dX?eyY&U!5+<3&IK28#aHt?h!JSz3iG zl9G}y$MzZs#cKQeFC(@YghzqO#4w|apQ;%NG3kwe_lN4zrAy$DmCVp1JV#VjRo_NOUlbA&vWe5-8o=4Rckf|Zl$YhP(HY}p723E84$Kc}ZJJ3Do@wKXINxhbVezjlm9Y$7@wiC!X)VW)!b zhh1i1U_h&@tCPL~J~>wEx<-}$=;MnQ1Xq`>E4=o(q~B174i0MKQ}Jc~;1w1YzUgz~ zE}tMsj&+Gz{No)PB3jxvV3L;G!m?Iw+w;HBP!HZvON7&kd|!ee{AJU6Y+1o6xBopR z`$qLKm-^QyK~oLE#?5bNJXc=nSGp6!bv!?(_4AF5u*!cx1xY5CctiBkEV6u@g(irq zJlz}laLfFX=;@B?kEW&+*R|nH-3r&T6;;g)g@nn~)mN|=%NG{$D7XS&zkaPZ;DrPP zG5oI3ki{&O3kVWokE|8652_ zq55>Eun1}K#mAP@0RK_GS=D-n&;|-bs71*+2*Z3o||;q0PG;HbAZHpYtrF z#pu;T47+LGL4ES4ja#$So+0P!*Y58Ys_h9v6pYfZ*xX`2-nEnV^c2*4w$(_oU3oAa z_Cd&vt3OvajK{d?-4i`EH8LY3qnloP)|~p)l|P=z$jBTlriMHB@P&_**yRy)p&^kW z|NHkJqh(H8D|t2bZEbk^a52W+%a8(7ezM{)1ZHtaz-_^6CnqYl9R1(VQa=#WL|!eN zH^C?VzIZW^b^ZHlM`1Ys`{lJ;wg3HAe0XcX!^psJv9rOx-|n0-T)NMhWDZWYJ0Izi zNA<>s!Pdo=v#Uu#4(dw2ho!2~Fe~W1=ob=#t*Q}#S{*J5+Ydt$&Q^QP!0k`>+hVxy zx;$?0fhA{q<3H(AamQf4;tvz6w%JI(mu&4+UKaH(wx?VgE@Pd=8*?4p-{#q>p< z!3t?xxRiNpvy6Lfh&Znf67TKpMgN&Qgaj%LhCVetjn2Wr0c(TdeZ1HBhDM0pxhDM& zBwMw?n5Nzg#TkfKKG`+SH*`vgypMJyAT&Z&4Y1)Y|1*vS!6Y2azP_mm6SD4=TW0u> 
zB6j}F&CeT6{|H4q8PebL{;%4`BgHo1kLPFDj)oXx+NPG5Uk>Pb5r7@>ELXGtT>kxq zn1;q?*>aI>?N*iV`C0UnnAlt9*Y4aYR@t|}PE!drZF`@5FZhzZtLuxRqTA1&J-g|= zs1)@~;}rb9JLNte8yRk2_G3KNba{x-{t}&!v(ye&3p*!QR`7x({lSxse#PDPJcNCE zS;a!d_tapufr0&PP!@-{;wLl=%%lh5w4O_7XlN{%ueV-P-om`0UEC)}c761$xNF@c5;$$UbL>=FaP*l*fB( z4NjdR8ip=6gAk8_gi21LnJDT_rJA8|ANG85W+t`JW~KkD^u(v@?TI2+V`F2V{>~d5(D8IIp8-Heu+>2B)OM#>^WpZw*6~Ko%IxcqkXi^(H*H7x z`@U*ZfQi6kh~zshD15gW*?aqw5A&C~qK}WLLOj3!hYwf4rtnx569X$MD!TX0<-cg; zHMK^u^1!DPobBxFkR0&-{rmK+D=*LGxjprBpWZU>_+gb-^&&t2X2MQg09GpO3}C3- zz9Up^02blq@|V18TwIs9D>u z9vkT^ZLK)?HdaSR2ZB`~ z9H|-}$3c*I&#nK)VW!D&ywW3rNv2{Ave%FO3133IF!N(|$MZAq!Qo-5G*0k>)OW^~ z6>b~-#fc+BL$T$8W{TMU@$uAvYa06=dlx0L%$VCmLv&c8RHwunoSccv8wDn3gN>&+5FmJ*H^ylo59b#a-h-0CVX_vj z>e)L_ARlSz(mc?%^CigZN>EQ%EdT@L^1L3Ks4ea+kMe&m;;?S>O6AXX12moBVGS>!(teMXD z735@zWTo1M28_JwBX%ym%FGxy@bgU7j7vyn_+~0cE&8lf@=ak1K%j=9p|bk@?a4X} zz&v;aRD87%+;U0dt?+4uuRy%&5xq0J+v9xW1S}J>1%6+S7AbBJI)DKLzU=HdY)(o* zs=|2uMLp>2*BfznY+eGSQeo{JDm333w#us;8lr^vLu5fln^Q_D`)Ce z3Bn_9{p84-gr^*SCMqh*@3Kthx-tHBZ9JWlBm25B1lcRMEy;9rb&GCb$8hU6L0D9( z@$p8`GQjtNgkaEHfw@MdXQ8LUK-R-u_@fl-h` zV!adRhlbs?wKd(_b@@%L30g>b+L|u1C?o*SKC^t7Z$h0M+yZxgW=KvKiT)dbp8_j;O zH)4l&WsjEK!rIbdrcns;6JpHZyNiPbjMmoHlS@mXoubF`d3pf%?rCaXfyF?A`_@b| zrgP`@kqWxtL*chto_*Dg1_qh8E0W4A- zIPq($+w~BEV25&}Tvqz00tiL@Dk_AK9qN(ewg@lC-(4ACym^xd5U77x7#=18Rp81% z-pVYbazFTHu(J?a5zpH3aas(lOGs+YhYCWc`k9tjP49Oliv8-ocTT5AE`dQobsZg- zet*$u7TT!9NK?rw>_lph&B;1@dwajADAE`Bl+SC&N1-QYM*F-*Zv*9L;1AvA9HS=Bmz`!}+UxmVMX|1CdlJ z9ZvgtXU91~c+O*j!ewpv_Os))lEt31Fg^QAnZEq)5Cz!3?w-Kz@OvNe0btGQrN3#} z$A--Si}yn1(|H?r4ZWBc9RT2-beYJ&l5tNw*fx&h{nsj4Qqs~dwk|!-q)uEj%VX`z zQqAasHRk8%pM=mAl$OR^eY&UZyx4^X@lhJW9U2NC{nLS58bGK4cgNi^Zan#33$+vi zt)_>Itn3{eB&4M=FLUd?g8XXmBZRo6z5Pcdvm6l>)l0|($sXJD;cCsRtJZ7bs2~9W zfu&K+&QD_DKBq@h=Vu$|H%(fIIJJxM5y+vVLtXB?gpPVBBU1-SH5fkh3UV|!Ps2-c zj!oEFcs>I6xo8bkrrl?VIFiqv2_pp~lx){--6918Yk*9r3>bLLMeS87|3Dc#pt-YOd?;sljfu(q4Lc&+loA0E!Vp1XCo6dj1hr;0glP6T* 
z0_I(L2QRAisF0*(!b>^su|P9g;YNsD4f7Ip2qZ_#ZR?R@0=Spw!NFKfZ)otq#o`5= zFahJ&Lu5wFoS^6wI~Nuf5!nJ@RtMFT*x3PNeSLjC;P0waSnZL_Cq?Y(^0Ck5Vz?Lt z1*wqAy2O?SMGrp%7jS2Fb#-$pm<}B^Iy&n76A3Zd8hLNvIcFfzp!^zw360OsP7!~z z?9T~>ix^JT2SI4SQ1~QDaP{id%FTK_1hG{gEpt5EtiwWJ2I}egjb+I6$lJJpD`;tX zcyM%N1XTk-U2#v(@;&OO!ppl~Rk9-9zgM5$tA~j4XqdT(A2Lc%ULH3%co5XyM!;>@ zySOy24i(aswZoPs!v-Df^eQ3+x%?+l8pzQ|Z0XKYWks;h7xl08;79>T?hm5SW79Z< z#M9u1hOs&S(as2LCsV)1r)=Whi*l&M%3W4oXy|*BBXAW#KMvDUd=@iy=&(FN7JXrqx`GeTq=%@}-McKQ#HA5gksE^MX88O4;ePsJ}HRoqn zFJ8Qejf>kOQ}XSbBouKDkPK~x!HAUd^h6SpuKMjgXaP*t(AM@v$5fr{QJ4CGaN6h3 zNsm6>0aJqgLDGTG`KcTJ)jRlRSXEy53(L!t+2@CL4Q(zzVj*dO=HI$V6y1JAA%w2}cw#?P~tjcTO9vt81il=k)B+?BV;~oE4eV#YRv8}tnOhvM5_&;M_vZO?bl65Lq|Q}R zB0|;^pH7q-$*N~J)eeN@l#vt!Mfb~y2%^f}erMMb}l<9}D?h0_S7 z-+IO@By<%33FI_s@Od7CS`-+~$Ev!_KiIrifh;3M{Qy)W*G_u*GD}WQ&JSQ!iY)Hh zjuJtsgV+;LP{1dkh{R5JqVy=)qh+xfq@YS@`St7fkn8@s?$rD|3z9_=oKZ$U-(64Z z7rTC42F13;&A=e>abRRb8%1>Wu5DjBn3$rwyL*VAAIfI9)M4fV3Mp%Co|u`XZ|?4r zL++Q%Qp05k)GT+-LeT?c*O=b2T-Nyd1B@;dzy9Yp5X)SL7(^J^D zpcu=W*0*4FR#{agcOxScOcM`f1Rj7X_P~kjr)LM)#dTB&n7~kyBY}4n-@-T8Bf)_r zGhg5H(-I;{@7G61N4Nmt9pGUSye;%J7y=gofj#Z{910Xy_t4p>Q)ejnjb$(2Mgie1fK5}9v&Xo`1v&lhlWC5y~1bsF{3|WTIGv(9=-5{R1zDgG#-)d>r65v z5LdJAoB~cr0TePPD*8+0CKs0i>sZ%*#Jr%qtZa`*^?cRigWHzKZQuUz@gv9R*x3Dz z@v0(=IIA@Pkg!=bzWTr)_^U(_U5iJ)*t9zJ7OcvbQJ7%E}V#yuEYW3sw9D6pvZ^m8w&UGko?X zTCE-L#JD(ia1JTR_(l8Z41piSe5?1Ux^M1y>&F9J!$VDj&_)GzG+gM!u^oa*v zxQ$Ad+CxRfVA(WDh&OL&MUGl_sRbNOAZMPQp1RNuWGdgw@PTlEO-R@bk?C!GJU;BA z_0YGANKiU7?FD28e8>;T=-~jlQeoEyOC1<>baXZ$<;1vGA|zO^g+rG!=?Rp91cKF& zGp#!QfP1&Y#Q1o=Plm`~~1u)S-9Z?8= z<*W70vjR-ux;p3&OT++Z0}A-XssU3`4JJyatPEME_PGsOeDMgpEq96+$9SCjz= zn1YI8@%NV)*O64c3E&-=JJfX53eA4~bi$6FMzE7JZHd*uCFmLy?j;_uK@iV9ta+9UdKBLN}`9^Ru8fcE#AH zKTTKd7KL*mD#FICYx~MCP&O0Q zG!WCS+_j}P`F1Xy_2vo>I^DBPv^#d=uYi5B>?Hu;ol*Jel$mGeR+@6Ebeb*q%Bi}# zhV#9N0n|;ae$E78=sG+A)Q2E5BZv*rq77mqAFqHT*p8J40dVch)7Q7thJQ&`$+CxT z2!ih^xb>tUcC~RjHNGTAXw8m9k+74KXEx<|c^ixah44{q9GvU7KEZ1oOARiM%B)x~ ztEiX(HgMSXILM;>x^DF|O7Ns6$bHJl#L 
zRup#-awwudGEn7Tyzx3YFGgA`FUZW=)B>#)>PY&)+6Q#rf#IS@nS;VeL_s^6(Zdkm z+93Y*;S^}NDIkl9B%hXuEDw$X-(e+%}h5X+ zuf%pt5_savH=YCnsZXvgZE2a^F$2kOS@=B@j*pMe@cMS#v7Ez%5YT{r0H~HfYeC^^ zs`Je($JG#!|L?MJWHBMAdx{Z(TSi;yn|i0m?0C0CY$zgI1y$y&JLd8-dBM@qHf>M z`0(4CiqHMoDNzd1kJEWJ=)y^VMDM%5@}BqzRtMzhCO{N~%tGn{1j|De{EjLqc3O0C zY6=b2$h8l>jYq&0-($7wZtcVs(}*e&Hl>oA66fXQ#D~~%g~#Xu^p)_qxw+Rw%({{! znBO$q`5xSr`mMOQ|Cnb>`Ge*BpJ}pvQE!!MF_Got>P_?W)Q$%i#7FRwIp4f_lQvF> zS2F=!L+4_EMylp1!;y(Kz-MAJIIkz%pBP~7mOrvw6uu!99)@;A`WYNKAL3G()51=1 zf{52;-dK6BnQY*Qkp7XS$JIO5;?ObP>Qxk58qI&X^71h;-;GZ}`=?NOp$_Z_RP5|F zNWwJ=X_V+T0GJ|q%QZ^F0zuQBx@ciY6X~w;mn;N-gIzQN>jh;J-MhoI)~ zmO$Dk#GL9ozsVRm-$SFU#Ax2mcthv^?UnQH6_+|}Wp z3UZF%_yJxWz>=xjrF%GKSSWKJJt4Dk;Jtls;vTt!FeDy+k3XNsy~|3EP|JP=GEpoa z_$)~FfNe+m-aI@!NE2+&@YAb)kLWS1|XAO zEtlla3*35@SCAGHr7hqVU6Nl?&*yvGB^AZE3imF&ZJR@wR-eN;iUe-1>({SqR(akA zllyWii4aXEK+qjPZ)5jo_laXRPBHFzq$jX4Mll~Vv7sX9%F+i?LOoX}6wrACH|V*LFb1V3ed}FPPre35c5^t(9gdL^k#pqotEbPl2=#Rf$jtvfn(baDv-d=AinFhVytSTW6n=u-=^48WSFe@HqPbaIi38Y$|Hv}#q#6?v908}D?ekZi#t94vBL(TBZ z8GPsYF{?NM{{X{M2YZQ_5Ufu&J^_>o5 zTU#!qMg)9k<$Hl!F_t_HaPs0<#a)C`pPe;<5@H8d3ciPP59?@pJqW24vd*C_w}(Kn zhCczEDG&ko<>g;>SRF%(o@tN0wK!CmA>mR4eVikbX8@F1Iy;-)CcHBqsb;BxcEDeG zUzPe^jo2<+STe-O_Z|C*&OI?Z=sX*kng#(KEW2|97$RyJfdv^xV0KeB>PRH&Z zv>h9IdQ=vCxC0T-KzJD_n$VQalk{{-&J;xtFGylLLuBmpB%#F9?)udQJR{Ts88M=+ ztC|R<0oHL3{_p_drm3kpIbv6H-_|y_WB5HOi#*b$=>pA2w?0u54jIZAqB_C`(;d3M94<5ucO6wRg!1G@(Q#5iv!3OXEZF#q z;-`&Dw`lEqKFdgtbOz#(+AsHAN7fob1=1)=PY`FlbLTR6K|Rz5XjjfWDP>ur5ZcKd zz%tSTz^4%;gTxAJ&I#8G0?G>mm2Xgoj)YjiCv_pjj7k-*Ht;y~(2a*JY|!^TeY`$% z2_OFlklZ}pN6t{w)&2hcIPYQ>uq~cbU{k@+cQ`pceGBw3JV@%3B1^%;xVv_j%5>fU zfsgDlBK$#mzyT6ndy5jrQvQ^YR3c5FaznNadWDHn2anCQRa+THS1hCqP6VHBu3g8P*UiR2$s06 zJ*lcK6Gl2(z=0!5A>_+8H*4YF^X@jeLDxUz)hj6={}FPDjErm&0kD`MX*|EZ?E&b! 
zua_-!ReO6O+zhe~5QDpmtTYn1$G&QpNW;fAp{d4r`!+cs8fXk$gpP0^$W9hlN4j`9 zdSGu5oyj$z3K5DDkqjVl8$Q|uOFX%;5&r!wc0|R=948fV1Y~QViNL*cA zk!_}18EmMp@5+A6akAli-T>JIU;xGmxaP2NPKde}0l2mcw4s3`Z2}rEXnUbERlCGi zDNAjz(40`vbu|P?N7V9mq4%+C`^US4w=Mhp^78BooRrn_^k|@MB#MwtfI1OqSP!Xx z6T)8-08>HIdXZfC`}A~U7=m9AmqN5WNa}_<`L=~+QL#Gih?x>fXiC=+RG>? zNQ;Z3F!8S>t<9eU@n8Tr3?cNDYvf3xvmIH&SnoZ3-^;eN0o@i!YzC-j*@3hRN=Qfm z;hxi@{Gqt#)+oPfhOgXu3xig|QsE14k zLN>K$(5m-5CLkoFfReQyLM=S%OYq=O08CG}-+g~)=pd)c%*q;;DB^_+paf04f?CC* zjRD+O1Hy({X=y1oq(WjUDt0K3IDjkLecyGsUa^6Q0)s#gH00K8+8;=CjhXqrc5ZZZ zG{Sz33wVyMj12P0^^)yt^hXLxzVnSO0&83POi_ZbOVx(q~ATM#YqfO}F-lO^viG*@&u z{9{_>ougf%0Qv=NxbD>IDkroq*nlBOcN1=`5`&fG;E=04J6fr4ZIwyt;`L;{QK+sX z5Vvn?2sIW-Ud>r~en|-hpmqqQlF$%UL=izXf1{Y|ey4=YLeY0GOOI+Xax$_IFsBe` zwP7QM4eiJ7DlfOUA3kLMa@t$f+N#In4q<0GLSy^DNtI{5} zZC7*40Aq_N0U&23AUg(9g?cc1FpZsp7y~adgzk7cURegQ{xZl8$Uuol0>!+b2j4=@ z;=(JHtiEb9khrBl*)G@?gPVMYPX|rM&FyVcU@`tWjAh?L#MA7=q{wAR>gA8ldu}yC z2OFuXfr*pXjW&mz9|Mw-W2raH>E;k15EVxNkU1aA*`>&lgaRs`dRRer?}-Z6HA#r^ z1rT)-*6S1lKs$!y_rVIcypBjmDJezVHxh`JD1}v_dv`usmKl+n!Zs$sma$}$6L-%{ zT|s0;jAbq>>RECn&~2l%d1R7eJ_e}?F$#Kq{wrl=Wd}#09~XMbpWXFOqf8n z(!1iStqCBP_{tT{(A$6S5a)jgoMfi|sh&dq?`2Q$8F>9aQb_&(`>h5b`4zsd#!e3E zOEd^c4|184d|K1$+i%X?!j&i==(1l}QI)fobs}lEAlGZJlk6r#lrboyK7sspS=B1YCqvui9yZ(4u7%GF2Fy zniX_t`LW#wa&OyQgyuaq3fU{z8uEqVA=QVYWMN?sE{S8K$wVr#W)eL2tU~X^JWJ0V zCuJ1kA#UvzMGHi|ldVnZdQhW+gL;@KNYKB#T$g?ykMwsW+ykl@xd%$YY8SlzN3kJc zp~3WlC=@n>JVfDp7%+_uN<}rq4#(OqQR)mYb*`PuP;9BjkmMCw-chsYFrP^vIR&hPg z@Cz+-`GiaOv69LHN|^USE&nS>mN!6>@vNpG~J!T-H6 z3@s=d-oDBw*|MoTSbcEyA#a&Ney#^K?=v#ts|O$R-z(C7ynMr;H|1isx#^VAGvVd+ zL(=|uvN!OxIsd-4H%E@J)cug&!HJWLTEUM9J(8)8JM%WW*9#tgUwmFGs|4RH>qC#S zmbQ);(XR=D9}^KP2N@wF5N54~{$lVoxA?C`mAUSbk=$*}jydx>U{p>>x`VyEi|fk- zYlmBh$UocT(xPu;IAuFGm)J_2L2N6BR z{9iy%35Knumx+|{0O48m>qi$M6NDr(|8>(4+Hc>#NyEcozLwaxqHHI}gk{Y9Hv>Zc zzic1eVf()^5w(ZEFkchFexMeMK3mZSljdRjFPM?A-(^CwT9 z#FgHe{{jnPzx79AdR|k9KOV(!fl21MzLM32J$z$(SatH}e+AkFQq?5c)XXNh0!(c^ 
z&G=#~Z_=q7fA(?|oG<%4TRdP8{JIgiP?^{eA$^Z%lFDFtIhGl|chh<@{G?I`Y`@`W z(+lH{(2z{Yzmj%Vh4nPAmQg1ffs73ly59x1lP`7E39LEZ)Q@yU3Jcd)1>5=gF?eM# z@siILHE_sXb;p8eG+Yf-KFTa=*W_7nSTu2d&7u52O01kViR5U{6cd_Ke{CMt2q%_D zH1QaID94V^Q|j$F@N0{nqu7tB*B|iAOyXl>vjwgHFqcgZ^sC*$=yl-S{Sa#T5d4%1 zS@Oluw_Z!MJ;vPGbeVzcO~K*4#@zJ#Jyo)~v4>AQJef^Ymgx3*WnM&QsH>}hFNI_> z{pVw43xf1fvYkv(5oPi)twtFp8p z$<^B3`h}G2@=MulN+jL||BZLIA1hqKwVimOG(V$9j+ZI5^lOow!9MTh#ew_2Y@=e8 z?Y8T1Gw!LIvf1z<;^@G?pGsT(A*JJWY!h!q9I|aQGUcpvHU6Q8k{M(9g~^RRYO+}v zEJ1y@xAK>lst9MjEw z#L|h-TMyAGoE|?^#eGO((8#kWh^~VH7VMXUr4=$k6;9*rTAPc@8U$gPm-TNX&-r{q zKi5`3W7$1A&tXMqn{@Cqpgvv}AlHyd z33lWjyEmV>(jwsfpz1=>|LQ1U_UI0g{BUbmAwQ7>IHByz-H3NP^DYMZeeSU_)R4eI z^Y<>gOMl18<-SIAqJtKe?IifbY`lyr)&h#&FaDNADY+9m{5tNt58RiD5gw8N31Slx zI)2zUZdDdg6l>kJZ~Q{4^5t#-K5u4{WaRe5!H6+MBu%9KQ8vE7MFE|GkL*(5%@4z? zGJTTRdxB=BW_AYg@|{Zz<=|q`cu0igy~-PezLy|`fQBlb#h@Y6)3DX&z6GBjlWSfl zZ;4tEzuq}u3y%KnPvMm$)gUA^M93V~c^LBI#Cql3Ld@FHkireF#L+=aoK~EK;iq%a z+Lk$3B{3XJJyx(zAW2WMy(9A0yQrQ5vkpiK;P zmNi@>R;h7{I_^m5GwwkXX9;+EHSl`C++csoY&dtc5<{i3JM-^%iMM%!sM*d+7`%HL z5Db9HJA(4AdwvmkE`P~nabPeVfaM|Cb_FS4B-7IAF3<)aA7J|f0Rr401~Qrhd~nAn zF@4HlNhzuDT0lj8C(9gtVsRm$zDhdlmg3X3i0x zkjEdUSw{gxn*3{f$o$jPm%M68AY?Dm3L}aV5Y^2k`8r4{*6-C5tidiOZ)Tlo%tzTJXF*A?gE;>2iG1~6XB4|AOBpz&$| zJ`9;9Kq0Iz(&&W}Z@OxKyti#*bv0+(9moYJ=DRT$C9#1YGzRJuVM)oELcYc}va5qcy)enpcUR#teZhcL$n9RY<{sI;Gs zl^4~Sk`TW=RMuphRA~&V6L>6GafdeZ%=c09rrVjFd^xyjg7WEkTz&5AV3`$|b5dj} zw34XtKE4A|3`8o}Gd4PE4-6LXC*Q>`hInBQ68N_6bf8|W+Ss0?n`tXUS&3+)L_3fX zv)P3OMlrFP!G#Eq-b&kNr!DW?X(F?>sQ74OGYF`Ue+S+3vB`9dj?ii=GVbv3k!yw; z=GSPhgi{t)Y~s+x#c(Sa>jN$Z2YA_C%{fG&4I>c0pUL_nZdSjF?o0t2J?99#@Z#r;&!20@#;W=j#$hlFkq>}2 z#kuEpr<{z8e{i#}ZikABN>`=Fwo#ks`RP6}IeD6Q{M)x^(2_*RK4_fG2{7@JLSuKZ z*ybvtD+R7WRU7y?ZW(BY zA3%!)2#e2A>o95pI+P#H%~lwHKn_JM>`r7?wX6Pj*s}o$IqtEZo*vHeA3KW&RR5l_ z$tt29E$<%hZKpEKU&)tDXxq8OvHXBe#i!)G0G$(L9wa3-)d5Bn)^b2OGqt(1^CL&A z&^fWhq?H6j$jJ!Ty%fo0I9loe9YMk6{;znz=OOxT5I?@ydw>G7L|&@AZKx_>>PlT! 
zF0&{mzJk^bpWQgMgM-7c%y_O&8FYl)(2sx}Fa#aKASen8OskLAD-c04GWZ6nV}zlH zw=1#nZ*wD=7(#WT7Um*~h>}vvC}t0)FqYDO=jqc)VxP7Xg18u1$fehc2*Uv{UM#bc zOAwHeS9f6ulEabL|3~x1(V6s$S^w_p?F(e2M{0+&)PaG<*QVyV9IIWjG=;(85ARJ2 z9hzVFEbAINTl;=PO1<{4FwT6QFzQWEF>rz*!(wrK0|zC;x;{BEQ__95d-U|=d4|l& zLn*b`{{dR1q~-&3M?*u{yI$ih4OY$5q5PC^5^Um{D>Obe{K8v`*1df#Mb?dPj;(2p zI#^*v2n~e*R0pBvDa~abq{wX%Mxau*INV=~tpIlrQYpHf70ov>&Z-lA)N~;rkD=)E6?1 z#OF}_n)8_oHkK9e%T5dKpLZ=CEyitjI5}jF57YK-2O^nVe6FQrJW`OjzWjcNM#BxFUPccrmI)U>O!Y>8G;7WOTw{9=+r@rqq&-OP1-YRibFkel3Z{fo zRm;0>GAwl^v3Q(0klea^Svkbygyu(Y|DrmR+1o6e!QQ!eWV`<(&2ry<_%3NyN14*! z?3|RZ^r5sLhDDJ&>u1_#q+#6ph@^dMek31WR*-M_1lR3wB^zriRMNj`;O5(Zv;y9kE63$<=56uvh%F{BWpz#1`vcBJ~n?B=92YjNY` zqD)^o=)V!7|A&AnmO3IU;|zvKjHVf&bn}atO6`K`nsgvCB-2Q9Xiz+oDd4%YOr(LS zu_{gxL;%HjrZ*w@DO$Wai&EzIWq5I_e|;(3JxR7QS7h*My^BI6_vmu#ZMz>&+u5J4 zF3ILy75VsQ`;YTkT62tQM8F84t<&i`$$#jE-SBs55hjiYMfheb0>mP{gU{hEXCe*f zVoI-Hj+D=(h^Js_oqae;-Ilwqmi+Qk;lbXoI%>165C2s;ZZ9iaNesF&{@=NnpyV2S^x2=KV8!Wj~QRZVqE+HXdwYgs^l1SK5ezKYQE6ZH? 
zhu+ve!uzQ*E9SPAcb*2q0;0t8n02@LO%B-3G^M0Eg@J`qE+f<9y85K8dVctk!~7QS2s-%a?vrIZuXP z13u?n&B_0v@{(Q1$8n0x%Q`yc`LvI*4`b&?rlU9^MEiTf?yRY*g z_B70tgm+&rFCJGU2zIGd&B4%K@8x^ZjsvC~9;Qs~pLkJiOb=0_I99Ld>CB`0!b4hg z^8c=3XmBBE*fVAW3ybODu3m*%j<-%nmz4T1=(79AtGdzuiH;bo`oP|jbZ7kOq-}Vk zS5*G->Lb+h#G7(H=Z4MVYJ4D`5GF2DWmc~|xLstx;e6C4iP_$1o?)=x&EH!{(Ul_| zH}$9TdzyGFY!@E^4Cicd3DFMhKMvCBu9v1uPW>C`0&6>7h!FmV9DkgjgK@Xd*w2(C zX=&&>Qb)H4xL_$5`s7>981@ME^va}1e905I_uP5O&?iOK;B+&X-s+5AO6qffG2#SD zQ#j>2m(hI=_(k^6p~U|(8`oVZ{_WkpDg9qS%D$BTJy-?1(eeYSJ0H_4blp!ko9IwI zZ%$58L*KIz;mClxP)h9@LUNEx2}#7D)x9-)&UMYWaL*x7b+EyuBkW9bXR|={1Kolo zh#h$`kmgdj{47WEPi4vUh(arC9)1amSUjj(pi-BZA73f;-76MUMkgakFr0W#N=^c) z3lA=@W*bM@TGVz|#;?AH%AAS&IjQqofdcrJqHsp)Z%+2lM(8#VlE`!Tz3u>*)G~xv zHkWdXv5aMN_SI}-k9=3O6U)~f+jU+XTwn7&$t2n2vJUqu0G_dQNb@bgLHf_1nG3q` zG*TvPNUFTq@lekvJ}C@{vJm`6u5~FBqP17Ermr)&_!tE$y0RVMF%dX>0eA&oY;z`G z;s3n(XZ?S@`Nia`S?1|~7gZxcE=u(4)$EH`5*mEjO{jOsl{4dEW#88EGYTLeSh*3o zZ_Jrjv&el&kq1ZpAcnA?_*QtKuR&{!S$wLKvi0v#1GHH1wd{A{a@X{69={>s4T?u- zO?c6g!oPT$M5e{dWg*ujro?H6&HMX`{eO2{Pc78z7x6CFLIV^8yohiF6wtE2J|!l; zf!ZCR3XrKpjXc_7>!FmfGY}r!i)2cufl^3q$MA2D4DOLXX4X+woTxLzYnCS=6;!Z8*P<>VSbj8r5M z6CNI}CyIxM2aR-%r(>vb?-dKD%DCFV8hirQSFyP7YJRpDvj?O`jtw5%~^n6cU7E|G7N8mlgJ+)8j*N}F0#X*w_${_p^s-E z-Y^1jIvgdGH0}iYm|&PiV`65u&~8Vz6;uP0pwetyXB82l0YxmL3I`iew@#dX(p6Pi zX@9(D0}XTG4nq~@q2e7fu21p-^CU5|=(h#OnqSit6UoXXu*huMsu1YDIQsZ4dk4Q2 zgobdNU1@R@9YNwOb^l#{S76?PS(NGN=b&5R>3466eT-KQ@H7qF1=!? 
zTyt;In_tWcHj>vR$TE8`_{lY|i@%JhtC!8yxTJi4sw(h*w0#{3;_lJ32!Hs^YxUpM zL*<=EX(|k&qIwzktMU;EFnpIE94eE33X|QSDr39DkL8z>!>I$O$k5VKd!2m$*`{3I zRm~uox&1E=#71frC1HreF)7Gsa#9lgcgx?npihMozuHxOe{<6w^f<^MBFYb$3kFqI zGuX`*Ow7!H8GnEygg}&*dSg4mZM+UfSg=6}45R}FaC!bZT+J}5Dew47Z40RsN$_!d zm}L?^A%UqS{(7{m0-Jf$%e_?}GBb*#{gnYiP>cHoQNzC#Pbi}^r|Va7Z|`qP7@gxMs#rSP4){NG>8@qSffUN3@jy>hmaK$&8EJr z?s#y=NunoIh$uTU)%jnASy2)r9vy}8SpBkzt82t{vh>^zRf%1DTfE1%T(#@g5OGbQ z?-Uso1*34b%h^hlFn%++zMiXhf0_7$uHPjTjCCSpkLw8NI(B-%k>NlDf08!s3fHd% z+0u#;W(-A}8(Sl3^Th1_^9fb#S<;GcpgjRh{hRg!8b8^KEhhOJqc|P+g6n}wRn^!C z{Vb5N@T6% z$$8yYrRMkCOBG{ZL6`Moairpv1SiX_&EhsES@=>+CHCmy$x5J+(sM6GBg1wmKlsYh z@^VoXH#f}19W|?y=d9Vk^BcDgzLf5i91U>d_$l6^fEpXXeF!|K3;p#ANu0b6;|zbKZID0X*qHg^6xwhY5EAIzv*PHOd8g>};i2q$46a3ZwGZXjGf!3V&T8!Bb z<3cC-8FaSP1O%3O7Sb+8qLS3EIe7`g-=kFvSg&+mN8)!hk}-ir`S$G$_;fR~XhE^) zmkUS|PBevS&5$>5?zr4eWP{oO8%1AOsOr{uhpkXP;gf9R-_;?%@o!WkJy*Km5}oKP z(AcXV_`n$$5TT)e0}^l$Q$L4^o0tk}nMk6mSFKKP-(&p!$`&%*LAUh_?{3Ei>0COF z=+L1UL;Zb@9ljoU9gX)8?KFmGUBSW|;M@y`KeIA$vJ^I)bW{rpN4@LEcUS;Ae4e8w zR(6fC2F*h_e*NEDN_U30A_sJU+S1<1sbR7%zyadm+FofXKSI$dC@3Jug@Hmi2&8J3 zCHm#gIKU6W*)=fXDFT@Y=4#Sx%Fbcj3iw2S7;HC$K`y~MuWqh&cpuDdBV+Ui#>N3K zF*6PF^WRw;p!9kPe*@WC5*${Ah)tPfV8q5VtJ%WNju1FcnC!bs%Bm1pSa=H&!;SeI zY#PE5QWrr12TCm|5EVJ>EGj3Iqt|leRiev8eh){Rgqc1>i9yoBk2=kVx23@1VU z8x}@RH-i|5{QBU)y~TtV5nU;fTNt>5GWsS+wUHTo03awoIQ;`riVl`JvBH@P{-EMA zhNRMCJQa>xZ};<*1FItB+?wYjtdBMbug zA21zk2tn4j{Xvj(#sU#~Zd;1+PI?wO-dws9#zOKi8|L*&r`{!{zuJi2t<}=>!_v<;1 z=W!g*-qSn+8M24O5=Vd1^|_4P$-cO6cTQ!JzWeFj$rHnjJu+riya0=01r8e2N<(oF^eff8Tc+)e=qT#fGS@S zPB)7wq>@#n2I6L2@1{5kWs>o~6RjSVLF$&c~1E zFAf{mJ*e;1)y-wx?3eF4dsTRTUK+h(7|-rWXsFZpzVELjqByXuA4s{g1#qiWK*dqS~u66s3*=ut0tYdAWJ0AyD;&vTy{ z*$Aa@+|ahm_|MA9tDk0M+>)$_05lsU|5$bB&b_-sv$EcnAT%GM=j7vCDOfzd^-N7f za?Tr6+>~EmZwi9+$U@Y!;G2UC<%OaOE^aE^CCSOjhD<1r#r!xG>iU?ieY+UTUc6We zdN??a?Od;+X#4Cw7Xo5)#I_oVc3yM1UdGVfPJkuvZq1;$89`Twq z=J@H}-T;@aa}aMLZlhCI2utCqn@J>~pmW^}b65 z=`eK}M#?9&Z6evBloq>9%@elaR7D|fF1<>@H1Y1@$U|ag#6koFZhO5BzIEK!=P~-N 
zE}l47bi^`Yq5yO{;(>y~Y8B&7iVxjpqIN{*;ZDkbyaF-Iz@zL93MDiLD@omp&O^y@ zlXdX~7tvJ0W9}=s-Opgk79Vb}?ElC%NEk6;8dZ*{?YCu=NYKiOq22lKubIgFhM2$v z@oL&fbb#lHBf-JV-2r6<#RJn)Ko}H4qgB_j*=84MoXL;GGbk9n z;1&>G0zZ{#$jm?FSZ~G{F5j{L2K--6pn`(YgQpVL6(z26$QQwiy@nfVJU#xqd3m$a zGf0S_VJlUUnz>3W%Bu0EqpnNh8aGeB4um)07XCsz2k-wGsEGE;x(EKgq&DNUA5_#7{{?aITmS|I@`FpX<)bq6NKbDyK25$ zp;Wwf{rWTHIGEjC5g+ISxq&>=D7F}iVQNF<0>r%q4+HW1gZB)ut|0Hf=0c%F6kv=I zFaqyEh`N83%*~KTs5l`>)9kN{dsi3k7J|QCY&5!;&&*Z+Kjfvlc!C`BdGt!l%k3|B zIcQ$%-Zf8Rz=WRrU2R~D4v01wZ7dH`J+LrB_7-CH1m1K^+wU41l|JAT{6y^Rdz*6b z(D5Z;1}zOzrJ?|eGthQA`SRz%UmbaykLpQnb%9OU!r$HUPZOEgw<58sGSVj|FZYNB zySzRm%n>a&ZwQI=wfyPeu;R-{btM(AIG8{z(Nz$R=GIj%UYE1m?ruOT|Ne}-{5okA zoIY67Vhk+28k~6ZS$IXnzzL1~H;h6|Y$QZ(+VVAjg1ZPWn|?9N0klNhwA4T(fbb58_n} z3Ie!n>>zyaCk6!!P$g0*PxP`@fUFMVknPArR{>ZM^%h>i&@24r*S~MZhyTF4ispXv z<%rq6O|I(}d8n@smwKeJd>Li)i-;$8f`|aRpSZ!RG?Y;MBu_J9i=j zHdCNMIr91;*awJ}&v4#xm+FI|5xI(7Xju$}14<`L?eVKumk^tvw5Lz&KGq;K+aNB; z?f)#dz%9g68Y$h1lSg_#Vy z!3~B*F?OAmm6ccz{-3v0(*!KT&+wAvV|0QUVFiluYd-N5`kNn8x$)p4XLh)9Eg6L+toz`?o2Slw z+x0jkHqYU2k&Z_X`aN0*E_R$hUa)V0REgIy`|^HKK(mtP5(P|O1gh8%lsys&AUwio zX8*kE*W0C;(({59I~kx^k?#PIc{zcG{Mj zN5Rc+3o}QjWA)nELK!Iiqeo`)++z{~>Q^>JGx8V&`ZwnU} zHFz3mzYl>7gCj*t=?(o1A>R$+Q3OZ;B597|>A@o;kI72W#QyzofOyDwFW{MXPSUO9 zICaH0G3v`#S@-F)PyK94-Hh|=g#^BB-##YvUUJ8eKX8=E2A@ZoYbrHnj2Mg|aS^7^ z_~$EhsyS8$#BCQcYYGL5LSoMXmxf&MX-KI7b?hk!ch15>C;I^;VeGqiuR~|yt;BT@ zOp4!)+D2wM`(eok_zY$gLW>qJhT*Cw-o5WhkRj7wxlCf4-lS+pKw8B(&nSnJ;u>uCKy*kP{=og7Jcr7xdd#(7sKumtFoY4mM-N z#0@Z81rHe0#>YJEt;aXXkzFH{O zwhqN&!joSle8q0h$w0~sWP(JbNl|JYDbv`{JnYzg8(@1RDLNRh1 zAv%l85HWs61igUXg?OY8`{7)tpAqSf=tSmEe#0(3j}-Q+(hz3?f!lIbns`D%y9en7 z>2oVSvCbD?&}ck6OXYIv6f`v3NUw_^unq46Vfi!EaOY>Ioe^Re<5Ar8eqiAn!yg>i zA`Cq}Bz9Z2Z(mL-D}2A-(AbpK)To4+q7wpzCU}=4cvXKsO!aOH#FQ5-)`e6mDmCkd zpi`5MRcA7GTKF4>R9It{bZ>SWzPsxNcxU9t&{o@xpXBIMvLKKi-X(&!7k@_3AjtTj1ADja2wVo-QC@Cyn9f6LG~q! 
zlYPR}blx5w8aVD6G&gV)Qu*-VLuq3pJvtQW0|%ZNZ?<-3t!`Is)7j3mU3D7L_qAC;t6;9I{*Ka-KTo;G#B!+l#~&-3aT{KB&{8m z5y-Q|!wy+--Z}_QK&9vy*b=P#%FYHnFPiL4%DwP=E8G~CP_Fs;RlH=(xE;hb#2s>XD|r6|5Z~>4Gk$zNelWga;ud3M0nP;+P2NPHJy!p^#HNC$9`OK0 z56qsL%nl3QkL!N@^ZxG5S@_O8WKcYKxKWYWv9wbN7_&P)J=z}dco0O;?juv)l^-D=KIZY0m_0OdmmE0y4f$*S0=S>~d)7^ay$%@ckQN-YrloI(K zGIyp~44Y{?6y31IHHJe4ajU)qM4SKzK;$^zvKg)QT^FeWSOh>`#oX+e6v0*CGQm!y z-gw9rjrRVz(28rs^(2Z5g?MAdjFxu)HyJKy4jm?dqIZ(B00DoaHXyece#RjOPA@8j zp)ggbk~c)R@NeCS6Fv?H#>^IY5F;ecJBC8N27A62K)7~VHfOHbN5{EK(WBVBk@wV#!xsH-RTLob`GxrrZa zG>@5m%2;<12vd` zAx~X~D~qKbSz16(Kl1X~#J^;jz8#E48mJ-xv_Pv(hb<>D=*(QMOuf?Ql9*UfydD3# zJ5+4*eCWG-UBc1B%FOIK5Jlz~0gA_e%JJ^yXEf4*F%8(Z#LW~3f|g8xK{4HrsgtB_ z=Eq?aE+X=t@i6i6{;%3IK%B5z^oH10 z91u4+?*{-rgJ_;1ClC;E@jnRZfJVsRF)~DjrxYeai@$jgjs`FbcF1PQU+sd=i&B~h zr!D@$%~t(kN1QtlV2p7}$KmRS@sk5lN#7GazxA8##I_&>6-j6q@?8tq59BL7o|o*+ zzti$}A^f_FfOnF%Y}YO|(=ZMNYMO4i%y_FOfJ+{xoc||M|F9!lS5QDN1_1ROC8o`n zvZcPv!Yfb=0}id;7POo~INCch4IN?YLK(R?1-Gy(w|$i41$>S+3A~)P^UDq^=>6jF zo>+EduBtQ5dTvpm?Y_W^1_|rV4s0&;yOZ$iuQBakRBdG6vh~k3MS>KULMP6zm$?j< zJ=kM+D3qpHe;-I~F-SsUFCbGVIPHf;XAHDJX$O-v#P zp?lc3S*D&CbV@foNtn!djJ zB$A?<@OkvpA<>ZmY!#^6!vPEnncyJw*^*Q<6mPL26k7P-jl7jNou9R<&7V#3C}i2<^f<8u#` zyvNq<4r-nDDZjJ@H;${)l@JV$UwIr~{$E359xGZ>&v+%H?#i@kW5z&v87=J#AO*y0 zCBz+pigD)&3b7JH6gi2#GuQ)APQ)MZz@(Id2~eADsXyG`GQZ@*v4mKrVJ1ve^YGDx zA;vml{|>~Qh5~$-WEV8tq?IR_JBoU;&jUA!@5jaNwX%Sd9uGC4;{DR!zhe_C>kCYq z)Ki{f0|p;F<6polj|^H~!2iW7ARqwn1B1-6T^D|xAO_54Lq>VFhv7(j6P?!if!2M* z92ocm?_-sRRyLvDqy?dDL9MKv(!oA-Hz0I7lplwc&pb{@lBr1p)`IGZ5ABGO0liDb zCdK@1vMJ*H%ooSQuHCvtPb)cn)c3fv&L-u2#MfFk9i0F zc_MQrSt3JANcYnrA^lnZ zC#QzS`dhS#{Rek4qA%H#)$l4TFpz_C4YQ>lI2^~VM43absbwZd)>Nq_TFrZrO%!{n-@HxtfX`Z4IuaBeHU&R!l#D_%}ukA+Ue~lY2TxY$MxPtD>aRsDZNbS)w zVaJx5yWm2HYdZxK3i98ydr**>Wz7xG`Y6Rk*u@n3=Bwpo?XBF z!Ut8ut$)!^`J&PSLn1=af9pW1q`ExlfU_py((=c={V~hKcMj%~A+Cp0zE-1GI|$!z zWj>cLwm5qbkFgicoXyr`K$Ww3zoSyuPxv=dJaFP+Hq<=dutnPfHsjeN+N8Q1dV#Gj 
zE|7~J;9_D}RL#NkgTLQ5f37Ua(d?|&hZU<=0|7)qDKR3zK^>IHePIQNOUI6S%IP#Y5+O-5g2y#c@)%>T)juwC==uL~DMVfuvw#ZM{4 z{LU7%F_2~vKs`D-8he;l z(a_KkoAG6|!b1T6B%zPl;?15z#837NZbLCiDT)fUl@1xD0X_ zr^EU<)46#3>q=srgsE}GO2I<19AX;Y?alot2=IM8VI^y0vlC3et6&YGa!QtYdr-l5 z@R;+VLraK_Hg?fybI*X80K1m+>3dQ!3{!i-@+n0mJ3?{0u%9C5&cbACB6qo_l;EYm zQptSJ@_IU@rZC`)RZ;7B@#m`UJ-HjTK4}&J5xfGM=0F`oyf!L9#kM$^s8STH=m|X6 z4Ut+HQ7kt2Sgo%J^$fCM(-G_X$Fwj&wDL6o3$b?F*`5wxl``goi-z^;I(IK$=Yc=i z!Pg|(R32Pqg1LgS9nTsdu)tF$pe+9T(c|T&A!>T>I2B}I?rk`UOA)CNEa?S3=331s z2vN1AZEQEkxK57gI^-M(U1D$6?fu2ljmUo`uKZE)KJeUR(rNnaZC#2?#e-uLq|5%| zwUM~Qq8_ET&2yQ)ubx2UT5xNdn&x_r?X~O_|N9kvQrcmT@1^M zbe7)%x=v`m3r-t;hOhZ%e>Q^=>Q1}wO2+hZFGTaEm>p9mX#ABtFXTQ>ULy?_*7(@C zC7~-ZmwYyBRse!zH4v|ds0nip^R0u?vK^QUbpg<&3^FJyKc=^TDt4hJvAnTT3po#w@1v@hCVH7>L(r=pH z#~nx*{?xDM|LD)}X)ydzyYq6#)y)@}8PzTq3`gU&>wwTj1EZIL|?B6wP@Cfg%`&J%F_3phSxtcD7Z+!Vu%J9@CRu>SNh z2{WUGn;BhT@&5B*FZS!8m`Z?=AL(6zGCCl-@Kw_3|i8ir=Bl^)6@ole)zbT z!TbsqFwi?Ui#0*(0F0KyZ|_~&$%7Zk<*kig`n~2MophTE8!PKA+{Q2H5@mse5aUI{ znM_Ylr+9!eKz2C++acRjNEJ-nn8@xZVm36-fFauj;Gx*+}C<9?xxj z)ms>nkPS-2JrcY5$R@3=JUlP&6*UBcSq zP?mlaXQ81ia>fQauT%#LN)4;r(|7d3o%+2~a-Aylx#- zq~5UgV<>1xMM=IL@I&{^D{hBm$@)Fq6h`q2JK^k|Y0zFJil}g1yWK2mU|@EqYzRjw z)PRO=aZBfe#ftf0aaZ>U`jk6CYpJVYup{dT$_`FhZeCu`qk_CV_I>;EK6HVR5pv&X z$XeGKbaIDmeI1?Bj~{idOKK#>Hi7L1K`b#{KM7;TPVwqW9^1oBu3 zkT>0B#GJGs{!tlWcMtrL+Ifm|4%!B$slVL!{lR>EsD4ykb>PhO?YaD{kV6TCR2gq; zyX6=UZ=CI!Q+Ni*dK;KkdXX)#WJ?=(G5Tgx?oSwo0gXh1?m|dXNKCg;Dt!e6cc;pP zjO?!rtuTQw^41mlD|RGuZQXs1moEvhX&CH&tPg=g2UZqL8EX|(pl>cHa80i{@8l#X zT=KpB>EMR&t0|F@6m+yMQzL_eo}|cF#ppqRc(Rov@UYJ!jmXLs??{0F8VYHeNcRep z=T$f<#*M^A1V6rq&X{UDjyYPM2^A|@``?ZmMJz8tWQ?A@?^_jsYMk9HXYgV{puG(( zyJZ9>u9$!XiE-$Ivy8@@_n>>jPS48TuabxY0P-lH3zeZfs0EsZWw4Dp4u7Uc+wN}P z0IsZpgsZDra+&Qu7y%{T8R-9iGH_f5C*CC#Y-ho$+j4A1QUln7jW%Cn{3K4gzS^wWKEhql!y*jHLm_(ye_>Eu+V=U0Ok?&7|Xhd(zrnq%EyQ0zYf=#e}@PlpKNenn&R#(fQ zG9>HIzF>+e{9Ztb3wi|1yqw3h%bb{f%>Vq&ceUfPMd@o2<(F zFJ=g{c6JVriwAatji!SP2!8=Ei`h*7`g=83ad6?JN}+(4OAN(Z&=`FI4-N4{^6j5l 
z;kAh6evK~-V$~1Csz(7Fz&x%|QY%3N_6-Dtd6%RuXxtd|=afSxfBlEA_=ayibh;@# zD;fAb{QT$;B=vHvZo$kj=)ODVb%X$l|3-+-(2I1R+bDAa&nF}X(jdYS773^>ecu!S zaG^0KtK$G@l66Oj8qNJ!f=bx* zrZ)HM@pVLl({8MMqV_|Z&gg(4KXU;~N#%o5$msufmlfZpo&1xx09kE~()ezYkg*^)ix$9M_QQu+GUx6H(={E&h0tW0rs=tB?y?bF$+O6QUk4k6% zvU~a}GZ(BRMmqRtl=KCoOjU90s_`#(93 zVDPgIa3S0$QJKG58fLVV z&p^O%_{&l0kY!MW2987|4BYD`=~5{`w9{?A$biqg2;*6L$LZ6U*s#H28stn0(d`hK z6`-6bFgUT5z;gjMIHdBl{O0A;ZI|6I(Ogvw=hEn{JW@dA6LK&6(xIAb|6Z(Xv+w93 z(>x`m2D7N(Did7E1At@FYfvG)v#4E7OWXKe<&TvK8#WXGF4PO-1~`LpBw4!kefjd` zmqCL>I=SU|vEr9bz;iQXcZVx=TJD%jO`7s0X^EeqTqW4~22{vl5VX>F~7k` z|0~2pwMRBfT`JslgOHD+xr4GKwp6WJS;rYV|KOBd#l2|JRDmAoHDTHErLXTMsDdAL=7*lq+}BUjM_y7HIoBWSU>BlG8F7UyOU=uS zL4D;MTZw@=ThzL<=tyKn$$+ml-(e*|a-cASi}`J+8~Qn{NiVB3Y9#j)i|be8A*^_$ z5I~ku5XLef=EEp&vYbD;jTw+_>DYdB2w90Nc*k#J?W$$Oe@ad2X-Gz0ntO5L>F9ZB zX^+W^jA8<%%aFYUvFn)-<*`p>C-$ZGKexdlMSXK-o_sgVzmhGP&B%~BNIl%|8n4Db zR7r7r!wTjb4i-1+u`)0?)&flZkv)&4j`Y;ifabD2U0lH*lVc$Q96cAuP+SKCw1g3| zV=JMB?+Tt2U=mH^&<2tnIx+%tsVs-Exrp#W|33Dgo=Ml_Q8?N--+TD@agI0&q|0h- z|I<)VwVx3Hr_O%~v|!JYer8H{Y!pAL|IG9rPB7V&^=8gXS6`oS*8b2MB&zgQA4|21 zNTZCK7Kk17ry6mI*MAErN0WC!q!fj`S-6Sw$%erdOm@nh#(|VJ5VJu^_sPl0QFg(0 zN{p+{{&jv+v{*)r9J$ggX?~TsQ&^N&$eAcIXsvqvrpkC}H?mGSS#$`*8UD`9$d|Ct z0Chzt;|whsl7Jt|l9WPHveRZ|DFK5E45WIw1RECNudV=uffjq9s1cXs@uR zzx^Om5YcwFp8AWEwcU?3vMEVhMNnRlcgK$Y^G*b0B_B$|^#{cb0vX79gk%jC>WXU- zy7=o5-aPDh^XCQF0)Cv2IIh*?xKKsyHi9Wh?!!wP@sFVFn*T&rT!P^Q+0Ou}F@B>yE^0CNZE!nJp>W?{_=#^^nwT0`Jp1Yu?NA zUX|Z^WXrkBhqmmCymtA}#CA27yN`A2_}v)08R`}NgqBbRv2A212#vC&refK^#F`(M ze)$gjHW{6zOHA&*jFw)vmVM3ZQ;jn-Yme!fuwUkIQ23N;*IQaN((3w9eO}yZ1q1<>6wox=#G`v{5hxu-2b~_+F zvd0zFH4C6h2=~yV`h!3B!+Z!Uhr4g>{Y3Uq;1vWWrl$DtYYf>r9$ zaYCENtsl;KIP-esqM=~~agImrY6dp03(%Jl_-}V(Wb~wHW6Y);`gh^V+#xxO$p>fs zR{EVT%W?E)V3u``#kwh?%bK~#XLDsQ-yyCJG% zr~a-`(+KRZnt`Nj|BDwddW_~qjIw~R(Qn*%#9u4bFgZGU>kh0+Mo4KqrXmsBKK*^K zUfPKR$wdsEdC8D#;wRVcqQj4Cb#p`QoC@CcUq+Aj3@j%wwWGtJ z?}=6t{uvY+syiTp$Zploxt*Q;S>r6GL)CBJI$G4jo^AvHMwy#ozP$oMt`>-*g@uL3 
zEAucY`i2F}jt?-d>;s>tC~JN3(k1rfz0YnLoiQ%r*V2!*uiifA}noCQUU*CkonUlmRSZxEutYR#M4Kzx7^;w<>;W8O$9pp8)uNt`4)E`>y|k^o=UtUVsDs$~ z;|64bL?$MsmY)iBMZ%5}o?}~~dCtrpi>xU7uO8@96H!!QlltAhLC+fl&eqMD34kXC z^aQNEC(y?iVV7sh>CKxT-v22+|30d2=>%5e&2v&g3^aqL?pYcV|tj; z;aT9e?#6IYY2M}E91Ge;a(@plN#M9Mmx+#Thxw2YdFxW4!kOvB>0=)mZXW)0>5{PP zMQiKBa&mkU5^Y&;d+d27Bn*J5by}@wVd;Bs=#RGict0EpPUp0=H6wSMfip`#kTIO= zuc4#UE%p{WxxU>ON*NYKuynZ3a&)h!q1gFSGy^~NoHDL2=IFt^2OVAb1LZg{Et8SQ zb@8}4SOna?8-@925{h!H4!c9xH=tZ5w$9KqFyw#YPxy8;_|ey|r*ihvI^C`tyER4^ zafr37KK{TxDu-n%d-Q4FYf2Rsx;02XHmds3A9Vb?ReEy+`(x`{wr_I;?f|w z)RVRsWf#^J9S2oI9pjonOoA2gjY44g7lW)vK7Ms|bsYu|4VZChUz?k#aZiDE;w->` z$+n-D;*ex4aDMf2ZBE`Qp$^(*Af0;ds|S$npaOJnSF!ph1;WT~pcF3H60`xB1qzOM zG*$kJ0@m)>K|#;PmI5Z{n|r$s(n$>8*}Z4a>5QOoj+2lW-rr-Y96Ev-Wv~&gd%)HF zpy1%|Jpz;lcJbTHSD93OehFnXe1@o{?}tw9oS*{tn1=YT?LnixY>^dy+Ozwy!{^ES&4R<&|5uMOmk_2B_A(X~p4Nx&5v~JjctNtt%u0If z_wVga>-M2BJT|?pBDq6i;hvbUIU|in`L+5Lhc+L)qSlGC_Ym7s16+5XX7uJ~xGDC0 zSabLthnM~BC-t+RmI@b*9FDkLcYQI|wdlpViEe^m$zGmL>iF**NpQV`hz=|# zbe5d&NcLqtn-cE4y~``WtAQvpI69ORu9eJb<7oV^2kp!s9md;S;1u45@k(n`gkqKM{H28(jt24TWId`;-dP#~! zBulmOwM`8&v~4`u^qgvphwDjfTmj8Xmmx0lDQ!$$UAveh@`R4*-~L`bQT|hBAFbJ| z+T*lF_Y+D6#5+E&UbHome2Dn2(Fetk8b*9h%iD^!xu0(SB4i?eKWjE^G@|(1-C3&G zW!>cUdb3Wq6yIqyPsqI0XkPo&Po+`K&tq`yJ1YEd4f;!8td4aHK6d8YwCw7*s5f#? zHGh>avGw3y^XGrpURm69^l9GBdLA>s<}A2PdU<(OH8p92D%uDZ44-9*RhyUtb{Hck z^?oWUkevQ?FsF;QTov0E(IhtFDqwd;AA+ir#Sj*>Z->hR2(jc8kyfS;5OUZ*}bwXF|J&F3tNPdlA93af&v4f3Flyyy!fHxN$@1;PAiMdz3| zF>$mURnLE8UVA6Sal++eZ@g)Q=7q@{0>v|-Z(|4Ed4)Lk9PeFiaB*r6eMGT(Ql$ab znt=!t4^G=*Sy^+?)8W8Uk7pSl{KD^3oLpSJSp9Mg`4Pl(acn-*cb}|($PwflAqdg| zWq5jV38OeK@XSyPievTLmG}UB)pc#rM!E%uJDo7 zjpY_ScV`=8%j&i5o!zvm%5E9yO{ZOSo~hDfQoyAmg_QoX;75?Doi;TMDL4w^Tx|c5 zxhHQWlk6TmWQ!3Hdcwj_o_`_vlI|wu@O~=2(cjx=+SC2lo=Q5*+{!7xe$r>}H5&m1 z?VNDFeZQ+yc+&LjM>^~Vms@B#v0mX=_;l1;aXL?x>Gd{`EQ*b@tczg!ZT_{Zug9v9 zDdt0zXHD_M@28_TL}}?aO~}fo9}H$LI6mp95nB*Lh7*8txLfEC+)Ns@N*Z1Fe@rtYcr50yKPWv}D`}UW? 
z!YOkHD~A^98Kxt)bm!iu9l@$fk24(vKHmo8O)3yuR5F0UbS^XHd{k_ZcSMp&3w8`O#IdRrrddZxo%yA z$8Ga`y-UC1cdXKb zGiegHm*(7k5?tN&i}y4V6OC06o&}a?twOmp!{4BMq$*9z!^%l81#=Nr{5I3L7 zpO&q&)Vj&<|DDsJgZn)*1vQ*P9V%&w*7X{Pd!{rdi#0dyeS*NK(NxrNQ ze(#KL5kuE}j>x`U-W5;vPZ85n(tk&snr+R*=a*ZMBa}t0x119R10b2w+In2UA2l<7 zaJ99&blaX)2fWUkz zxCKRvF0!1jabAot$yr5TchV42Os68F98dE6+Uzf6UjJIH54m>95cTU*={x{kKsV4r zX&hH@Wd)>uZG$hJx6j9FN-NblkrqoD>3b{z&q;MdIk9M=8K)?DEFc0N%NH^qWhd3b zuTZ$8$xT3Oxn!YQqTHj92!WsV3yJ>!PcKqWJ&YoOho4`GRC6sksc>j9IN5^Y=jEyN zTnT_%ZOx3{Q80!=5=(*-_J*Hd6GZriEphYLTRQ08?nr3jw6Ko%K5XZRDxT~Wz(l9| z&6{MKFo^bhVaSnEiHfB)!W|An!;r$9MHxgIQ|#^xf+`*ONVDX^J+P_{q#{cT?u(v} z2Gy9Ji75fK8(Lx&l;x>5EH__lEod8X0gW>+0?!|if;!B=gh4MD>8?G9IZYkVxh#z6 z#YDHw-&9`Hd=EnnjYp_Uwo6KW%6e;_#S0l3a36XrkbH!JpeaM_1RVDss+6xT1AkDU z7E^T54UlLkjx8rIByC{!t!H&>H%4u?qMNL?efCES_UM1m8WTvVzoWt}Skft`tW1U= z2EjvvgB%I~xp)NxiaUp{bOjb$7rOuAK%XZ|Ted3x;D!4i&EN6QcyxsdM7H0+CR%Q>0`2cK`pfx-Ew4S$ zLWagtL!36yBT&krwDH~I#fwj8d<+Z-=*X4?RO47sq&W3!a8MlzD&jDuU~3r?5D){M zT8^9S;ln`%Wbd>gZf{B_xb9xw-sYG{n}b#3puC(*?fCI0csp7)acF!DBL-G8^Zd(q zNEOiv@mp5H2c1Lx`ZL%w*BC`U@cquMjk=dHK>t-!RuEib3GGPR}g5 zZfNV1Fy()*v2p$w#_xJO#X65dQkb{?pE&n_V*kIVveHZ{H>~4Wv2tYqn6ZZj$)Gk#4BR{IaGDSbNF92-+XNYU{<+|el!(^RhaEV z6#)NWBOdp|+R`|Yd^++ZOJ@Nu5!6YOhEJRZa81kxvoP_)({BdTGkU8I8&M%_&VCcR zM{cauP9@J0bvsG}13y4C4ez&;m!PQbxFz0pnFZzb{O|$>0soBRe{m#-8C*(X=uZGf zCbx=!6m5=$DZsd+;E~eurC=M)A;kIvl0 z!?^@Z-86FBful>zj&}f{)7H`oKJ)rsln z?xn{-tr@c4Na5%{SXj_?q$u;t9^GVyeGj}XiqMJ-|K z$pjmq)Lyt(byC~I$zWo9JWv?ukiKn-q^lLEUrEQIRrT~*sMLz&Xk~! 
zG^;n+NU*sLt4aJST`Xqv*+b^wt2xIh!jIeAei7;Jv)=xVC_lS&v-?--CI>6F=l~;} z8rNVQSw?xMde3T!fj=g2LSP!ac>X*d>{~KF0hAN;-~qn^^PV%lV7v(7u1-u&CW6{$ z)dEx9+taV(jC`y!-d-gB01Edp^eEYc2+d)#k~QpG)4H zsgHYji8PG8K$b|RBC|e_6@pxu_G2`4FNNWvd%cZewOL>$H%fH#8`~G>bHI2{6G?c- zSe~!1uVRit`YEp#TS6zmbHd!qgk~-krFxaQT98bBqk0`&+wPyrcO@{P_=~1jkDuK= z@U#uBa9CqByFtG%;)VGJQb~X2{!HqKN`t7?gGHF&JrkVFI%ewX5$twRG0e~8m04h`ouue)g59itOGU|=?-EpR-%ASy zV3go_$_HO9mC`QTYPXsauSu$Z55;kpkITMOJj9+!%;TZ|uUGhycjNZk#O+*l&_6IF zr0Yw>XDtjZ`*3orU%d(dHOsVyt^P-9ev5XTn`X#J<#s5vE9PVf&Gde7=!;BGjf*?cd7nrWZUbB^6+mf zap7UFy1~QtaKkH&ZI5{^6AnnOl$g=gIc7Ova*<(Bx7&<%CgYcK{Ig!R^*a{}oR_j!!VG!Q%=%seS)(cTj;p zVI)unX%YYam;b9?F>(`@e`^8$!c+S}GzL7^|g$uqmM`2jERJHy8bO{tcI~^7yBry~JxGXn%XT4olITDliYvHGP0YM|o3oM-1PLf7G5qcz8J@)zcu`-g zk-V^iiq+|npX8*xpZMT4l-ApBzK8}u5n`6{Z}WvD2VXLP0Wr@UnH(dkeaIK@Re!2& zZa7REOlJ7FB2ZFWXEeoI_dZ>HIWf2GzBq(2ckxdCh6otGo? zRItJ@%J8>+Xaw_S635}c;|9q_@5hWd$Voxf?K3~-;y!W94=vRZe*$(u&VW;QOfTci zEo8XfZ>=__@i&Z}wWuJh16XND^C;n8;NKEGrCrqwYRDOF&UYu&?$>6-|q$xi#*4oPU~3+t$=o`7pcsZN%;=1~Xw z5gp4*4B&P(Uif;Q+dMk#th7}u7y3{u*wE%hxPx|b3&uoE3hruBZEsDtJ#_oEME0H9 z&kYN;5&Cnd!9pqL~AQLD8RRD#Kh!E&|2@Xtl?2&gULflH+y*bVqUL|xk*0NyR)cwxmrMsDC&0+WOh3#lAa77esrqthrq3t|bAYT`gNB26Kaek*(DswF&{Sk+ zT{0u$aE&+Vx>(>Nj1^eLFQ}p@BzGMbCk{3z_Xh-+J@8pTSsX^A)pwbO?M{7911iqY zXRN}Bi4SjG9DR0dS-153&Okhk7xG!omV&=07+2rZV?}9(h$^nYgZg`|+~!8m{(F0@ zA}@Dj&_$GS4#Yd5fI=v%R_YB72-pTpA<8g{J!(Eky!O6NhMzn4FB0oz1zrAM2?yav z0)S_K&&$Et^FBtYJ$jNDhpdLJ5U5PO;Ie69Y*lfaH3>R-5aHwq zDG>)5*oMRhiw5q67X71Y04aQ&gy?Fv(zW^{P%!&fi{rQY%!d#gYIEU+ON&nugSw*v zUN;g=CmZ(v>#y#M3AEefUj#sBuyXQK~*TNtBh@QspCTF|E?(ei0MiEo?< zYlBua62vwYC-e*rYmn(9Wl%>1yg0Re2)%@TmM(f%f#bQ6xJTU>z|yA_7(fq3DyWux zAm|MrX8HW@-epHrz}au=J#Y0Qqee0ba}GVJw|MQ(EQ7GX%iOa#^Lq!^`WR zlQVX-1+RAqth0nGYzl-{ggf>P;{`|>W(Gq_8W1=$!Iu)-CyDtl)wXU_7ARLy<`zic zqq8TNXiUwFwKpLcgQEj&My_p>LLCSO!Y9j?v^g{x03}I)r0)?TNj__z`)rrHJ$#O< zF-wKHhRcuiN)82FkOm-Q!>{+$){f0I-zTZ->D$>gFPgiCW0yX`}T?=__B~eJ?JM`^(aMnSE$uf`mG9|7ujE(gzNP&P<~+&9}B5n 
zd~PmAszCDy0fl+5)e}fZlXWv5wEY~rsoEdQU)!}5+TKS_IvIh-G`R0gi$iz6K`aiH(93%D@HC z%;@3d@Z@C1$5nW|aw?Aq7JW0{1GLo`l2@yiwp=T4%ZbAAmZ#^U97lAZduOM+q*%O9 zGJ=;?0bgw1A4eei^2No@Pj>A~&hbTImKTgA7H7fO{*-kQiuHs3#3a6a=PF}x1S()J z)jIkcyjEs^UySraTwCYEp)b-!_G>$M`0YZ^NqjnTM!=(q{0%EB?F#}>u?Sz9YH808 z2JP`u3~K03HYe0lDGsnv-pRoP-|X;{jNZg|F9x+mN*9`Ti%g5Ax5V_>pbu8%R-l2G z?OatZ>M|V9UiBfv)UR&>imBE+8sD>K5-@pW(MoFkH8!>vpg669HwUqM<(5g9qrsN|AfkxLeN-jNIn zCeuM}xBc1jwz^s!7MY0-g}543Uu*(DrogY;Aq5pA)T&R(LqPI5kV;;D{y@}tEh4D@ zI@!Q!WlJf6b-@vT5Gounmkntt2Ilkh<2kDq43Fjg?Qxfc%^T!VdOAw912S#eu`&pZ z-*@eEvrhD&BBi>|B$1qFG&k8~Wl20x5eNGqI-5iW)X>nduh;!f$0i<+(ua4;mQqVM z9Fimvbf!Gyd@GWag3p|j<%hT3E-Lz=|I-ik*s(iJ&rPfy3k+bER|jqvWbaalH?nvU z$G*yKe&W&YQ#tN@8J1?_$+z)~RcA5?JY*H6)RJWmU> zkB*HAjxhqSPALh&HL(}t`%$mb5@K~scPepkbkwo&+2+|_G=vU9=tTh9ITS$qOo?zC z)g|n^8@Fz0qUtqt8_|RYKdElUu)y&suH$K|qjw$^4CMD^Ptk(=dlmDicn4fCEzs4V zCwP>WmS)?6z`;qogVs9RX*Bzy@q5MEUxS~@5$7!Mod|M<4$@_% z=~9aH%rlPjxI-9Q0DEZZS8LO)L2{*MVM!uhVj!NxsXTlS-r+C_WvKOnI0&2WXW`Io z!qvf})eDll)wVX|`^oMYKYI+T75(Pm7Sjj!A~MKkyYQI`AE;ua6$X>o9l=TRmh0#b zH5>q#xIBW2FwA=nPX{>+DU}cjHDaXzxbKP}w$>uvM*_nzLx#IAS^xt9N^#EpJ3tb3pZrKA+q@(iP$ClQ;^pt0GeHD}dYQVninZE;`+0qR#Ecb-fkz;RLF zloHMc(`shsr@OX>Mj#4_ZzA;*5WhP>my!hnZ{3lHix({#e!0!B8^HsQSYV4??6xAD z_JiK5)Nnz-_7D;V3Z-SyImh`4KrgO%NS~V6q8WJMkRE2T-`YJ1eJ3Y2FX7vzxIRaC zO~6w?_~a-}0s&Z|S;{gjxlgFts62XL+*WVwC-E45`Z&nam>M>IYT8pH9s}`c57fmC zsh39h2=K`a(2AhUFyODS6f*E994NdMz_ad@f`K}K8H6%U8WMc495(mq^A0_AhY|3# z4;-M{)=TlAt~|x){iR14)ojgpZ&E4&F^Mh_(wAX-5bV#sBR#p$mbs~txg?za1U!8C zHeWmmyoU7NgF5px*F6-Z4mTCP;;&2JxOr0;k`_0Nbyh4}rizCjtn#CxH2>V@-{Y9) gk^g@`3jQ@m#j#D%)c=L<{2Wx_u=1f4nG>G>3rb{m5C8xG literal 0 HcmV?d00001 diff --git a/baselines/fedavgm/_static/concentration_cifar10.png b/baselines/fedavgm/_static/concentration_cifar10.png new file mode 100644 index 0000000000000000000000000000000000000000..0755ef8d66be1aa630bcbddaa5d0ba12919b499b GIT binary patch literal 18925 zcmd^nc~n#9x;NI-a=a?sbDRgDtteUr98rdlwpKBs-~=d;fFd9YM8*&T#G#c_R8UY* 
zfuMqdK|ur=0wlB)5D~&4M1~|X1Y}AifrKO^`JOkP_MZEld(XOGSNpBIR{wChH`&?Q z`+1-F_x#@P4>;J(|F`A;rlX@XAGhb5LpnMi_vq-%n*8%8@E4QsL>u5g%g|j%Lk|;u zLP^Jiy><2<4?XKo4E6Ur@oBhsaEKoyHpI%@Jm8lL%!t9h=IihR zD%@nw**(WXbaeEOqkpq&%(nd;gn{BqxoHucfJ0S-0&=oRdUudS$tV%)@Z@*>rSH)tAzBwMXX)s(v)k9_{t))zcoVetK5>*}14tN5ujZO@j*6uE)amMq@c?4O_a?Mk#l^*2mtV!$ zYUX-Oe;sc*cx$YLA9EW+3Aa^tvV~IenU2TxqaGT?%FyPohR4QU`Lr|8yO{Y_*9MRav|H6l2VruC9Xfw_HchqaX|MaLB%R>$zWs6U}I`* z=-5pc!ZG7@!m=S+V)ljT*uWIxW0LazWwS53y1L>PINB8KP`t~$jEP<ddHjyPzx9z= zO72-rvS-G9rJya^bK_)5l){bn@!Z7=byuGKvfym*k0-JUaX<(7)ztBail8Rj;AKvyM3VjeulUQ-triT!ROVj>#0Ph zWvoH?QGR!mwWy+pBC3>klrWuozisa>4sa{>?@UxlyYmH{<=B|0+5{tyACI)3CK8Rr z1^SrD1Buhln>T*LP`wVNxDca9-6Tv|rTGFAhgXTK?LVnV^#<{kz5j7;NS9x>;EadT zHF2OPgxv@Ng@;QSpw1*{uGc4-nBZ(>Wo!YBBCJPRlor~G!zHFvj(J9#NJwy#b9GWk0@>>YEeyrJ4+)y@M`wHY0i0#o9;#Dyi?@b0@b%})y)7aFPG z$6h{G=(A{ie7xq$vMq5kmPS?6pMd3w$Xk3~t#z;-FY4=XBbqu`4QJiY%*?!H8R9cR zqRL3+6@#l1Yz|fQ8;I6|R1X%Flq`WqTHjG+S|_FY;LxHj&D(l;_g*)z0Q*C^_20Y65$NHWAW&np))@IG(k89X+R z5Yo7=*{uiMMO$>cbuIeIg1~%c*D^HD@i>;HEgO{()XQt z)bUM0eLwZRuhuuWl@=sm+Kt?X;hJXAmh6(AVEgM?(=T0^hoS~n8LfM`Kh-%jG<2bP zWXoj*x8ByP_6nZ2eF;9a!4i|wcD8mdk$47 z+Vh37ie+1T7KRMITBnDZZ0?(FH2r0eT#&K^iEa7S-4lfws>dSoitT|L;<4)K6Eo8j zHDK*-?qtEun$;=phmele#4TDMcYkf%6StDZ+6TrZtyv_BsI!jKOtI811t;qhPyG1p zoEqfZVutn~vKc+b{zA`s>}g-3z1ce3sYi~B3Vj;CShN4mYnfo2#3TDo*ik3#z&|`| zFk7DoPQ9<|OulJkWTb-7f6FU=ou1{uwZ6W~=4N@NeR0fVS}zemq_}CHJcSVHH<31E zlfCCqlq8iumfDNHY-ME?H-y#5qw}Mc<0VUW6)5NK5q&&<oyK!CVBHOj#XuppzTmz?FN zdToikiaxNS4QAzI&-!*6DGqG%Yt8DjWRo5ElEBj5OMTn&$@{Nom5N`kzYHc@Tqm$McMi@Sn>6Qg}6ArT3Vtqy&`% zlvmX1VyA!j9M|G+Av%_ScE1&bbDXhA&_>2IDL&*ANJPMKT zg;o{pWXu=aEqg;_njA|!(Np@t!0N#(8%Z-)Y&Py-Jn7y!E{&H?p4X6v z(p|0?VPi^&)&tje#?@S2vSio6g`=E&Fi+$bNn{>!9meaN^W4i^LS~h>MstmX!rA57 zEQNKCikpfp|KUydcgp7+f*V-(`|r%h+Q~~}n6b*W9Dj=>V_n0Dx8P;RiB#))sYq3n ziNc#al_gJ&8g=8p{PDZFN<%CNL^3mkWr;YkD}(n8zPg>{p&D_CPQX)DQ{;|9+D%Ty z;5Hk!K>vO$a1>+WeaA1VppR3uv_q^$gk}jHrtD(k~V-7RnSt54Gav* zqj~Y{S(hASx(}6AXIuH_hl!R_DLZf9dY8Ocuv95SV3d~1vBFrv%6aMEfs7%WGwuXc 
z1Q}P&);|eKMpm?C@I(`j4rdSEy{Pwcw!TfXXac25U_1l$Ny=SgC?#Cxn^sUK;f1g< za)CMZ_D(&^lO;rv>a#RT2rqjxI7qIox{(;O#Yq0bF1XHsbP}?l=mZ&IOzJFB`ZuLn zstk!lBKz^N!fN~^%91uNf~ z2OL~T31bFy7YmlLBX*lrPPTHH(Td682N@)i{RObyZTV4x^^yxqDn3O)G#J9-gC|c+ zIqo6*lg;o}?>K?G*0V~|S7EL>_u)G-ET2*sn2lURg)Z0HYV2wCv#`+)o5|*#w>Nw5 z<@ZC%R9~%+?IF3af6z5bi;s_g4Az^%c^Cc4;8wF0*XWio1=2a$B*|0Z=0Md|`pVD? zXcYOHF4;7MtaS63iOV~!d`Tn{LHaC3&|+uQ_@sn4(m!?Q*;%96go!wp0eWuUOR=Cq z(lcaBN?@mN^4bkKRWc;BCdk+LvF9RFF(7%DB>N-+oJXqs!b@=w3_(TY|{g$f9w!+ix^eE(a z$4frGZ&-_&I-FB3gY>eKs!@eBSq=@LP-I4AiI_?;cs47XLS3yqoS`_v8k&r;5EivL zbdO3hz$2DHmlM!}?@0IJ4-?W`Mah1zddlHloIdNd@wvyZIN#)~FRE33-NczLkR@WS3 z#7T0jW=LZrz>1`Jq@zaB*q|$_0Xt6Gp z0J^#%l{fj&xW14!^*AkB+KDPUG18hJE_^KS)u-NKDa%<6x!{9iusSE7@+36b0LtPQ zo4Vf6sQDU-gU!TG{%@Xsww)K@Wvh|1hmNr$ol(jgHI?hp=*AN#_XIbXTFM8K3c*Gm zyO(W7{yd~@|8_wq7$_mVM5YYENA!M&iV>)tf)&HrGW^l-Ay~bJ2PuR*haEB7G~+B) z39_%UzPR<`$!u(NifB%F-}fei<$P5jvgVaB)4jJL6IZ|2*PKn^*gyait7R4#R&?qhLD8Br2>E|w3Ts9%Bk)jr>)B4fWpbjryLrjE`AnRxJbq58y2aS44D=tTMZ>hfwJYc?Buf|j9R-3^^x)-HU@*NW& zd$4TbM5Jt*LXOBh;)l73{+ixOBwGipY7)uWLoH5GRyy1k+3!8D(dV)2bNUWhPsnAc zJf5LcLC@xWRo_68qnS3fOTell_!4ly-)nP5pQf~9@K}pbeaX|PWipDx!G+o)2mEM2 zie$slr+1G&_56xeX9(gvQJ=WF20~MuR4RosC4Oy_t#S;Nq3=Z;p1yeV^xeaCiuWoJfDv{`=q-$0Z%LK`g;c>$_Qxu*`#`iE@ zmsd7;hglcN_kc9V=$_wk|JDF5Z$)J~kBha|$QSQA=pNSYRFh^IqLo5UX8ac@wLJlK zak-6%!%~M$FgTTY@5aV9=s_mXw3+k5&B{{Bo2lQR_K6-bTgqE-%tQH9Z%&MxPJ6yyRhaK-99rd7T5~ z!(U*i`SJ3@AEdXXP%BKo5bddde&mI5XQI2))Sr))9S>+F ze&*lOO1H}?5;$$E&}+p;hhT-Vy1Z@UjuL5SQMS_Bx+U z$<#CDALDl09<7d^pYv?osl*1?DsikY#W^_^u$MNb=jK{TNgJ#*gBi}4A8$o{U+dbMf`wgy$>o8Wyee^luJ1*OK3ESYG{q zHY-sdCG1s(D-um@+HIs~e*040(5VlleSv>TWIvs5 z_u}RJGf1${PN)Xyc6$;xc(ffxmv~=J1`RcJwY|pJYTF{mJhGQioI2HCM0%t!Wy^<~ zM&;pQf%5oEg`*(e+>tkO?lPg2Bj}&reC4O-_X0T=xPAh~#zRl?83J4)jk>pXKRq&T zlkbLY4+2M?iry?qqe>ie4UcC`{BcA{d!0R0Inq4E*cxr;B&!_B8C0kM|@=h-$y~Rg+|Kfr+aX&)~O$5nQe-HTUhC31X%%X-OrFen&}p4(Urjm^#?B2ayQI%MORCT_iUkk(J{`OvBg%$Z z$S7uV_E@k+l1=I#vIsYdFhl2lT>>C?65F(BNln&Ka5PpAiRFulqu)E 
zdZhJz`qd+Cqpjlx?GJi!(N?_N5!xBDC7r2fJXN-M%;6X(mLZLHloqR;wpKV?HPlT+$WA()N(iePZ53tN&gUOAh~(w!;W?8U2;yM<&aa29o)$8q9%me(r00m2AFMm` z8)l7s)iA5}6O8Us zmFsxHFLCkriR`-?DD4l1DTlm8qs`Nd$!HSYKgPf+<4CvS`WOYjoS8|d5VDg~j&F6> z|Kvr8I>;S`!l`}RsRQ%AapjCW<4gvm=Ga!s-5yo8h8Q!|g8+9mXE%r&d6m7t)RnL|2W zowX*##lL-G^s`g=H9_e2NsPQr^X*xrY_t6!n@D^lb4zr-9Va(167Nbo!k1k4CmSg7 z*@605FKL|T0^D;EF3G-S(@b>`S?E-V$wH1rwkY7JYiNbYUZ}JSun30yW}sf zSa*}ga;bwAM@i(X+sCvyWf9=va9Zqw`5`=admcUV+s8dmi&3(^zJr*D;CQv8i~02rezP_{F3C( zf!M)>Z$@*GK2};ITQ$JsSJw%5wr`@IuC09M?UbQCKIc%}4_zdQ@B?m}kz_$Uj~J*W{mlSh#CcHV#BlyKl!!)UXJ z-u=Ld#4Hc-rT zK6!N0@Qme`m{XQjZ|)B1WP0Rc!v86b{VwtOPo|F1%OSBzcHzGNq8lGe5Ir_i@ea0w zd291WyJrHYt&}Wld{}Jk;oPf6!rh#L>YWGc!bP}tJU{8We@wfnV`2cGkQk73T5R-8 zM|n3OA;51q;_A&>9RBLy7AUUyO<`TC4TU>xs!mOw4$p3NC(oF2N1yI0d z=6!X!%`iec08HBLzgBCjO9K?9Judl#bO70f;;hEh?Y@n7wym;QsQvVc#e&3v+7-kx z%3FXh_Cmzv=a&R>@1X9xrTE1vyQm!>?qa&exE}Sfjd7VRFQ75V70p4zQg8_xFA4rB zRy((KuI0qniT>TirZ@zA&A`COORBVAjIvpLaqpa_gHA_6w(8vo}T9}but!KUA>h4T>*@s^{GF)<2188Ul z!hUOXAZECKs8Z<0MXq27Pwn{TdWN%{fcY>Lu&>+w?xpYb)@rq4O zc@lSK=S|LqUdYPIdGu%bb60T{jFPBN!={hMK!poF=VY6Q`Cd0Ww{|a`u|kW0nEMa< z>&B-qo4OSbkl7T1z4@QD@1(pRGgk)Sjfs7Vv!>Hs`!`tOX#3j$0@}FX!3|O#OiPYz z`!J?=<#d0z>}{otmzo;LSeI%Mus0DJTK9MU{Z?(nUNxYELpA0xLGiYVcL=#+Gr~p_ zjj3W2Y9eac-<6vup=YkZ_({h)$To(Izwcg3!Ces*e1NU7{%Ia?~0iVP> zh*zkN(1nzCd_H3+>=dLQ;!TRCkvFRIN646*hC1=ZO=+Af^gP|LQS|`@AuK7D4*4uD zqyf9yEnF4O(z$YFFuBgvDh7-+G%QT;<6Lw9;aytQ@buWy3u99+C+dtc5PAv6UHwTJ z@>jm`%|}1TjIj>@&jn%_0a$bR;lBDgCTZ7pXI{U*Hk^C{q}=huZT-@}Xs;Y0{s=fj z20-e@xL`mRU2Hzgi(YY903%{8vdab%vmB zUyQsj`g{LncIs0AvFX0rkIx+>x+nHTNg6F-YNE~Gf<*ornm7oO>4_YD@*ir63TGFl z_~xR|G@)3E~8{9wDDmG>~1RSSDjy#%eI}Fn_ zIKVnH=0)$-KFZSm+ST<$sJ& z6WpDD3DNCCw_TtA?Zu71P2uy!8M1)_?_ECHJW6-Nr|g!|>Fr^|65%Q7TPLpM3s=JK zx?J_r$SgN^Z&GY-S=jUrl5d!BpVAx*eatuqcbxjLlTRphpOd z)H!XMtNr5@*M7R8_DHApUyLOGn%2NCCEf2*YrMWM_?{-pL8a>`THe@j69wiPBecQ= z665=VRh{EjMPxfG)$5Vw2|{(Jgt0X;XQV@#)x~uAW1=Nlg0NF0sIxNZD+np!GCInB zV`-2KM>~$ROw++DvSsbb`mqeW(e9D1jeq5wvkH&Sp)(Dye4`v~o+PYlmqKCt+Kc_s 
zQ}gcWpE{D`{{RqEmY}N=@6?!Qp9wh-K}O$MLVDO=PPcDJ8lk9p&-xZqrNkZnP_UPlX$- za8LdZ>0Q0Y`cfzJRdHSM9|Du}Hsy%2ol_o~ObtioM^}o%{37$|U4)cmlXh9f&E(M_ z%aPY(+(&M84bCZ}KQ`?aTWxaZT>)d{Go%g0)m^$`a;4|wndf!C^c;R4ut=Od zI!$eUuer-F&lZ%oO2(z_$u8|e#$+V%^}TUxI$aVr42?YAnH}YX6bJp?sI2d!IcDwf zWYOz%-uN!UB$WIS8k?rBA7IMXsVv(7{`DqVi8+Z$&jVDbk}iZUu7zgGA*^`GFg(&JLzej;K&5BKb3e%+qZxNO8Ele#DB~fFXm%6A zpf&eC%g_LIlCp-n{a~KKt-Iks%4l^wT`fA-%kVi8WSX;6MzUz-oWC9z^+(k>0TgR> z3rt1U)cwn8N4h7hiYRtr!x4uvlA(>1=fah2b|Yk)K{9t$xXt}Z-r+2PqoD>2cb|mO*Ug(> zKdGkVni*y4cN`@~BYo3Xy`2di!`7PSxBQ~(58$WMDIO0Q zcx&l%jbTpr)QOF|#di)#pL>;dp_bpksI1Z?NiU~f_iN~p*G2vwx!Nx^<^R4}m1oHy zGAHhY)nvKZxX>x~5|mf~W8WK&WOC0IRhXp*?&2=T9`VMnxRg8L6khId)vSBZHfokD zA%y@?1DNVyGIek9J~~2aoI@JCetUH5*W_xy5eohr-Dcm?k@LF_8slKp)UR6SsVvgE zr~AJgH)TSv&bS_pu`2$mbK?|@7{gFg*QnQDAt#;tb56Pf241rc&I@v4K^N^YzT5&h zNuJBE)*c{ybndSUcU`}_V!M_)QW`{&rT7J9Pgw&D$0A zZLSidGDkKkRziD5{yN*XT{hZ#M}!EKyoq?yya-;e&5|!Wi=t#h22CNe=ILViDsrsE zUB|mk%*`O}V1Mjgc-Hqzmb{TDMtt7e+s-%xk#93_R?=b=!*ZUC=`wZdVsRjsQ8DIi zzT$O=Z1DaB%2~P@O>vYKG0Gjl#hFOY{+pi~9h7Os-^Ulz3WUzwhA^ zXlx_W_|s~=257aV0RM9!A6Pd@{+dG%jLD5>2uZs$TV$h%|89vIyey4O)AZ{07Q~KO z-Kk130>1Vp;Ac{AKvx%w_{KnpYmpVV=elIWTSDjc9c2vQtNqXge&*fqEOKfDJw9$T zSv&y5^@`#B#IEr&Lr{r|iL1m{(3#r;{7AHb4H#KZn@p<3d4R4SjQA12_pG0#yYd=h zK#X=gS@Ax4&XxVHOclpf?@D+i{2!@z#YcPI#H=z9j-9B!G+&-Vm8WQF2~wD@$Ma&E zztWu_F$UtgrM)*GY?8JN_0}O8B@l51DrA+S{fRRZMw%etwWqAluh`sQm%uh4^;`!6 zzi~Yxnjr{B$=Af7I|4?)dI1l$@ogFKMz(v8cRh!pdbie&j$~liV}TIfe2%RFu3gd! 
zV#Ovz^^g>0V@z2_EG~dQ?PeN}7KPR#l@+hLlDUL@5B31(Hv(p4L zG3xDXqZbGsL--9^T+~b*VyQVAtGAmfVep(bn$NQJO$*vB1R5I**824Q^!;o#Qv@{K zo{0XdXjcIx7>c%Oi{73o69DZmnFXA~dLSvLqVE2DV#H2F*WeO1oG-vNJYNaLt2_IA zi(|v{wCKyO<(#^h_ZN-Elp5elpG04p>Bp+Q_}{%nm=FcHF-#5( z$ozbXQPle%5aFL(yM=b$7?ir>+|aS-r=8UD42>mrrdJ;|WLM)5MREXEdD;70K-%6| z=*w=R%FQ*aukKz2RP%R~?$2UZCZ1I83LG*|=z8XD&zyW)Pju&@}fnIKo6U(R?*=^hCNT4whPqy6O zg-Zlx-sb=h4TZcuXtx@dyx~|P!XX_kbvK5eDodUXv;3o$8x?+K$%VuTQ|~tj=C_ru z_mJ;(u$2$&OSjsFHX*PDLf0U?{}@GC&YQ|h185I8Vu!`BLBm#{k9okJ0$6~cfFNF9 zZ<3LA*^IFkXq%jKLt3sO-~J(MpyGJ5!Avqm1dR-|BDnZAyz7y0g{q5P#RFI7G9?#5gZs zN-3(qn(FRej}bY-!tQBMwguN(%_W5+#tB$urTV-k_)TDU|UiLH)%Vr|yZ*%ll>S>)k{tHdCj! zE;q%$J>O!79@Dc34b!Y#dKJBa?_-DvOAxzX%R!1;pZ7upNFtu62ELY1TG~xyVL)wO ze7A&f%NMKcM0;%jOM5BVpJ9t3k{jH_C~k;iWaS}< z5hryI8ER^Th;Tb&X22vmfeVbZ*-ag}g@s#uX=I-~reXxBk{<)&CgOM>Er z9PxU$b3+dcVRMWb2ojV)@MN=CE^mPC6Fl-Ld(crc=_t8qH1jD|UhfzTe8YLuvo|IV zAW|1nI)vsRqefdSfHgc9HDtyE=T{5)6JsQ)aJ-75vMom@h(K3_m_oR?4zyS^h1rSD`j(m>{8jXf;Li$=U;*|dq{9CF< zN!1XwOio}QzW|z+nC?<}I^Xw`S_@N7Fd`LTba!LnQ2N=&^E|;#BQ}-`kfAaWO$_bT zfGBYr&^t4SjMoJMIiKml0@n08FeLR|AT!O51Kwa^>DaS|IG8d62=N5k{iLtv^5x;S z>?LS>3|K${2saA|%!h$2f_6LTLlh~r;{ZUOSvz22A&##Sl4du-NCu-1D1GbEXNZAZ zlLFrC1ekS=cM86^Tb{=lq4xo-zZONA`!LTIF>%?$UAr#)5F$XMeZcUHMB81UFM%(r z0o?5f+E1ZN&@*@OhP6D{FF>Y>M0bRk;l9DODCZV9f5#F&CAJ2e>cXTfC&Y8IE!fnT-@NwU_-8+$fvy6#ms+&#zfKjk oAN}ebfwbTLS7fGtu|JX8Xiwm|)))PiT0|PR%i){6uRSmP9~F|<*8l(j literal 0 HcmV?d00001 diff --git a/baselines/fedavgm/_static/concentration_cifar10_v2.png b/baselines/fedavgm/_static/concentration_cifar10_v2.png new file mode 100644 index 0000000000000000000000000000000000000000..bd3b9db1ff11b8efe15a36e17c27f43910ac1352 GIT binary patch literal 21208 zcmdsf2~?BkwzfT<%IR^S)LKxc7OIE}sDQ{2&>8_jKv6^)Rb-MO%z=b})>c3TL>ZJ( zWfBmOc}$`eL1q~v1c(wKKp-*9A&~t0i>>F})7$&sd#?Y#&N{Bu8sSUweeb*9y`R0G z{p^SnrUo1Ty6vxP)~wlh%n)t9X3ggVYu0==_m_3>6NR7Id*ClkzoTdUPGOwXm)pet+e! 
zUpz9Y`TE+185hpgj=7zYQ3vyXj6TzGczRu(rffugkAy7Tn~{G7NlueWxmDA$|3qr$0RDv*A{9(3%~} z+@B-NqAm>IbqMIay>T&22Dg(xhT-qngqnN(#yO4fC_i5*Rj|TnP|)KxzNrnGJu9kG zL&cC$%A}>?z!grz&2B+M=t{#^Gujlm%CwgalMXl=%<@gUnO8`{JqhdH zJ94m4znv2uxy|dhNupg5)&!jX6mhA<*zfrRKx+~i(+A9B><^+}Yl^C^6nr(R< zzB-ANt{3NO;18X^#@kEzy*|9pu_-oiXf{<`z-J5W+}+P*tecI*1k0P;-@knt)_!6d z6N;~1p3Litv$VS(zvtYY#H6IIq@lU2BAJT;3HBjDH^NJC^OJJ(i-TC2Uz;Xls+hp+ z&3Bz0Yjx!PWNO`0C2WJ-g+CvT;MQbsilyvr3+NZSX@}|BPn@HnNq%BWV`ugr8%cABx)-o3p z9sIqm_scC4w~uT(2A}J-{6dz0V);lv^S;Q8s_0&;5e>3_I~xpE;vs-eX+?n zZDmBEOK;_cUdC2crwoTEO(#RjghFcI#l!<1R#v4hClmKM=xcH6HaCk_KX3F7@u|gE z4EPx5_FQ)Dcyx4PGOy4SgQrqEGmhQuOroufh42Ogs2+-Po$tbAQ%w)P`w8x~oa(uI z?xcyk@EHTu3bVsX0#wcwf3L2tp1D}dKuy^5N9{YmP)NR=n3&ip8uI?yMS5QDL%WET zWeVA}E6c=qmeFno3rQX2BI`vM-kc_JEhbJq`3pyCxd*bmFLL$X{PcrSf{o8H*nb(nIv_dN}Y-ItKaH8N9BY4ADFIT zlU(Wbkp!ylOiXk}xS<}O6U{3h1Z#Ai8EKsOWxdLt_a|RB>paCp^Abe@)VqVO6pvu% zWftRTw!~2-%&}8h$B&8Xt$5MOLj{YWW@0h$SbWFm2EmvH`54@&v0|p*T!(>MrRDbx zd5e{WQtaHzQPEnHkNsNqHDoARm(O|do{8w*zFcNiE#UKb;#}<4@C;7ple|-84r}%@ksNCh|F% zDg;I3mpM5(ii3V#CW@JQrfB4O`e75g!0_+FX}?!M<}cgUKb<8HHI62_6>RmAv#$1Y zg=I`fZcFR^v$b8;wB_EpHxAMBR7Txqv*QM`1i$5%Dj_j;bC1L`6;EYlT-&6zlQ1GL z+t-z#RI}Mkr^x;(KJ+3#t}u8zVd;geX^C^YX?}kGM8;jyNvr}79yx$JVB^NR*C7N;r>D~vz@LX67mEp* zO`>h!1*boRbJUWG(w4@JKRhxdNcL?zCKh8ipKF zyFL?o7>oxR@O$OW&Cq;7Y^N-_9x+TMOi;73y6)0*QFz9A9N{ee;&{v2v8#7x z*Elv7u58jE3+C+wadH=*)&Ih@TdXbxhr;&WquuFMXc{#dSn9ii95%R%**0x#5DPi4 zNT3zbF?~Vs^4Zg{9URPTndFVZ{6TOJK3cHi*gk#=J6HkU!yQWsBak{??=3`YX=$0G zH*nYEA>@IpVWi?c$WGRKg6A7qDMY2{=Aj|@K2 zvzAkD+eYE(no?}Bwux%4oyu(0ZeHpy+WfhFa>$8Z?kNoEshCd1v%qq7S0~ICJ*b0) zEod}cY>pi3qGn$rc}HhncD4cvHt4uQik~@o`r2w!OWWoq`DmRxR#sM>irLAXi*Db6 z3m%QLgf$Z^7Yh0-rV;lb7DHNksj}d~WU)+3oLY^XQ~Z0C8U5$j9Q?9Ito{7fC+|W= zVhk4Wg;i!DGk0NQ+C%xXBoE@|#}%=SM_@TKG>1CCA+%|;Yt@2gMm|f^etTtY=bi1w z=0=JhZ=Qa46uta9Ot-W-#5h42PV$)o8_g%)`_#rhn>{oUbj4@HGQ{8x4GG0Hx6&z} zi`(3myLcAc4Bh}-jm|fktknTgzwAlGtMFIa1;NkgY6BuWzzZ$oa0l&c9=;ju2iI!A zp7A%FFe7iB1;`L^ma$FAq~qJkc^n#jJWZEi$fNl*D~XA53b4dH+5(o$8|1VLR@ynO 
zm=IL}N!=BzkJF=Wh}XXToDeN~@a>g*8W=ky0JEq8FE(n_?Q4+QTp0LrcYvln+kSxv z@kogI0Gc+)^52J%7-m`%8R6lT)8*7*75=cwayb$eyx&|1n@5E#l%z>}czCR~)hHm? zLJE9*jeFXDM4_ zNEWOL?|10^b&WZXG=M0zEdN3#exS4St9Am*H*BtmFy$b7vq1*$9?g>mO!s1CCx2uk zCh$mUY3Uq_wYm)WlN{v?2U)(L`Rz!cmrruTSZfk*6lZ{-NcW-IDhxS199+d`i&Ssf znf;<0+H{@;Dv+<;r9I#1-zq&GG;^2!ypi)ZcP!AXVKKyhznYKELCrRYO8c5!>yotV zdCZyXu9X#KWk=CyXJ^Lkh0vvUVGaP0IJca2z(1cWae5iTn-)iq0(sgD-`^FGv(KUa zRtT+I6g77u*}wN(4T^nDR96B4#Zq3+K3l6R0C>HDSBu||Wet2*5lWY&g(EJ)8xEZT z7K^1V3^igZaeB^x0hkp;2(d^8V5^%AXz9H&1A%Y@cs3EQL-B5q)_AmUQ$t%C5AYru zy@Bc5{{9u`?a0As2#aVlv-T_PDDKEzg18k?Fv#((riW;=j@XS|eSTj=f<1!un$bS! z##)kVxCV=0(aK4U(h%7YEhTCiH7fzp2;kGIU8ou0<$fwP%)bCW^Y|;bpRQl7l!d+U zrV$o5GAkcmcZOiK*3(RJBQuyRwcN0@@?DtJoz>d4gxS@%B6{Jf?K-M< zOTUQMBl~p_0(*K@m1aTJSmN1o_rBY1mM2FYsG6nMwjR5OA6-sV^+Ps7=3Epu;O$kB z`f?BIFnFAZ`>M_h9_)#U2tWa)>}+s@wO zHkpQqw6m{@$ViCo8&9Ox>`>K0jKoH{L0g7AOYvM?p63iObg|J6BW6PDVc>9&?Q1Ma zGjSYQ0Y{cVZP#QZS3*7qkH1z_XK!zTZB6q6hhJTLVH-rNy4BL2;6DIc;r+`O z3R3|zP2aJ)g=s4Nxrpp7a970`9^&|s{LBF&@(W$Tg)r&zhA*Z8M%=`+sub*2hQ*_o z6dbiBkEKX@R(`k8=bTnK-)qfN3Z~fjwuUnoL)v5l-(Ed9_xcF0d~t5BZn7tjK62Mg zX5pYniLZ-@>Ycr+M+l>->6|# zyCrPH4h1<&AsB+c#pHUs&R)N<&*~+)(pucr^u7}328hOc-zM@P@N z!9b!YhQY?ccg;^)lC@wjBq0nl0{W1X0!fV-94a2G27^ZH@R#UEbe+I3 zPv-Sp1h7t^_C7hc*`C)=S@%RQwjT+C;6m!Mb?0+J^nT{8F!W@lrKRPUlQ)ey$nwsv z@F1Sa2#AYBc58sCewdV=?uY}duPJy%VM3dmS1?(z3je>6b1Hj*w}e2jmq<7XQISG6 zYiVeaBz{v|rR~6u07sd?;8zXzL?kdHi=6Ld<;kHwz2}@?YLGaQOOS(7E2bf%@qA)c zmP<>lacqu1Tjeu)1>12a`op&_ndOcwLEaFBIA_Qi2id+sJr*4~tRSlbZhdk9oWSm}VWBN!-lP|eDYy1Tf#7>9H8r&opfG)zSr|XBLk9BtBYKi}pl3RJzdDj4veP(Z6#M;GM z(b%)amyQi!X#{V;RrLU6sq6w~xL!V+446tzZWyUw0n8BG3J*sz`##;Qh~vnoT*skO z=x!R_Wjp|jc6;n<4=UHb|0NJ`*6`N$K>gr%>+QqU-FoX7dIGMyEIuITvwW+vT||HZ zMrNVQU8j%?RF{Cm@n_T05Kx$nWU&}BWYHPM%2nUDrf6%ZYiML2*`)csEo{{4!ZF26 zaI_hSgGiW7MxZ^M|EY@U4g*;eMp|eHlFwdtbrn`lv>3_0okCax-+=2hfQkT;xn~0` zP=iqI2hKYa`4)dazqx5~v5DH*HtJ|Cl0m~8P01b!b~sn$^ALpkp(ZrgYp9rsz{ZL9 zFGS4%1-tTBxD#-&j)~j)o$TjQFOD|FP5i79d?68F(T#ADuf1=p`f6JbvuStXaiso% 
z)P;nJS+n)f9U1&ok*zKkLfV|NgYa%4MO`O_QPd8V#?(xgL)8TiAiIJFo*%MOw}PrH zj5V9#_Tu`+bhUms{>>b(xv_u;q1vH@Q%1rv4tPXkBy~rsLb8IThAxN#*gkdzdDR2g zlV=y#-{0R^>e|(b6l=1O^3vD3a_0LXrM>Q4`D@ZafgCXbu?aQ*$?j4p`(;u{3uP04 zHEqcRTww#2KLDX=-7>c0sCeX%NXL0N8)11yQE&%YNuEXsYP;;=VIr(uLHxf5Yah^u zd0MOCax?EM&3O7diMIK<6XbwvTig1HL9=6X$RTJ=RNq}zUYDW@Dsl20=YkZHw>$|38$o|NC`wmt3p!t9Ooe z^yp|doLz1?yI0kXXqUgP97wk*z^=#67pROXw{BA=dPnHn)By6wp$g731 z=5Gvq-F&Zi#Gabe!q?vS>vVNM{ki^bfk~Wk<$*8`gA}=VMr%m=|87tJ4U_xrlEX@w zvHCveiFVNo3k&1p8;P>fezSiMijLC0{z}r*77^FJ4#m zo0b-yuY7a%h-RQk#*W$-{bq@)@v{!R?vu;<+aBI= zV+QtWR-8Oey7|O!hHSc5i>2;i{(#O4@G3;8%v2ixYEHJoG*in|_6VaNW6gAd6 zw&9v<@OFC@wMWMkQV#SUk*Khn!)Q!-r{yP=pVn=Ei6x<|7Nu#Fv8NR|I9t}HPs-0c zh&MNq*L-SMXn@D#{rvpUz*DfiKihJ;H9PpllEwD4;*IpAzn!F<%esTV_Y@ZEpV$}g ztWH#5NEoMWUGLv zKl?1zB7-&aviU~0^zn#b_D<(s<59fxW<_?U52<9JD(x>pDlwdnmcHf5nR%^By=D2Buhrg+IH6GnY z#c1l4o5gv&^5{I4ZL#}xS|_# zwkK8Nc2V90_YJ#vdbI6LK;Ez}LBU1R?B`1LBiIF;6NJ%FW zQBWX7XxPQdD}T)$J4L#%l6%Lj?>ff>7QFF*8btQ`)9Pugf8?02W#xX}t8(sZ+t$0j zw=&yreXd&4%j~K4Iulb}7UX-gJ2L!nYpQ*QCo$8o?`&69uHj&uBxOrZ&BY_wm}=~k zY1jzvu;bCmO*^pQr}55sywI;`&f3Y*GiL^OW7`Z=X)js>3Z|k04jAV@ls~DJkl9NM z_i=ll8r$|hup99u+qM_9Dpar4TP`k=`O}eZP3%ewReQPjMe}ZHa!wwuT*JQnV4`)> zZpsC}*y^zhE1IiCQBCTTDEsK~`-vCS4k^#4n#4t6KkedvL~8%Mb$_zO%|es#aZN#m zUxAaJ|7xpB92=q|KIe(SNIRaD9PLEPbE>%%BGSZa>)Ty+AgbHN(bFKamr6X>$aQ`k z0>Z1WwanD>Lbz z*tL8#{<2u}ZH-swaVRMG0sSNoS%OdW`o|A0>yAp+7w+Q5dPvX!tlo7$Lcas9#$tBj zw*dahT+4k!?of((U-Zrdfq5V89INU(qV?yt$99^!s3k z+ZBV3ncaz$yzYJtu~@4#-#%fGoOs?WZ$fpB=1ouVLa@vuIZPt)d4lX&6pJj{sA5na zRvZv5?hv$bpA>LF9#6tfoXerir%$vL()RWqdHlTr$+(uaE_Y&}IKPwgyCeN++D5KH z*GlBbGaON7dV0E}qeBeJ>QxP&pFW|UHnWj#85i7i;1&ij|FQCi-i;b+S-D)l-_GCb z(`C<=x8Hn}7vvLFG+M#xtf`OsuSL_GWi}#n+0GZd#V$O?>Fp z01Vy7mCYBU%50BLD%;mSF&un;YcV+C)f~EM7TxS!7!l7%QU8HtdJ9BAF0Eiyqb(4r=!Xz?KAaisk%8 z?rPJv7u)Wl5PcXm2x8eyuTr($)hsqTQ>(36qMOJa+D8S(Zsm>?RIr(Z)*)s zXjx#LZe2AjL3K~XTlqHp;x%@~yidm1KIFVV$2az#c)4f4c~yNBSGlT}^|-rwy~71S z=o>3n^OyurmK>E9d 
z{a@stpDd%_VNQ~tajrdGZak_bhbcE6(Zt3ZJ62jk>Ul|RmKf6b$R&hzwT~HZ<%~Vh z?H=FKk5(P1x{$$DDYIjoz>!|P6A{%tQ&s<4-Zbj{eL%vy!O`;WdABbT2K||5MX!2i zfS+hm$CaRLG{=72MXA&59gcAl0Lm=%8;W@9d7ENbuv!7z!>wKPSUG!~T5L9|bx7AX z#q5)({2x!mzZ7O2Ka@2>u{}QbY@B{g9GTV#xX~PZ_vr?WR;NwN)4}Nq{4cEpA5rw~+f`@^#+BFqv)9y36Cr+9-tFf$p-X z{s+R%Isa&a;$>grWa18lT0#m?NeD9nzbAsOKBI(mEK@ivD(x0qEFR&uld zp^}@}o|U2@ydieTVhsmq__;yu5YryWOaoo$mxezs=U2C}kjBH{@guS~7&;71Z=j#` z^Up};c?%ncpZ(NX_>PaXaTX94`VT^zL72^NWl!b-9AH1w>j4yH*J1SIgXw( z+Tx$HhwbtRS{b(Q2Mi$NT5g=*zCF&0hdnc6^=(B*;@urL23u2TxIpf0<_3RO-{49S zSju;4n7tp$L(+Xe?u{WuU#MkjPy#%1ZP_(T`UUrW#<4~AhdH^jP&WHea*fjj{b$=0l^Jc`#k$y#eXu^hOKXL>@ z;d+E3&5R_GHx-XUIe5$UfLekaiR1DNa=qIxDmxd@!M5`j_E9qrxX5T^+xjU^R;X9{;&$XfH%I6^zU$xo+!3A z9F=KmMT3Ffa>rR{rbaib;D0Vt5#!>fy%6%cw z0vXjKA++A}15AonD75@-%{!BSV|QWg{w%XJ8CzRhPLm~RKSEj}jTs_)o*nrH+u+ZKDra+edxzVsR(9!7<|E-&J*{x9UL3;Hfzow4 zo~WkCA}$UMvR0;?_3*LRKS&2Q8+SoptRys3u`iCb+!vZAtb1&yvUcy~GGNiE5*x&X zA68G!>dCW2`Xs&-6U(zKl%c;9{`^^8H`0hp64B=C34gHH&qRY3JbN3fJKLhVe$$8V z*-?;{KwcR=(%*sqfcc)iO)SUIbbB3y@44~?#vEO@NLnNwgf`emc7^QXBaoR$L01^( zn(WH?ZV{n|Va?{FKrOWC%(fr~u`&%deMOrvc`EG&)w4zk=}jbg{D)Lt->vr9mw#Lf zhh9LCRG_>kbFw#8cR`2USKzhH40MfzH|64WS56M;)&T9o>;rA+_I?SClKLJeK0IcJ z+EH{1G^Vnk?;4@p_q{cR#l}cMFT`5iicero_XfCaDbo?L69ebkQr?;OMea~|4ks#ra(-XktMg|$Nmp)vWtmvpT+&+$*1 ztTshK(*OGPBr54+eV23J<4uzKa{oqrUh7~_F-lPm>7$7z^J>U9k)9FCX7?c}gyLO6 zdciX{3(wRcdAA$kOTchSXbc59NP++H=5rYhAkdJLLwI1M3jyiCfVKr$iLR@{bNei? 
z_iHgucebv&C$t&|0tx(;xLx3dj959jN1z-rwwt+g7Mg{1=5x%X|Mr${o*uB$5!Rkn z+Y!om30f()C}tj9dH(`v?5z%(y28D<_hn+h&|dh-K?vXt7ZKUdnl*bbm~3MTZ+Fch z`9BuH{7WIHf?!6=98@K8Hn5Lp2{KL>U{4ESYK#;9=^oM8<`}2T8Tb44trdpAsI8Ig zIHE<4|I-eS!O~2hNL)Kb9+%}LVIm`c&R~d7OcR&*;#P`O-_UQB=560*!zTYmdHtX= zenqU0l#~6*i#`8*h2di!@yUuk$1{T2Pfia(GDbqT89N3WRL!O$tjVu6WX)n*t!8bf z$`LK?n8-^kiO8x8&Y1-p15QtBU8oF5#_I1kaEHa6kzi4mitd#=rZxGWf3i`7l1W+J zRh>2t1^POF&7jC$X0ra?Sjy!iReZ@QE0?Ft6b!01IKcy-sJ&2{$~aq7LGuZ5Pt#f_ z?)vOOIVQI};1gGO9{*2)q1o?}?@w0UQN0tK(1a=cN;5Tl>(I#vorI>DHX}#vM>Zsp zJMy!Tbq%>Ew7FT}n|Q2Z?arwW|JL5)bn7Cz z8LambQl@m48zs~_6Hk}R>ub=iIpUwTM4cJaZ#FEp-ClFe@o9Hr-z%e5F{1u{%aPWD4 zR@xIA`dreGgvqHS)m!UcWls86{f^lDXit9}+5Q`s{4au^nuXGDklA4m;eU|@R?VVL z7TfOcA~1H|o;I%nhjA<66_(|m&@>mNuS(nA>luJS7*OJvON+&eeXnZ|ys14siF-(4 zjm4R}2@IzXk`XiXAzq!ay}bH!(do%DTTk2U)Nc^>j+r5%60PQuW~8pgh!67kg8mXA zxx3r%J4{Cv6dD{)v?6Z0ZD#rJUfUg#?xW>|WUVAL@y;8ytkq;($*sct6r9i&kh8~3 zEpse2B`)>m_1pt^{^M4SstA4u4i1+V>9`!Q8vB;2`{I_4wU? zYrxp{Z34jJUe8Yq7Jg$a{uSs5{X1-P?%>IYJ<*F$1fPmB`cRF{I9-m7S7izd;n>2j zd=KT!oSICxF=X)x8Be7wQ6MEv29a-RU41D+odkLvEjULFA$MxRLu{NH>V75 zu-UO~r$1ru*8=t{MTY=SeaeZkE0Dr{AfDgdB&}2X&?mIE`u&hKbX{EP1b{X#XKNpQ zPpu3m{cRewc5eFj?pcv@;~;GsK=A3MIKu1)*10z{D zAek4}6Mwb%b|fj{c)5rI^|;>*>LAxi?(X#S#siyEf<;Y9lBc@Pik`2v20Bov*8-84 ziZXd+Ca||8b&GYmdxSNHs4f&iftu~CsJEe0YhqlPotc>#qPqjxW?C7Gb!)8ZjW#?V ze2;0l$PTL7pSUvjf>k>j7oJ^8(*KsW_Aa}AY||o2IHjh{N-1Q8F57U_GSe}En*_-qz zZDkfnnZm;ZLCs4C?YwKty?ycvFz4kU@HF&zYtOB7HqkmWhP88!LaS=r_Tk%lC}(pi$LTJdT_ zL$Qhc9k!lsJezn^djRVIN?;C*W7z#czXr=>n@WREUWW=;o}2KVE~C^R7LooMv|a6V z6AbPYDAZ9yuphwdxq;^L?9=myZTx#I_JNqb5(Juv?pIC@F^+K%@*)y7;Tj;)!FvZh z`X4ZG$kZa35OWhDG7woDGO7m=^j%{KKs2=rp_!LJ0=f;+TMr|eCZQrHJRFgbBBMAe z#}eIyQozQ$3X9T*Eh!cec+E7YNFil>q zI2=9U4-TVlqsG9OFg@T5q67qL`5TZwZ=Iuuynil2A4|eSfsP}P#v2mPP7A@U%p{Pf zcs8CemWIzw6KEEI%=RA0#SlUJO=K$0TGP}i+YxJ$>+xbGDl`hmW1x!?7TuUMiZ6_l z=uW{7-ItRa45UZm2Q=h5O*978#%B>_pBCsjmt_!w(H1{lwwA?v;*8pD3ubj>8yytM zx8>wkmm2gy)-PCHgpm!1)=NZ+Mjcux_v<{SkO`cjDAz$am<8xg90>B3J@h~;+ocW9 
zOlm+xZiq>c33juIlnlgppmh2{X0~PbnK_gI=B{Lh8zQ<)BgIT)KaQetV9@JOlw1pB zZi{Q1IYpsTcjP8JuwlS<<++}6!P4l^)&Pa9T|kw>;Npx0bIqSbI>K4hRB zKQy~sjv7@oivY#iyfy`ffzUx?$HWIin*eJeJ9Y{+QlrPsI{7Lf6b?}gn-3589d)(w zKqQ*PyUQkqUFUg9Aq;X}&-GOiDcIepz-c#oNyLhRkx>A!>~$i#@2|qyn4mI-Z>mJu zgF+lcqRt;F!C}zX-3SjC>ZjqTG$6`T;o;M44>Ap2?n-N0@K&;^+bG8aM!>SY!~23eo5yl3n2hF~rG&_Ap#NDxADZ zB}XD_E##XK@jr>NNps}3Q2dL~XSKk+Mj^u=)~33Frm_g4@Dj{(f!Q92ir>$j2rbFO z>V-ij!hvSUkP~lYo<)76r2KI7g?^xDJ)>zUDT48r(GVAq(J~+@54lr8mp%K2BZ@+! zA^f1xQvn+gowNI3wK@ybo+JQ@(OvT0!K0o*Ooc44w1fx;F80WVa5(E$ZN2mDtq zJKJ4MEJ*_sHQTPI7q4P3gG`TDc^{^C3vpnCcR?^`XDiZ0^Tdv*g^b&H=}|G*yExE> zSSoT{S6XQ+CIc`piqn zwAYYeb?cGdmq0ke*H<-paBL-z?g59MTnmJ!MBYG`W(PuV&T*LgfXpY+f!SnyMHX4W zUy2qiMI%J#8}<^M5u|$u115{8i^nG?`$wga#}&?n(*R{JCr!Xj^KM5i)*%yX?ty+5 z`IY3ty*};6ir}t$m#oWOZDy2c&{`Ys0ZPl}MLXA0np%jQRfVUm7Q1$~^Y2Gf0ejBl zOB^<;;UE5Ky>jKZ$Pqk-3==C1YIPG173v-uN;Nmisj1Ocf@x&ZptTfEr-Dfp6Dzar zX0Y6*FfIm|+3XjawD%wy)3uupewzYg?k14w8xQE#+W6!fs65OPSEMV(^PwW!JV?=sQY zPFt8VB#kD4thCq!di1$j`x?vLyDA~_EFLI}t>t_W#-1Kkaum|S|0y>8e?1}bk0hnn a1fn+xdtx_a8*V^u?$}XNbiohjfBZjIo(Jmy literal 0 HcmV?d00001 diff --git a/baselines/fedavgm/_static/custom-fedavgm_vs_fedavgm_rounds=1000_fmnist.png b/baselines/fedavgm/_static/custom-fedavgm_vs_fedavgm_rounds=1000_fmnist.png new file mode 100644 index 0000000000000000000000000000000000000000..042527a3ac21db56032234e22f9e2fc03c29eb99 GIT binary patch literal 38106 zcmd43byQbv`#tzUK(K2bD$qT}@7iHEt1HR`tclgIW>PwXFApxv!qTpu|(Ugs6& z72rWXeDdV6s~8`j!~gyVc%58q_$Kgh!r>ygkClyFQ7B?_AXiMRFaa4f~>CR z>$RUAhB`KM7dORm{(NV!8VaX%+_N!_NON{L*zjAT%+B%fD4L|!{Sn~Z- zp})vur`7a)&N);oM(5LCz5(cjcytg-wl-iiS?%wlq*HpLvxvr&vqCT$2hD=~8VWL; z#vmbuUxUYSk=V!=q$@H1@4skBxkz{!>-zQUCBLH-Sb9?hvL9l?$8jyB8j@&b9-Zz_ zGm}N$m}1vRFW%UUdh_AKbp;kuUtixFQjNCXxfWLcP6wP{?Xl@i#Yj-bx_kF7+m$Pi z?5iz1V!5nY5^&4{PAgT7)YS>vV`xK+D{a4wjnN1R32pA|wC1QJ#>dA8vMR;?I`Utr zhsSA%4r;_iXAytvO}nmD>tdZONKH-M(E9xKYXVHnQ-(iQg0w_TQZf1YoMS(J6i8G# z&)@2_63ofTc{sWE5ZNgzQHnc`PPmO%g%V_e7zzn(|XUKB7;(k 
z9~IU#0v1@dqwk+dpB>0s*Im_l_qn&1Kqv1`-@Ypusn$wcYj8xALY@2i09?T@90UEg z)%5L4A&02HOZ}FA7yD?gKZvO~*_!VV@jnsyWJJVNn0#hL z{+NXC0SUYhg}#?J&F$@DwikM`ZTi!3h=~mfQqt370|QaIrKSvmX#@*BDM}w6^gW*W zj8Q4{R>CWo_uePuDMm&{Xr%ome!0}DGTe6Mf$xU8uaAG3oTPv7;K9S6pKgAAEt;*( zVP`nnmB=CBI4NRy_ioGZFvYz(cSUven33aaiCqIFcZKc8KdR)O^h=*B!223}cl(t| z_3r+@(vv4QUR=BTZuhS>E&6qu;ksT6eC=H})$PYzv^ zCl3EEIsYEMy0h}_QZ&`IW&<<7u=scqPamJ6Ue;6r%ZnE-To@?SgN#-_ayk12o?3m&8cd^E00gwI8YaD55 zX$j9w)YR0Jh|gZJ@#$c;(tLL^cdC@1*sU0vpn&sJ0|#Dt)1%$haMz`;%sf2mMpci# zvF|JoVm^KPwAOL7-pkeZWUJ#~tAi=w_3PKAOHW)}f=zf0=zc3GDCkm%P((#Ug}&?R zR#8z|9?T9`HOjm5W*|p}{N!)H^a1<}1Ld=~CVRZM{#TtuTU&de&co^8@DKylc=Vh5 z#=`-W-AM752vSy;pN``nA1P~SbVQPKXy&R?q5?}x`D{muBjLa2;L2RPP_%KU$NStI z{M;0HJjNe!oSmJ^%E~(DlRJ0d?pnTnC4y4pfYo?5 zSRO59vbVSAw;SQX3C?i43yb1qZXv6n*!=Aqxs;R?SM{%7RtLw&*%ob4@v6CH6&1mi zl{YOcElH`V^CsOFd($GGJyU#k`R2^;-%+^a>@&Z=7uohRCwAR;a46B%ud+9)_ul0i zt+BAMh)+rig-u&1sbA;rz)688;k~1rTd$p~YTJ(?b0`6;5fK%oJ!RVHTd-Ao3|Me4*B|uC=;~6u;4^!#cgnN3w})U73o9$><;ziu(Ntf?$1k6q zof&?3aJz9FK-QZY*)aCE;uxQax-MqGBOe@ZG?fRO`_qd&M!_Zzm&?=~u^Y_7Zfk$Nt3K=yTXn_nn+d>)lpIZd>~C@$u=F7*kIB zA75jX@C@`nJ$l3-54&(?ef?fWEtSk7*52itw)bHlh`6uOv|0!VIZmc-xJN`pxU`5O zH&x~MGYaZr4wg&*MQ%{fY`)UVaD|h6_L%Qd{fQI9{xiNA!ERAY1Pz z-0KChNGwxR(?dcAfR@neYT?DcS9#x0B0kvkH@CH6zkBzNYhmf@o0e#5zJZUAzWH{@ zC@3V+{aY$A_q7P<8<4XF(eoW~ zmfcBLaR>-P34 zJv~K$WcVMgaQKC1c^H+pC9o>(FhrR`-79;2x7A)+TZ@qmxws-u(0wUEITRSky=ilj zK&T}dHZu4wOLrbId-=hCxXUK|_KJAYg)T9pzV*EpV(DdZg*Wt~F2CL!?~j@VP~PQ9 zNJvPP@DhY#3KO*Mrg-!mHc3gv+xGVM$F8m|&0+YNnw(L;MDf3U%hM3BY$q^j^fj8! 
z%^q}cyqhH9mX@AgUbCNrFfXsBi@D(;bsrgtySBEr z*(W@WW@eVn6N*&DGxe$qU74un8?SaM)j!KLVcvh`)F_*M>)CMks~eBB?FB-gKDC&t z_sTbI5Hq=V&lymS#jg>Mk_*kxPl7_4IZ94@UGb7aNGK$^5mtJ6oJUrPs=LSbN_VJDxyaRcB*qevF} z-OJP1)Gok9w~U;~wWgoP%?7a|f!O zrX{MG1S~@D+uQR15!Ezu2hhR@EVF3Cvo>sQNK`Id*@<`wmPmmxMX za`3m4yE`nS-tpQM4i0?1B7Mr2Hy&kaR|BUEZfIyAfj-Q0x6(ElB{Np-WEM2e#-$>a znyESd>B$Ni-2Ek2SJza4a(G-3*Top)hOsfj7zS}Fs3)k5e9g=YzJ7jNPC>Cvl#gJ_^B^nW&sa`g?gN@5dyLVfo7mjEkA!E4B74X@RurwiQ(TdkC|zDI096_;<##j#ER@R)2-ob~ zTrfU0?+gflVEAn=gZDAe)r%xYq1Es|S=JmIAD3fK%rtHAHmr7hqxxp;uMHKiNxrXB z!121U^~TW5myqNZ)r$6vrPr1T4Ga!ataq#IW#5Ro0j$ixwbv)AsmJS|0T#k46PTNu zKb&rq1WXSB%*RB{W*@4 zK8pwq#VR)Xa1q|2ZJa@oKA~lMOxK4u%@uB|`tX2RKvug2?d0B{VCot=ZA0DgXQ zBc-P=t~vz{gDg7qUxZcxT^Xn$zC=VX11jwPqer-)pNET$$f4EvHJXM`F1t+rfZJdr z!+lE3^qwaXG@sZLPGAcq5>GoiI_3eWK?I_D`}&gI{QUd?@!xHGja1#vdj3wAFzl1_ z{Kc2TPDHRX)L)B`f=)3kx4?#SO1|WP0(g@F6^h(80j(ewLIpvXmfMZ;O8B49=G58>Q&rn-@pH48En>BXhbbg2ZNP% z2B<(2UL|;Gq-12lQ2zJ9Mesa5x=BPY61X~Anr-{NuzTqzuw>IF|7UznCuCp@0EuG# zmcn7j%trGy%0M5Y;xmn*x@J5xl&i+&X4c>x2Ed*TRC*a2-Pzf(Qw)v31<>7y z973T*MQK4Q)_80%Hl7{X>Ap9ig$?y3LpFqjj;_evrPh7j82p6$2b;67W0uD&x#5-x z0|EjFn55`I&0IqH?hGnzObWwJL}VYJ=`84q&!1&LMc~PY6NP1GXOEO-f_^;a>9P_8 zV8cKO3JM}`AK`z}r#s~8>gp&f_8Xul5hDT-xgboxqA%6LuOECsYMwO*la*Iz&Hl;)Ghd!L@*LiK3UQ7x{lM1r`UgR4Gv zbNgPi4xYv4+K=lufDf(<5^^WdWpDKRb(O-)&UBcl zLIBiz_jeeyQ;VS-6}Xfl&`-R{2T&PE5@}Op@L^pR<+XG~cPG`ARB%-S7g$e!Mv&`_XQc2yXjJ4DI#!*RNxsWoAG6_O1rh zK#;~Kjn^VU@X_YKIukf`-Vs0@g#P(i$I8NT0dV#b%B0Sn5G;hNAWabBW@Tk{A2>}} zWu@MuRplgu6XnDO`zdtW(E}7Lw0|{>DN>ON$Gr z++6`A2@kGFYW(qGCePpfeQFdAFg+bR`Akh5U;xnCZSQr0IwrGsa0p|Tf101f$|RrZ zQujv6&)8_Rt4jriMhu7UikcdcFH=*dKwugx)_}B7qHsFmm<(Pt_@5f*o77g!tgZ2M zF1W~&U@>!YDx=UmhGpo{)4r%u-^1-@a0im(HYRIz;A%j0HYS^jGc{{}f%N=d(9DGf#g6DSYUJD zZf$LG9a}m&@=EyZsV^K(-=wge`5eSm>ftZN`u_cUxiCD+(9u#e?w(kB(X3sc)ZNR! 
z>IvyZG->Hj=G(uzY$w>STq#J#zIYLjB|+J`<~Ar?ge&DI;Wkffgre(xIFk|+69pM% z2hNUDckdP4tE=i3SVU+)3TO2Fy`EjS$mnQt6b@*|ua&Vvcz=%{*xDAlt5aafj(xC+ zL&@L}5q))=DDIyI2kARw&k-X?VPwUKnWQKX8DtIyS@ZjQb)jMS)Of)6$yzPS%Y617 zOZLR8`@QcNy=_`^&VBa|4@jrn{-2-8)hwd z&1u(dk`};A>e(+V@$l>a5X)dBuYfH}0+fU$qRryV=%|6wi@)M;6~C2TK?Q?udAu z4wsk=jf~8}wW$5$^a2=2(TQBzF83brzRGcF!082{9}72cbisQn>aqm^XWXXs8oY>- zJ?=f5w|-B7>Tqs2O;i!$g4u)(Z4&ochb3feH*wo zf<3?o8Y(<6P{vjExCRCW;78l-N&vGp1U{^4^fhvV^;V4L1n;p8n}7gmS6A1;a&{aV z=K|u}swQ(Wv$J0W>}&yNNB8Lr>i0CR!=q!rUO_Nmk|Vo*n02h}R`htUZy$5J-w8YP z1FbM}&{L+)t|N8!-T~uRZ37S(Fja@yfBM6lL;MP`$Hkuj1?o5a+X93U^CE%fl(3*+ za9&>CNJsPJ_}aI;xNU#Leg;mXq@-jWCx%)$$KN&)ZCZkAtMNS)1k*L&h29;6F~?iq zta1b8yNC-XIpyf+$S*8x(0(nYr`%~;3P4kF!3S~Dz(KjSQ2W8#-s8_?9{qicWzF&;PX*hXb5L(a@0QlgCE7&8yXsk`}}h zo~U+Wm6W_^WD3pVLWfa`pDXkxJ>$Kc>&uo*Yg-NEx+Z zTlFf^D|%PuP-*-9|0XaJAFpn%!6{Fe$gFO8zD%DC^5hT0m1cnF2xHd@Hb5wyel;)K9-ef(rsIqn`6iv&pW!qdKyi4Or8n-nW!d?jEuDHpmX?jLV%f8 zs--_EARwR&wd6Tj1aC*xVh}8C#C0_D-NJ@3Pz;d1Y~b{W8{RGO9vxuzBzN6LqO{+2 zufKwKnvTVf>t>{)`9zsHCTRXpsO9_3p}1w`79(Twhb2Rf#uaW7opAZ>3S{ z^woQfOJSAL*w2muBoTXHZ|z6O%*=iJsd`$#Qh~jV=~CbKFq%O`Oz}Hp+PcT z!YfNZ{qObk(IwGi57`MYVP0w{8Y050wG}HfGt)K>3?w>>a>K(*`9PDs&djGkHx%j? 
z1OuxnptlF!U|JKjGzU;ef+f393(XsIX8kwSojZ2`0Y~O#7S8}tsa952jBkG)2}i~y z7#J8{Bd%a6-VMslafW>wh=GZS_!uT>MyO~x@MoBDE`Ss*Ui0!yfe$D=jmlp+Z2}vd z0xzr^*Zu4M`Ms#Q?rNj{qDNt@4*HTqZQy=9Ui!*Ve7BmYv9Ymx`*jOPg?~x^s!06X zQKgBE?DvTHMkplAdaxV)<<0%4Cx8BQJ~?o%?4JdP0es=@ul;q|*{s0CZkitGPACdi zc)#7*3X#|6)A@?exwv+1IIp<>p5xow+}zv?&hNk_xy*ObXyCNV*kK(goo(EsC?y`M zY(N2Ijzz^|7+CANWC5NotVP*&2i7Qok4^e?aE(ReGR7s&_HJ=me)b#>z4x}i`Yu{+=fbuCoY z)my<9m|a^deqBA-;Nu|?a7M58f=A0IeeNt-R2bf;$m1EA^lE^P-{QI@h~k6gG%UN1 zfy#hw7O%{PgM*{#(}ykl8M?e~H^>Ry7gp+OYArxRVTwUWOiYYQ+X#TUgOJlyamCNM zwrIpc1P^JE)@B4((cIK%{L2>%US3|qCjW-+(mAC#Mr1|t*1c{F?le$hVKki`6{$*s zMF043@Pe$YEZDZ1UOdNDhhBaFK)-G0S|j;y-fW{b^9=%Hy|SYPY?3|UJP;APFh?64 z9OUXTmxjke%n7jjbiL^jBgNad&SN7Mx?&MeDJ>{N(Eju?tFA`gBj8NlwrHdOsTi`iT*|)yC1v5|y<=Tt0P}(N zrsQ#WH^`vSeQhjrTe8k=^~Jbv8o)rI!&^6(H-4S*LT@K9FA-&c>4zP42*y|q5UuXQ zN1!w`eav)pbjqP#z&E`b@DboKFfNW59PkW5lPxecWaQ+6yO)gql?Up1gWLj$V`uDhd(i+B!OVBTR^Og3L@{J6gcZ84RJ%K(`TH&STa{57weQwDNL? z385$Jxkpv?Es1)LY9B4(`GTF)&OCKk;1HBO)S76?S55 zZfViVzD01ma zVJ~)V#az($4<~B`Kn5c%EejU+V70UqVli~WsX0^Lc_z0c!n^nc)8EOKFWl-9*vZ*2&1%r z4Cn<~M3#?NITRJAyoIiqG-vv>FyHFhYRbeq5NgB}2caJZD}V#Z1#tf&k)*&ZY!=(~ z2G1?IgQL|_Y#^v#Iy;rYBM#;V2%Uu;9*nl)6&3aPWAX(wGgvrsr$@VC06}&pvBY6z zllFVo+;6~6hlv6X$SiWGeXIgquHiyGwRyb~fVEIYNpBo5#)8$^$t5HuZ}K*zCMB_{ zzY)_z#P$7$50U5zE&?FDeaG*M#%(_YcwG~I^)~kuucgkSEBd{`6ttXr+dV(T;0%4x z_YxPZ#$eFHnnn88%)v&Gd-yQl_!Z3X-XbXiS@uL+=;cTi52v$=i5b&Kc$VF_w!VTm z2OxlLdw3Sz+U8)(;(?=0Ng6G;6@=ZVGZ$S*~O}0QY|?6gnk7y#65? 
zC3mlTUOu3ukc?Ul2?cHG2g;3m<5wo__#j5w1dVpr&(h?Sne#4OVFMlnt{i>oN^s}icgy>d2jL9+1dZi9hw3Hn7O!A zbHBnRLczbmfecGNA1&@3etU94jE_$a=_6#-z+1*c1lD%< ztZ2%s@52v9woG-3q29Z%kMMZ;^@H=vdBb5`mtRPT3Y^xkJ8#5rk=AU_v?+VnZj-n6n)#c*e%(>rOIy;SqDEl9)!(%tLL9 zX$)M81X^Vd$%WnL2?}8==tpn(V8QE)fJR?jivV>e8i4lR!HG0K-AiB1sj$aHX9o$lYA#cyD9#;DJ7mnm#$H z09@0;03?K`r{{g>U=fj#K?Ma@0W1QU{0}g|VH@@ERM>b1d}5(CwHdgd9=ka?DJA9C za>%TCYk*b4Y4EY?vil1GDc zY&luujD(V4(i;ILX(n(I&%L#@_M>m_ZX-ml4E)SVm{6Gc?NTD4GKl3A3>V$4<^^*u zxtg5g4n@bGf9bzuV-_&?{3L5(U=K5s&?{;$P$&vKY%cxc2tZCt#1-3Rpr_||`>l(V znb8PvPB3CDVM>J(^x*-r6a))T-6Qqya-{b&N;&QTnS{>E55sTkG((=JP$tN%!ub`1 z6ky_!EC}=EO2$Z_bIm{`VO$372c#UDORq4=jt2ln8RYMj0Vjp$;r119w22dxb>n}6 zsAT~lMAq5zn!mk|$BIG&a z!jh9IVFU!cO|Ki~0_HY0dCqjeve(ASF=4vlxiyCkFNWW1+e)`U`^xB2IZUJ=sRW*0 zgUKp@2mG(n1%^Nn^FX!@QOUa@8K;obn3$NE+1Vh#BYu_#)2jN zS^E4W1g!ieV_{}y4DdLBPV;>ngTz-vgmSW}4Jil>q9Xm0W^e%?!xRabk3fHg#>0>L z491zY%RbOMgsD!#=oHLlC2*=C0m1J)r(pdCa*y+%{2`OrQJ|BDkPUExcR=lo?Z+XX zkpFRDLX#7<{IgCN-YsHkfF!P|t78=rF%-=dXJL`E#tutLqELrH{Qkytcv>1Qq!EB{ zA7Db;?G}q;lD-8YAsC0?gG$ae`tSgz!UUiiqkZ;~>qn>(Hkfu~N?S&fl3oJj=A3@z!+3s5G$Bg<>U~bkm>d$?2ki9xzJCK) z3x1y#3u0bXxMk#KO=4}w`nQqiZk?s69x+jncijC6LtUCryw`!gf*l+|HYKaOq4E zdTG`^CV8|)MvaH5Uvn8Ah9_m@a6um`Vu}cNsH1d|O#T_iad%>eFvnVVVsA0IZ*t;k zv01;~MiJY^w#`*fcc@>KnmAhM@=FbSMH!>IUT>6w#n!|xETg78>T9wjg43?n&D%nv zvc`C!F}mn$H=G2|>bA?ZLeCv}sJ(9`VBu zO@8&?0vHR6j#PAx@OgKyVQhJlNr%Wrz~3kM?Yx!Q=`XXRy>n5Gy_-_(A<7npBF84p z5v^o2d)oom36!wDfBPle^4R-mWjDnoL3wA-Ai$8qOpsM0ATcASQN>hSmpsXY?B!Zl z^me)ZZrE!TEenbGm+?00QPDok?#0*%1=fR2DENHchpC1Xm+r zD|*caR5+d=IRwqBedi!bsSGjKXwnv~GGimdxOgi~cs^@AZO~oh<;n8J&bV`2=gM0o zb(K%Y`Yu`6<*~%XAn=39;bf;dMqYvyMqi-wY6;3*?3kwG1vMtGTOpHI05PhwH_(OX z^EYv>HkRYx$X{>myTr2O?b{Tw-0-DqLiZ)?bsBhJ`=Yz#_cU(F=9tsOqA(^_hh?PG zc_{G{44VZ`yfeN(zA?=N5JDBT6qiwR`9jp7txrrm~ih9#y z#{1CEUoKmEz|C=9Vy9|jE4HBAsXU?M*5PiE5LGc^bg~GZy|q89RUXBd&C10%PNYPB zS1cc5LTY_MZ4P(CRmmb@3Dj^?3~SLa&q0?NjK!V_lW}H4N72c z!B9LLG-1oPZ#tGgcXpm&UA$-k@eI%HU${t&8nQsh5E|Q2CoxL`r&ml2|4iwGnN$T$ 
z{p+DY34{0db!@e?D?HXOIJvotP6B8j(MBk~{&+hl5{*N`vLWZZi~eOGkCv#(G0uFQ zE!ah3u-{w%7)`1rU}+(n8~Gm+*D^XvjgV%bUJ%oXfb=N(#tjM~q2JtBff3=tbn`QK zREQ>916v)!@h#9JHJkiTFH{z&Xfa{6Vp!l9n^4MNk@8qk+2V%3V^heT6!|bYJdo5h zrd`0?+9uStdMB2p&r^Ob%sTTGvzkvp^d~(1|T- z%GUHb`WH2J)TpvWjvrs-QR*Z;9{-lb(b$l2 zULv6I_=>u_^!Vysz(cvL>ee~XP9SZ{D)sKQwL!uhrrD4(f+<7U{Vx{~#}mR{ipt73 zKsPVenwZWAVIXlwu+hc-4Aw2EbD=oTccte_U6@c-7X`=Ceth}b%RAiSC3 zH}s>0bVJa5=%({uGQv89K23@dFHZdZhQ z1kZqf>gAWV^zW>%ewXm&_8GF^y&2Bd!PgH@VM&%Exspx%cI*>bD--cWDaPW)r1e$m z__yO14hyaYX6m|Ky^FuT_z;jGS>IcY2Qr)J1Z5Hih7xT?1_nZL@xG=ZI4Fdqtdvz$ zzJP525!OI3HM3#niS0;^%F(`>GnBcNQql#vjl^n6Ny%~$ zWvDdr5^f;;rA=qXMUf+5&F9~%6GCz>VDD3@zCv*ruiSmgg6VwK8Jn>?IVnA9Khf%2 z?E{;WpMRZbRBGt2T9aWhyYk+Gg`G{&<3{Roh^+r={*A$7mk(f1w}Secfdmc+usN7d zv;&tmx3j}HGcywr6JtSww~s-pfT4*6iFpL=kf1T-J7>V>2a|NT@V3JydedJmG4GMO zT*ZFIiRW)6POLDOSE^LW`~@p)1O?Jqf|x(Vjq`?-BYZ9%R<@)y&{k`pR-CAPI!(Gq z#3!_?sE+WHB2?+qbr5tBrGhlrEl5^D*b>A%wugrY$Xjl5Aa*zrZBBe7DbJq=gIt@1 zkRku|>+)cvflC;`nTQ8r2I98~tvEvOgvtcIOR)Qo3>s)KVNW-`v^!W7i*O-9`# zfBUqnN@|m?K}B(SH*LGCw3<;%nbcl8oXGUPWD?n!^}65O(l%t{rEfSXp;AW7$C&i& zch6M;8$!*3PWrfXnxXN=MX%Y_8+BLRRQURb#QHchG(z9Nm7g)IJT)z%zu^P)b>*#?>fSpdMPHd+ z4>9R}kv}{m3XrPKb7Mv}FtFF!Rd{+VMzmvT=0WdmRA5>H?X8=a!<=uoFNlSQaB?uS z5=QwfW!7BIGU*l)PjHo7;L>3Ue+k!F#%y-QG)vxyCpN7KT!D-2IVqBN2Lk8WfsTWnj8Qy3uJ*($@IE} zMiew>8k=k1SAur-`jF(X{*Hu}cIiiYM!dZ9h6KqzL0MT)jWKIP>vNV#%&ehEL~rt! 
z4+LJq;L882@zru~>&F=7CpDvyN__)$I{`w%HM(U1!J~MbNN3GSkqd|4NjM$p2g{Ni zKG>`MFfMZ7YodtRX|lOcuH}S(ZTg@Mm|3giDygKzboqO(J+T~4 zQx?!5*7yTF<=fr4*rb{xL_3M#Oj z@^VXQ7}L0(T?bZ3R80KK(3pULAPKA6>27VeeOfXU10v{cH!t`ph|T@arimJd4sfvb zE6&i(nNnL24gxdoF(PX$Rdh((%EHT_8m8gDL@*Svl6QLX;w=)Hx=_zHL4lZe;K zTA2~DM4As`tLy~L{?{edbmb`g#dgVGoK_R4tfb9P-E%MzC~3R#r1U2PS*&ZG92@5^ z5zn3;{aP2cdPgGqJ%q%WK{?hSVDd9g7M@$h6}tB%KlDJ)Xy%lYHBWrOxE)I7T9NTVJ7U>_gX5-E9{6BC}WV|%V ze~$-zBXlVatzZ(uGIu#Nd^97XhKQ|6-)UI$qE=h4mApw8B@2f^#9E=t1rbu@bMXIt z4%*Kv`2;WeD{pi22Yk|3Wca9j<0`IqH(mwzulf|)R;%2~}5B&51VEw}vF#!aS(d=zPTHto_YeUoW0au9_I7bHqO7*rtmOLLK2 zo*XbF=Z~wc{NsL4Veq<0|L|U%7MJ5LE z1>GUGJPymo)r+b3)9AtKD+g~w^O~Jl&s+u?E45jpG12TpI7!d zdHDuHyqL~y_K7XrwK7(Ekv#(6eEu5(3be0lm;dP&!%D|d@U#~}WxRG4jd75m%9d1W zk$aFijf&isjkl4B_;jhQ#jZP4-qV-7Q(33f-XZ}G8(c~0wQA`i%yNlU7s_v)|DO_6 z{q)33OvrR1qPAmc?oUr03WZ;Q-MDphaa!n%Y9Xd)MN}>;nDo~QC>YtT%|e$eT(eyc zeO*rra$P97wqAu|S~H~DYexQ8Q)u85q@X=GZl6wd^(As_pTt6?(x#c~W^2)mTG899 zJM7RiC;SPGxdB|-ik;S|B|km%!uFHh?gtCQiG*t6>~uN8|AQt2$G4a{%hpGPdZmQu z46;g5s2IYSExndf{@HBYGCs@FC7ejSl=yVK*+EUjPK>qoT1WJZ0!Q$a*X6mp#8JCgnkZY)dqLQ*k*ybVj4+diim+$h*r%<@Cd?IOyk{9JAsq<(|KBOd>!U}YTUud03MM!PBuLMFUJ9Rxux{QS-Cs)SPceM}ubZW<5VG0u~ z%-dS6PPk|4!LDPCjFqF_iR`yaVMs@zS^nDvL#h}U-6O|e;S57F96*C3*mQ6O3mY35DK*1v5E-OLD6oJ}En`#)^J5Xu&5Z5FpdeF^#QZVHPJJvGx%gK9 zl;XcSX%SyZ`*@Gt;D0q!k9^%;HWu8IxC6{URTlwJ^9M{@9qJotM|dU55iwRIdIiMJ z$w%FS%r~!I|Iz7=?acg}k%SIg(Vl|BfT-Rfqu8&?&c0qnsJBMs0%-!f_j)w;l2?e=J2o~-N1trV@56?`>by~^t9`6s z!N72$)@7WlktB>i16k&dCahSGq%u}CtVBC{t>*h~3L#Y@SlN5-N;UMozrgC>egmBX zhfa_aLJ(iig6T{5Hk^fEfzctvR&*yGz?g)UWwn(4y5;q}YZ>>Gl#1fJ>yD`-zL&ua4Cr{V zBctLV&_*+?CjQ#M0oSHb;X5l&h6LM4JLpe2M;{S*} zC}B9c_4vt?R>Zl7;j{H&Ya}@`@O&-lqd)JCoY6nnnul;0M2t4S8<-*4=I)MjIOfI- zlLW}lD!}X$#{Qd-tutxzXMjNa;27kkxgY_PygxrbuLWni8L^f_+(my2F4DgZDG{#b z>Jb}Rj(F6UhcFgK*6b59EDQkhv`G0_EEWkNdc50zaVrE!=+HkkT82t;ZYjrxv&^+m zbj^~EqFW_~4GpZ;lSB<#R{Ho0quiJUc@xugi|Mh^OK+!puQ(Hi^I5xSwMJbpM@*V7 zpJYQg*NKG$A)u6IVOj^tEps@GLL+2Pb8>Rye2D7*G|*A{7V3loR#fnK{s&>NqyTlm 
zy>m6M3r8x+8Z)EK#C>mGu8N9v^A_zha|hx0Z3#t2Xguq869*6m_NmafeN(HakjnN& zX=KZ6c=^tWHq1wuo^_mPEK+wk`aVX8KI6RrAK9kxQFOg$PHjb{>tdI^w<>Q*WMGAR z{8y=Vb1lW38gg-VD&3d$2J)H6Tnjm&c6L%7Z~|$dK!_!?!n=on;JhoOS#cn13#B!D zAmYOl5h%|02fe9)KC9Wj^h<7W`}MUq#?3&Sq~4zh@bT-bW68?s$_-ECFyFmE%1B2f zyKcpqXvp^TL+%GbXH9le{hQ`?wnf`#)_vuvQ!Zle(?6c!Cs3nM7*iOVPAg_S$A_8Z zWTp+bg{Ii(Jr5O*FA`V>1~H>wpjdFxR5oX3JWn}~{Srhb&h=Jhd9+HnshUbywM#A~ zV6jwsi&p#9H7N6FIbAPO&-wIh`Hh3BODp%+x;|lRmP$VoJAcLm!^1;r3YVNa4)4cx zGP5@S^A46BYS~y>Sz+Fc22@Bhgj4u%@^}VZ4{&U>wVxYrqZ6$Bw_JPT;IJbJ71dLS zK}n1nB8GwXBaFS^We6Mj5*Dx5Q=MHq*=mVoD%;yOruXxtP&yZhVyArh8R3=sc0E|x z@i|sGGeBR{x+Q#oY^Bn)y@V|CC@ZpYzXYr0qp#2)zRE&Ha(UJ_ zJVbyBt-O*YUaWKE}{1aTKGri91J)yk;Va0;7ncDA*^WvBCU+HnIp5s*tWg=g%*WQ$fi~rE~dJ8Xj zTY%Z7!KAvc@RfmN?IJ#Uk&P2|ShsY!044)|_gJ)t?Ea;W;oQRK>1ie{EiE`&tn~OX zKb(%iK!LBsB}oB*9HPwD4C!(Nj*wX){0tE|q8MB86*&(Kr{C&5w=O`K)&qG8Y1QyRidox9z zKV=Z@U||YfQ+XcGTHdYyuEqmnMRxkv&y296%8wD8PX946w&E3jTaa09_N1AW{hB1J zzvDxQ%D2|T*LkI($dn?*PZBUjokf9#k${_EoA~bUw`0wsdRwwgyA64BJRjPN)eDZw zsv6Cu4gqb#rpB**wSwy3ND$<+Q=3p0<~O{t-1^fR>&g>>_w=9iBU$)ky{*P0R)^D+ zeo9$>wHy^#Xk9dSyf{7YTHSBXDKP95DAxVkud~t0ZiN3`aj0tIS_AP9@`$W8KTi9e zuY4_?4Xn)9<7c;1QoOA9SaO_ezg6tgp7 zd8fOYK@i+-zrX+G%8`|r&xZV0tU2?w76~7wihi$7RkW)1hP*7$J|VjCna`(~n=U)e zk<1)Y2kD%CbIH}ac_QThI@o`Vnuw1n@h<`?&r3d^==>#pLqmvq%sC$z$#lDwKhB(A z;Ktxx`>vHPa9`fH&#GU5Li;AOq|+6%OnNB6%GBIZ!W%h9nMjDeRQ>2pz_NB z`@BHbAP(?n%C5f;cBLMEdT3(WK&;oTuT{4w4{EMVsN8?Uhv`ZApKURMaZ#M77V;;; zC(<=PKCOteW$jZTilocKV)oCUe_;kBAP1*J;ef;0mwTAOmoTz|)%uj%Hc|pQh&cje z{;7(3XD>umlzua986MD`cO!ei@CLgUJz8T+x>>c)Y;(bRsqGEC*Bke4yuYy{MUjNY zK`Prl?2nw}gtv(B8hs4gts|o7<%U*-ALHGi_zmYK4eQ*mot^yEfb($~kY+}XNyNgpp_6#9BR^A%QXotL-2OpdrFyM5ptr#nEFRNlodAq`DR()=%ZnfgM=@T zI{>O34(B$^mj^OwZagG`u`%BDuNG|~D6QEpO{@EVcl-Wn7Rjbu9w+?*TCn8Wqt5kb zG!K|XQbo~H*BEgh;nr3qH|#SXH|h20E%5zTyuaF4vRs?X(aIulhfox|6wmaCC~~=> z1$p)-stDsN^E-A&m^6ebk;6C0A$bKQB}+*5K!m}=yxrK|lLXD>izgf8P+eudSN@}h z%mT{p==Sn`S?QzGZ^Qwn=j1ngMq6|F_`Zw{7)~>jHW(lxStW=p8l(ni 
zxSUF^_QBE79CB(E5_%{U_}=Y6Q%D&Y?k!_bUxF7M6^q6>*#FwJylp^@LxQG?6j+)U zh0T{~%HE{#bNV+67c-tEU0hqR5dIDW*`Uuq!4UDcE~u!8`?9cO44A_A;p~p*&N8Y0 z!R#e#Z44Bo)Zk||sApbORbaXZKPrP;%K$AC2o^0o5J<~UBY&dCM=H~FllVbRn)lh* zfx_(PX&v0m_Krou7KOeXRB%vA?tevQ;V_2`ips+-YPsA+YQ%l7>%6|*>8Cj_9pO#2&zW0@|M#%{n%?gk)#lOPI za>U<-UEBWaOKC3C1MmJ&llaxEg5A-z!}NCIrP!$A`Wmt?`@y?NC@A^H;h*xds;YJf zRKw2!D%AF0AHNB4$ogvXst@y}_zDUypB+xq@Q7985Lwed_G@)Zy2)F#5g(y?17Xh3 zHyESgj5`f&fyVnAH*P3EL?{yre%{p;oeT|hVMp}$SI1*#$D1_6~S_YCs@MOyG&3Yj5yO`_HbC45mpsxG=NZrls6bRNO63r5@NCxbfE*Y#^}$%rNGEF2Pr*J&P+WW!g_sav8>TuX zuLphN<;_aF4=*1qWBbT1WnU$LKFZ=tvb{d?DhaGI(9f1q3}FBO**ZN1b|3r(5Ay#0 zq`#?fW%K(JHV*b--K%PGOL#B^YnZ$)SHUcgU}KUd}rD^;u`bAeJHYe#T|N%m%Ly zLm3E}W(YND*Ff)<9O7t3Jc5v)LQQ`#Re;7CYoACuQNEKBjC?j!-wZ2P21Z0gn9ZZ{ zY6E=)nwBp$*Leq^y0!*4zyD(@GuVH6cXwFhYX;Z(-Spt%1&@w#E9DE#glHD6>3Ir*ZBOktysMR#BYb*g_<*=mJVO#bM`9%{>3D z*<@xBm00aN;%as8=)E6gTWX5mprwEbIL^0RfB3JZz)Sbnlci0dN!Srqzx$Xl%&4rP zW*(c8ZTCtxQmDP71UI(ejEoHJXBin=i7fU^9E`yTZ=NK{c(588bc7AOXfcIGEb98G z*qPsvmR?>jjA!xKV*w_KjTG=zt-sveoO^;{Cy(#5jJ_WVVo7X!;&KM4Ggt4_9>WF& zR!~}B&xX4##ryx1VA}F3NdOXVfAmbdp0@j3#7+QJJs6^V>&8b1fByQl#pyZlHz=Jp zz0wK|sjmD)w?#fvd9viWUE6E-sYkuX?9CH%uo;AsgszUf>Vl3maO@S_E+7F4xqR7p zbL8dALAP(;u5_H6oBIh3dN4nW#Eb)Nk&%&_fTNfS zL6RQVc-s6`vec2X!~CnSp8{Q1{f!&ew)ROQ*&%65SwpFxi@g;f=T92Z)6(=f)w%7Y-b;pQi;6JYgRgiFCTX?5AUjqSin7L532YXpKs%u(%%&m~)q%daXr#J^ z#>>)D3L4>XzX@DuUH8~)(iA!Bx7(5Q%a$Gg+W7xkP_lA!Gf?_JJzyssTWHz01glQt z&_T^qvvhqHfQ_{CG26w%x8KQ7c^)y8-QOzY@}OZ^hUn9JH&g0J(HhO8%0gF5vr=Z9 ze*OoAA!?T!xS>Rp6a#~o0o4;q@CYHN9z?<+5O~4yA@@V``}d&Vf~{K>z4`QoaiUa0 zfs0dFU$}N!@Qqt!1%yS0r*Np3<9N;n@AOq=Wp8>0lsDZ_>o)4>byQzR#Vq+_!TUM@Wty6S|~0jXm~i?C2qSJM--Ln}^8@=+wu_gTM6b&gsg zWU+_JMnKI+$+?C#$B;UMCvYVsuf*2b2k9A3t_Q6k?7Bjz3;`;E^1w*In#8^ca()i_ zGvuh8z=(;I8s;W)vbR0x}dYGJvTNOjH2S1nwwtLXz*+Q{VSWHd1rpEviPa=P}fdfupU3^=~-i2tD_2ld)tHIaqQ zfPL&-9H+Ey=?aFAs`rn-$Qsg3)Uz+z59+~cdAH$5#y*3er3jaq_@xDAXG_|^QPMv* zH<#EWf%8|(Wnpd}05KjDf+^=^enrtwSyoo?v1%{2NXdY89uw4ztV>l`P`HT0pXI=` 
zDJa+hIONhkfyw;*u^2Ran@lp}C9O*-ojB@W;>`x4zF?pPAT=T85KwBcP#7TWVh2|w z0OdOq{!cKExxo7+jCrF|fb(6dESO!a|(%&JN#O*+`p+^0OjS;-~a=`xhffucFBlCw~<0!GRycAQB{^b@S?#ks+U*qqIlY-C4Ol zloezvf}kU}j34QH&kP}_VY;zyiWq=We8Gk$&TRgL&-eF2up)pCjjFC*hI}Vw*QQMj zxV0d-DUuWjiAW+|8FBCQm>kY9P*Q^Boj$mGV6$^!83cughc}zAsNYxI?nl%Ztz6+1 zq=o?G_;v-j?qi2f+FwFKpZq#jWa`(zrs+BU+CWS~>)AS$82FIC6v z?oaAW<&PiO*Sg~>_i^*B4>xUxGc1vRFW0;dBbjdK5s+FUb5W#p;Dv~u`#trJ!S|}b?w4}-w26@JV2IM+UZMmY`U-fJ9{Gwy#0JF@kq8o# z0aP2rXOAL%O;#6TvmnT7U)I+@cCv~#5NSUvMz%BJ@xz$*BF`IKtb#&98PiY4HGy%Q zkD6Oj?#8Ssn6W|C(4l(SGi`&Yihq@eTqkxpHs_V$%q9C-28cSIVI@4NmO$r0xs_}V zk@sb2KhAr2kV?H2c2m(U&>0fXxPAmPvliPNZmjwW^akPLL{wlEkZFB@iY=xP$NdzH zKgm&rIB$o$`8ipuljqEP^@z1K1+>#8%&AE_)0P|$3DhHfIHcJt4BL)Kt9Ckn)3T{d z1#%aLlh{0>I4o!juaHI5<#QH|Wls~b2tZ{xeUKw8>^*sgK#E^gRKQHzw7v6%NtlaK z8dhi`I9U!oxSw7smyl2mm+T?grRV7>6pcTpW%w%;>(3(55rhO>aUV>R2v&f!^rp;l zHoTzF5|fe>F230;B7Y zFq8gYdIecdn%WwSJ}3V0YRK>;qS{7*p;|X9u5{<4SFPSHh=<{pIV|%eJ%9XBzZdpP6UWl^jg2|Ege1@fx=ZR*p{?H(a^SR4 zhO?xhCd>SpKZ-D}#j7cli#}6&^*89kxtHI)tf&qrrMs5f-xKPC8@IYhd#E}Q-at^^ z0TDL{v-oGHOFs*7ZilJcYV}LFDaMI&-^2OOc?hUQ(X@9 zQ^CIiY|Zqyq6S5crGc0s5Ti{RgJ@qY7Zenff4Y{Re-)r85UB_F?P*{^#(-!!b69=lF@X)B;Zf6`?o@|9A&*)U_{iXdv z|0&(QtLmtrfLQU!s>CwoB&g|VwTk8S;T_r>J{$lI;Chh7VPF0VL}SvsLV~Q7Qm@K{ zSXbTri3jx#g+h2Psrapeo+`&sjuA~M#DI%>W zsRyB&1NxBpy%V{!sxVt8Vm`%}__D$N4Tn|`%qJh3WL?Iww#0Yr8$`G>c$PnZ|E8m$ z#fGGfokP^n*HAPNwhR6j+lP-e{vwW##5e#Cw+_aKuo0lgk)jC|Au*SLBp&WWBDM3; z@Z8SLJ@5=#R7xpSXLZoo6O%+3IpZuQ4r!;*gtZ_M{~uRQ927FUtFT}6;41(oz#4?1 zax4|_6Z5~qj`GiJ`yay}XlRlCvWkHL8-;)zXaEK4h^XP?5pY&T6e%KoH7qVZ*qCV% zj^EfjEpNXCEBHFfU*efhoZVm?7>+Ri^1DlY07h0sK;phk)62 zBKw_c|FaMb>~67H9-~=%TY;Ve{)GS*3DM9|N2`fm-<`5$(ZGLMK*ckKG~SIuG9$Ya1IsysA!|O7s-uXvsK)k?>s8p2smkfI&$q zDHTYx03R)dJAwI&^Q`bnsF`rzJv+FKSQ-)&2ul3UGfPNLjJVD?C;!dxF4MOoa&W-$ z_xC5|8dwi^5ds4_paqGlC*7z-`e3J#9z~tqJJ*D_ND#n3P-5$v2knKc4eXY%@CN3c znaF#%)#CixE3x)QlZ)OqG(@$&f$ZA5Se!NM?xUEGi9C4d5G{oWN`W{XnZ^bW0Jmau z-^BYkS!Ni#RDv3l3Oa-|M7L>_P+IZ^C;noZ+n#Bm 
zfMkd)P84mA$C?(P<|G)z&7Eh2?(e^I8Hgz%{lKes9mJUw3bCAk(S~7j2`qof_ynDJ zNT_`YKL@Ehb`i;9cK?|>i~4^vdnl}CtwD&U_~L3%-rE4CtVxK1t)MlMGlyAQB+aOd z1dxgX0aSyl2hn4zsg>`tqIc|hUe$$ipZKhT+D}p*TigRd%3#Q7*rtcT|I&7I?yK0^ zMhfM6yp{iz*~0y+HYEAK-ak_Loai`7?Tz9=3Yv>}`HA7g%Mb&cpBaB3EmvI+&y3c> zb9+e z+CY%jMXxbZ?CbA^Ht$}xY#9+W*CfQ^$*l*fiuh^JCMzTJ3eI>G%AdJk%=pyK?rvHN z!2#pqc%g%TtV@ant8&yd1oeQoKJh`8JY0Rne1J2cQIV7U7+%aptl31QT*O(h4sVaR zfiyI52t}po(LyCzGyL)49b0NStLg7{=@o5q+)Wzh7X#m_w%f3B@I#-MK)ZF*aDv)Q z{fqsKD>av?m@7`QzZ_QqwI5ALGN|c_B!0a73BA zIMEUPR}(w(47=C5zpXlR-|L^fzIinxMH%tPsCWJ04MG$|hA`-sALQS`P!qu0F0kPr z${E}^J@Ws%4B4BQ`6A_9q&iMWeXs$j8X5#q?Qeay4FDkHRu-&_7jsjuBhw6?F!EO@ zz5MKLHvtK9BF<5kJ-T^?p#w7XVndW~zsoZjHl1KTbGx@W|5oI>kT-BEMl`biUZ0|+ zV|eG+YIvaX)vR8#=7Cs~%aRk0ju9lO$1M_qf^nw#a!Sovoo91H_0N#$IypI&VzC-E z6QmKTZt}S~xi!4J$>`wK9D*gpSnGWtTzK6^U&IGQIK%Z}1xF?-9Qjj=O;lfrT|YWA zal*XNWlxI9HxyNi$0GPS6tbbVXwYB8p}dz(83A+B}V6@v}!_9MJ%eZ4qu_p zaKbr2%t%rqfUw?0Y)doA;zvFk{S=0{W%cz2a>D6=sBj{y0mLCQbcnrC3$bfN4}thE zl9LK)UJd`9)~gjb8HvXrjNQ}|OT^7Ka>CbG85}S2jmX~FiPIxi*-{lf*9{z>P>aIF zYQ-%bIuvX@+LTyWznpksx(Ay*`kai#hkKCU$yaD0b5T2++DrJP6zk6=w)_^euyKe5NJZF{xGm&qL5Y)Zr%o{E*7UUQY0D* zF)T*G6g*Nqy9#zH26a9g%_n zMTZ!niB`hXqxY=2!>Ds&J9o0YZE-2??mmT>ywYRk1~HkXUpM z3=n{*98y2RRYh!M0lb0*l|Lh4RZKNAiyd1)+CbbVh&$g!x|K_=+%iS;I{i_@7Np$p z?Z~kG51Bls5F}*H3koGA-GJDRZVOLGLoRn6E-_XE)UM{+o!>oddTs?WEwkgAGnR zUX^i3z$Y_q#Fe1USYa+}E`c~ki^6ap&u_uUhJH2P}j2(${IT(Zb z&z0Z6HwBKh7g2HA!ua$2sJ}%Ih8kTBTD4Lt-@W%{Pn4&o(Ac@d{D)Xrz0U*GB7i2D zWP$@q9r!@ZwT|$(_o3>pu|eWRZ=hNgyz$29`ot3H8`EK7VKqn6pCUgF1`wXEyZbo8 z6xr`EaX??28LxdY$uQ=u7T>;|gSg_7_UZKfCpW`pTqj1Qr#A$R@LSps46w5nNf2}$ zrcpgtQpjo*5oxv<%+BU;u+?wHaY>6jE}zK{ohAA^-oMuX)2d6^%Qe_m#EbO33b!cp zgdM#AIOz9eU}jDQiz1q;HCna-1Zlv(K)vpsd{Z)@00`Y+Z&v7kF|jZbZ_nHVQ=f&I z0STD=$kg^dFi2sD9h{*kDuV7G4%^{y)7i5DA1{x zY)%t?Q`>g{zZ_T`x>PJ|Y)N7VHmye1R(Jl(uR*wA`@?!<1-GO?ZJ#@Au`#`i#?#X? 
zB64qv0n*s?R(K4|OD!ai+8ZP;w@fXnykentm&N4}-R-oUTEe%kJ<82;-2tOCJ$XU{~{?m8b^^lTYy-H9DQ!_JB_D(mDf`I>Ko?&wVxfULhSKE8ZT&e#`hs$3utcU_3S6*(%mgZcmm>iCVBkZrD(d*SU#SAi9Ci4wwd-(!>mJ_7!fb$P z*5p63JnYOm)VPO^dPQENmNMa+l+cqH92y$!D5KrwRW0acD1Fe(tXEE5Fuakgx+VVv z7v(O>1_DWgpA#MVy2qIiG||7nNxxOOdIOWh6;0~6j{yUt*I3oYo#P!Wv4-~k-4`R( zUT&Y?6PYKMs<&x1-zxPDR{?(-D{>sn+b8zeVTX02RJ4NdYeE|@PUC39Kg5*vQT;te-E%~Szx66R2U2%sLdo)fTViC_S^u1po9G%^B99$UWNq&5jl3Tl|Iof z;737)zD4w=1m7Ud2()XN6qRuc{y&E5d5eDh_BX#$ zj%$VLiBKf!=t&`rcK>L9-Fk{LSd-wZptz>0eVo{e#=!XTURqt=_ zL^#XF4I*P*NeWP6riugkF4AC%FToX&(gpGhU^|OH(s`8q@S`ZDv^MoZWoQUH0g?L4 zqtTEB2cV1)`Vnu*YJe^jLOUd?FlPVzvzhV@g8^m37yl`~uUx;2;yYybPBYLWWI8KO z;_thlp>?w84bvQrluYVmm}~5m1q_x#`OA}wTt*>obfi;_!tCeIPBls?ssB)*sjkFz zq-C2{i?Mu?W1wV%)A@Pb6DtH8R&P_>nb@HxD*%zGp^hUy|MA(50MXHy=5+N!h8S3x z8paHP`}7z@D`624P6EJD)hj+#uO29TbP-@;;g*5yUps50q{%}sF;XccP0QPGbp55%()<)juSus@!j%L+Oq#sY7+QfA zK@^6Sks1`q;pZ3bNYP5sT9-Wbo{u!Ru}Z{hOz}$?b(Pa8pYk)qkA>Ds+SW2SMNx%> zzSY!fpsAP!%o*^DqK{yt0j5*vZJuR;`jcwt@phLyJ4n--2M-=JU+2CS z)GDk`Z{rxO-b5cg&|x#;XRFws64GLMw$zl zNK*6*_95fDrJ}iQEpD8>RH}y+unF9T-~@}RLF*@qBkQTn3hHTTyN@pDwH6u9$EIcO`W89vC#S- z#-s<`**y(u(Dx4EfZfau0P(0J%o*(PK-U)yA|$ZyzaftL33b60m*LhmFc&9{3qp8DGPk-r0ReJW z2k^>~s+diu&H14{vxe53JghY8#}l?ue~l?htEw~)qV@#xa1kxs6Ag=#P%w%D7{U?? 
z=^??KDrU!4F)>)X#hi$PZ3X&QCeIKzzQlg9b+787_?EK!?VJ>zOf19_y=}=Jjx0(| z{6=|Wqj5?r%p}kWaX)ooxK)m{!%#>CJ{CLid@gUzzol5yU&%{Qo=E#h>uoWk3ik)6 zDrG1T7gH>@JtJy~E;&cv@qnv>nT7{aU5WE2`ush_2s3cCAeI)x^*pO33fmM~eSCNi zFzYseKLX6%lAQ@>7uaAV=a0r|gb9(uw_C8Ix_~*p1f$icCilsk{c-=nL`f1OG|zLX z4HVzNW1L;~Q~HXaQy7l*Z_fdQNalJh(xBid0;C?;&TECIU(vutPf-fFZp;{tJ$%T% zly@-xk5}~EQ48Woi1Wup4f+)sN~@PFCHPC$0js}vrj9!!wHMHH8=&E&kR~HJU%jHj z1(lo9f%*of6={y>T=Kv!ei;^o$!4xl>eTO+(_~2CU~@hbW;9%cnJof7Qm-3bpW(JP zqrBBVm$Hv@-+7i<3i~RZ4~tJD3~pTiv+3BhJf?xz<~S^{V+bQ6qcy5Mo)%!5LPT=` zh6Nz=B>0S>HVo1c*)m3IB<34`X`~fZ14@-)Oj|yr7<1)iG#Zoe5yP-(z(=oN4?BOf z+m|5UjX#YT;ZQkudIjh0LrbAiE!f>9U&C_)&5?U4|KiTvX|~I~{2r&ZUC`tuRnOgv zs;tRGaypE}Dc?~xlrUMl-%6=MwyeEhfv<)hT0-zs9JqGHpL5}aEKg17YX4^Gq{(%} z+$_mU8li|3Nt~fMoj>SKKF{ED z7pzb750`Wag)m8X?e)6f7ox>mdo3Z^&zm;S#o^W$HK_ysYc!8ZJ`fOH8gG|KQ>Dhn z8npuo*E)Nn_2!A7uT!gPOcba5_Vn~4GK=0-P*NR4zJo0bg6&*vT2(-o|&_EZyf@fYYgGiMj??j`c; z_qP?|8kSUTjWDyXX2$tooOe2ZI>H^cGAdJxuy;P&soYz?=!+*0LSEJQ^^cuypP8*j z=6TxQx20KIs^2{P2PAt{!o&1 zs?dq99TX8Rs=PJ0hoEM?ekWpQ(k~u=!GqswaAwJ$fj>&CLPe?IOha;Yc-3(aFV15L z@n!k_Cy2!g$lyyS27e^x*mb`>oaE01MsVhsC3@&z`V0@xzxn2`CA&BY(dDqfj?zN$ z0lf?A-h^ps#ARYrQ03$uJkjQHYfgHb-}!B_U4z$hLv%xp()&y+S#gcVZ9Jc4eM1vp zE3{l^a*kln&mHd5F%Hr4jDL|qv(XH1Q)SM1hU-#`(yIGG_Y35^M2Q%65lpB(E zB}Cnp=&w$_X>}Uxgk@E)$t6c=Y@F}91|DqUyNd3b|Co5&z3u$iOsJXS-djB_2@IMN z=jA<;UhHvKr0P%$ts?{J=Fa({E1J_)!ZGDgYl6eRV7zC;rze>iBUi%W1sI69qvpze zklAJSWxM8|GuoGZ$~;jDWz9m_@4M8mV2S&^T4>a?XBez|NP)0da!~fSaN*GFS^D&u!)4oCCFADO`lw8 ze*Ia;+{a%x-E8?$oIf1NW)xG4@;g!2WEdn9Sj=FQzH!@?vhp^yBd4g#elePR+(?W5 z*9|e2Lq+j(`ZvTjJ6=7r>FYSvmpO7JZ)|+xK+Fk^xyf-Q;gpRJ8q$q3&5rJW6=$P+ z>dC2*o}~jxi&_mVd0Yn-bkM3Nhg!1df4fednyi?%=@W#=UO|k5h+eqe_2neG$m3o8 zM=K*C@Y9ln%Lz4ty7)6&I;URk71hvS(R5W*2u;K{3gT6xA>9SP2YHrUrn5dZ@XRAz zlVxc@5hHxr zpRa3eqqsK)$5Xo*Bf@*!?GD(US$OS^tIv(B?NT3O%J*WWLTW#iAFufqLH=# zQga*D9FR6}+B|j@R}*}OxuIFY&X5lGWSD8|=;&Z^cvj7Qaa#=%YgkKcRUC5 z)WMt&0VU!rtVlI(&+x=>8mpev7TYzb$kN5@FI8rj4cmcFo2+Ohu32@t*PqQFf`|{l zvJOwVM(a(#4f7@?l5+p`odCL)4ZU 
zjLmA(r&LS(-v%uq>TwQUh6233MugUi_IXO9`nF5FbEG5#u2HDwUdJ4n61lU3iPXR~~ z{qYr#uD%&jUUI(LF=ru${R9C<(n`9AX7lLbzwnx7Lub@KF;S3G`sK^Pj0E7k zm;n(c<4S7ue>34~(ln(~~1OQ6m{pbC+R>tF_* z%gk9yM+aWLs(U&nA9M~1BU5J8-#lumyBn|$VV)pE6pYd$a6RdHf)GYbk0}I8sF^_) z4?_T=TwTCiK4KAW_Z&EW8SuAgA|JnHzGz9@bFq!>zht(3@=7n;+W0XEQM5dPu#S+o zW3U);e|O}?JTcNDLAmp0JLdyK#s|>@f$ZeMIHKEvR54l&e%`dmXKSP}iwc0N%<+0^ z_YU+cD%iuI7On@ zo@*kn1v_@JKYzAw6`3x8a@-b|RU;4HQWY|JLg)nBIuU?BIO`ZrcsgeUU>qLb@x=d~ z<<>?Q96icnK5?^#kSw6BTn^T7XKybZl6~wLIxH1~MuLw5Y)AQKS&E6mXe$#c2wcW3 z250f(*v4idVCRmnrS4&0w+{a4OoTcHzApiOU{f1L1U~)eHmxQX!|KgVL=X6idjEgx zU;Z%#Ap>S#A0`GHcvWo&l>iCDssbZx6(J`lpaWLNk53}xS+IWB$O^4MPe$rg>rOJI zoAj&DLn31xq4v0$B^gP;Zh)1@@`IcTPw2>fYZ`$}%u%k+xX3MXSiaQR3l_ zF}07L7)KO4;x`=jL$UgVQr?$Z{}-HI%wytBzt1=h^c$3!L3^}lpU84x|E@)(dOK|ytjS) zJ$IfxdsgG816o2v$)_heO;wBrS$8MGA4d3BFYtfs%kTBnJI^@#zq_tYn{ZuC8Y-(N z+UK#vfYLxije7+gTVC6G$Xf%*{Pzbm+kMVj-5g0LsVVOg&@EVZR{NA{DKN6T= zG{fC)3gdPT417hK zF{DNp(eZR>IJ&2lLufp5#BX6#@q>*TJBN7D`<(x=QolvPfUPqq!BPA}A>Lra`t`JAbTxcG0Lao{ zv{O9Bxg)`8a39e0i^4Ey72X_1)vm{g#Sfs=47Qh8wcEsa%|| z4pqnx^Oh+BXhr%9_?En5dths;UrhuqctGf1(EO#1>ZRv`d^J~6G#eROxX?{mmvW^acOB(CZebX~G4nka_sPC6i$j)ZSuB+4Mj?gePGSY3f zI&>)f>Q%Pr8#j{k^Eot<5S@Onk(HG6#cL;QJ|>ONx!QM~mED48*LnGU{uMVF z(LVEIXlP|)%gs9f7}^Bhg+r3gcN-ke04j8Gap?d$=k)xmiu-BmwxGMmX_fhQdwouQ zcXCH~Qj*a0=kC!Ez+|HsBlN}*vloOmZq!7lT);H(y_s7atzdxt$&;%=@msEOGq>^U zU`cr=3l`+{S@4gARsma!K7WjM!=34tPPdmzAIXQ$$=+_i7-#7@w0)F}w_JYBzOj=d zT%ZuMwIFw73a=1)`tH^?dd~1=$YhsL7z9aIVAuTkN1(1nX86>FZ@%{7~Lzok6C*ARnye2tXV2Vf9feONVF8+l0OT)7<6a~BaXaRdu0yf zWHTgfG080L9)%93EmaX>nECm6lv(9|&COH#5T#iqfZvA3#zL~PTo|8&gnll43**%6t&+7q8;30goJ-GMl=iDG z17LXN+BMqbx{&yIJ~Dvn)~#C~S;gxvZ8sUBR5!8j-$j$vlY$Hy&76L4M~B+I^|)e+MvX?Af#Y6!(+!-0{JDx^~+ibZJ9-rg77m*Ba|^|{5+hv5**tSSUr+8uX<_oQWIO&)JSv398i ziiSejw?+ySp~9!M04|`ljR+g4hSXg-F&K)Rx z7;xAV2ggg{8#qsSa8(^8yy02s%7%NV}f5 zwtxK^fe5RT-XH{e6?B&UgM-ntvZq=@h{y|=HriyRw9LDFvDq(0M$%FeJbr8j6K*+h zdwJnK{=zUz{qe)WKHrS^Ss}stU@z1E;|CcJobBAB4ZvDRUY>`LwN9K!f01CBMNioU 
zw&WGzJ-dr1j5lR9g@=VPg8ZP8TvvKn8cR`Scw}%e01q@hUG3B<@t5W0HZnYCphr9s zisp6UGQ3=aciNnVw@h>G@$O|vm2xZr>te3vyTr_quh#4G!vPIbzD& za%^}!-CeVTxdj*pwM!FYqMKV<>OOxCktm*8($LV5mYGRM`P9%rX3lePaP$uhs99Ov znLA4;p+udD)SL{)1E;S1=TAv&5<G1)1$~4Jc=f&sp(BJQ;kKHA?}@sfq}s*Pz%3Fp*-XOop$Vu zwssgXB=PEPE4ffY5s2T{7wy?OdL9;R_ojN|PJi1$>4HpScX!R3H-UBb(UHgoD!^s+ zuH3aS)lGQAstkhi^72b5Ic_7C&;Il-JTuSSY=_-h$o>K-_d{0IR@X~!di?jLUfh>@ ztp$4e#0G~>=V#w`6j?^1uAbfrIUSC<>wN{TLp%xYy|?x>q3g&;1{jh5ye0^fa3Kyz zEGn|#zk%%o3frEho7#|N@_Uc#-`VlBCBnqYn}hBm4Pifm-3!(pw$>?yc3tp1`iF+p zU0t8-_rxEyZIqt}mGE(txv<+1Ls)s8j|(xhljIiIW1!6cAwl+ZLT!%l9y~dUFKUU+ z7hDe?UJ7DD1ytZ0#}%hKF8X9h#Xz|_eJeZ;i-B;H2ucGs8#YdLP0iS|3DGMHYiHk? z&V??1z~U3Y&>KIjNh2c;4Nc9B(~C_;p;6D<^{fGWh(blQ)lxpmsw-{ z@8=5(w;C~S>25~GO7wg{icPHDA}`-OGKtO-V`No#lPMNzJ6#+q+jbuGP(Ec(PledHI5K>hqHJR$2(4fg z!Whac3?QP#fSvVW;o+m;*38UXj(aG^#>Rg5_;FjJRzVEzPFHuOxi}YwK1BfCS#DyC zZy0n{n3(#BfniK+Y#6Td zTW@btUf$}+U1JP{GyMGPWkOuJD%^?0p%S8!NbpP8kZGXVnwF7Klza|-<{(g4Um)se z?cJ-UqT)AqNd<;8xWX&(@t3$P&(i?PJKqyyf=O;cKz>S^($dnDF<_0~XUgWzp)`?P z+%v`;bD*j&L#3;4Zhi?3)H_l!F)=%R=61TsujTg~%)xDli<3iDd+*+;Tem`kg6K$i zrMp-*Qh+w*5*4xzbT zYrS6A>2`E7@dR)#?yItsQ`mBVzBW4cTC7YO-u(Gm3iHL|)xlJ>*o@^5d3k#!A{QUe&lZ~|l z!K_JYYafS)jhq@gI?%MVw2JKHvuCHr&bG87_K)=#$z!ld=*2r;Nj-RymS$SKQ+I!X z@Qqd)rkk7F(w{#{?BrApL5hl~m!eLXSXfx1j{T_Cv!lBe3D?kQgDKByMiiN@I-|5F zegxjdhiKLh1-h^Mi%muDI5^Co9PS*<#R<6Yuiq}d-B{ChIvuos`!SuE#=9^5{Wt&k zSy^SX&W_f&&{Ol7WY_S=pu_i8MzBy%pFS-QIS&rppY*{eBKlb4G9D}Y{NtmGwx>sX zo4dP_>FM-L@_{BZHFt-LKZKm^FcULKlKyzB(_Q~yaq|;r7K@x5qlm*dzHJj&{snb* z{P65Gmgwl%l~2i>$nU>?&8D3xOCTd7lRv^N=O20P(FNf zlkoN5YInM|o*FHANc*f4^YZda%E;JGw*>i~98~m*+jc$vPd_GQga)IXpV zzeoF!g#IC?PUcK|6jA4xF*|t<+yyDNl8f-RI6Xam>M*&Nc7}8mjevz!Zwj}@0^E#> znHk*$MrniMgXrk!ULp!2)J- zp)j=TfEyD;pcu(b18!rHG*5%`cHEXms9H zExzc2UO^ENEL2`zo})n`M9;~|$zV&3*NPD>J^jb8Ur7>~AN+Dv%0pX zX~0ZIrfX_zql7lp*qCar@Ln^!)#OX&GG24_M?>E$DYzi~z~i-A$AP?na-@{bv$D%+%2S64TGI%HX&ogCJA{vn6s6QTS1IueAjE*d`?W)>E3P<7sGbpC%= z-*L6V4|jyq=DZ*Ux>zG0+57mCNV&_nxYi3n42WU_Q%+V$W_TBw_=Zy6`%PxI9Ly0x 
zrYdM^@%*EAZ@i|YzXKC>c07G<^JgI6r1B2I-?ec(2?+^^-BiiUlPSMN#=+9t^hHJ` zdK!F2C1yBmY;39gW^n$g;*p5t-di5}2)`rbxpY(1X#nlCutr3~&mt&D3g?*HquJEd zbhJ4@VhZw8wK@)JcWs;o@@SlfPhPF5nVHS%XvM+l{-icYwGR zzCuc7sC1E8o{~fR6<1wd9X`Zd_`7%S`dfkmW=AYTs30bAlt{5192|0t%PcgGF-YHN zXB6s9uTM7Opp+FA6~nq^gN~|DiU$kNEwPnIBO$7Zy1KfYYe|TRC^~E^1pRlHl>6of z-|1tbsuRCu$Q0d!O)$%WJqmAWX}MWuAve326+(SS*fggbYEvOTD|}vwo!EaBtgG%} zvi}O!04<~m;(?b^}+&+nk)s zvT|}2frs~d?o{5FvNw&5jcsr8+aYCOC|p4CF!Mk!v{#$jSemlP}3kg-U zCz#%{wzfW4dYw4)`!_C}UJC7 z%^8fA^Rob?f-^BO)%kAO)HC&c|4tT1BY=xyXJ@xM{=0Ut`o52pnK=;x_Ss5>7!?&& z)ZxzJ$Ieb13kwU4vJ&%J?V?Y*XIXcHAxBYBe}vZwLb`$Dj7v>Ttx+cDyNQyP9y?6< z-Jf%fR@PULot)r$JY5v*&Se=HdMIGsG?sy3jXoPTutHSiaatVLAi22j{Js&m)k#xc zS=qkPdY%B=$HK*hPeLck<#`U7$a8s+;P>y}CRL7PdiwfXP*UgMG$Tl5cW*DX#@L6X zq-TZ5@P=i`sn|Uim+!eML=1BNRc&kV>hINejoGhUnf)x#8kw2N6nu8*;JH3=>BEN) z*Jas&j-8W>2Xl)Lq*2bjIo5McBs=tlfMCSV4|!cX;D!a z5IE6IztewG$JcyveDZ9G5sw_Bcj;VVfUlT4Q znNz2?X??&S(2A9f4J%X$ljzu3JwG{7Q3I2fK)HF)tM${wyy$2QoI(}I@y^iY>^~x$!vA=>I&d<*iou;2c zawI}U@*0SpQx9slxPDHIXSiBO~KUN=hD=d97;(N>S2(w;*?Qd*L(B z#^U40#5n*gl*emaGz~eE#0~39Z+3dkcaiKaeaEu5w?__At?Q)wpMfi>YvdFZTTpU7 zKr8}eO-M+f6?3K}Cnratpsc+Rak$Xb+-&XYdcoG#)^l}~?5%bN&;A1h)IoXkSREDe z_4E7C)`nVNUmtz%kl0(3$gI#c^C3dF(c6Xg(xn{OS4q#MC%xWjX=yJc+?eRZ@8QGQ zAN~2$x-Ap~fT;dY$l%ol#$`+p(`L1<@kPdE&p!J-qtVybhvH*?w_8F`P;q6rB9zJ~N?&a0FJXBHAJq{ zFA*CTw|#Pw5&5m*;!A(mC(Z5W#k(F~!lIy{h>VItLRL^rtTUR74TW0#mhlm}^IF&2 zYV72aQc|<;`{eb<<+#|{v5|yST`fjTLh`*pmo0E_$Qbo2sI3+1F)FmEsAz6+XZMdj zYPs8la&hOyh|6Z5!g&W^(AlLWWVJk3h7mNxvpo@VFpCFJg_VT`*F9~Bq=2SH@GRirI&7yA(*uOs6;6-H@931R8T7DUU ziPgsdf*^%7-qTi$Z|a+Y*Yv}|hMj}+i=l9`88P7PciVGkio$O@xcjit>i3Y}%J5a> zq+h>&y}+_n9<~4nCHdetabQpo)yBt=v%|M&A(~yOR-X+f)Xc9>$xEXZY=C$n_lE_a z9w9A~h8N}Tp@9SwQpOM%-x73;^fs_)qAp{UstF9bKhL4bahz-rBPJ$Bsw#3=)}$aY zGeAoe3YlF?ORFDQ$is!_SB|78^Xf)20Bq-Ytqf~i-H4}`xDN467^v`DU0b^gc3$ZI zFEuDP+`X#tbU3he`KLZ`*?@r6lR3D1;X?$2PkaHW>2CStuzQg^=~u53dO?4ML_8dr z7ecnUySuyP_Fw5d{|u;je8>HDBZE=KyW{kDU&wVrvg+PXa)9`f?lUMT>Q;7kQNMoO 
zDYk57g5uHL5krB5E_59S$H()~vhYk^kL8toaKFh#93R&ky@T z0Ax7co2Vm$#;+@hO?4BX4<^dS*7kepZJE5cZ|9+Kh2`g8fe3yK3uiM@dfWH>bRPqy z_l^>FuPcUvQ}EuXpjNs#2{$*lWblb3q?HK&y=Cr%#-L+wK-)8~6XnCz*xN5EoP=Uv z;m*MN^#Az5yM3akr#G0Hm7i~ygS9vC<400vrp298n+jT_k-9JA^7)u8tM&K`gyq&( ziD|2zCzoFcT4T=5&u^`iwdO)AROd1OaG(b2jxyvqp*uaKko*`RAUHX6GKqtNf*#XL zTtGpo4}&^11K8@|U_Nnnc{vucG*aneVq$nCp3_UXqV4TDIBvYehRzjg1vA_Rl84}L zA`=*8Fi~R=piA{si*omu`Y$7^24&Zz$yW$cUKDc5KYYj;Rk-%fpm5tlSWqxDCWZjT zA|XKoxN)gJ2P^nwOBKQ!;|ZtepFe+I$odKMUA^kD&`Wi4v{D8woFcT(xuuX)b8Nd4 zU|l`}K%beL3okDhM4AtiI`_-C#I&K0E;7$0Dx9^!K}DKa)ONbPSM0Ihm-lx*kuNPR z&Fu6ksOssF2g7-NOe=)>T)>ilcX1m~=!dUgOM0gDt$Vk3ydfn+=;-B}zer5HI9z%= z4&D`V*|oKnq@*P1Zmt3HHn7;)+WIj2@sVLKl=(z~mIF-aqNk>&kSm!A*l1?$%g;k@ z9Z;+-oPAp1JNR%WqtZki;H|La{CBy$GLXP1xBe*TyJ=NyR!#WvAqmp|Z~l3kHK%ZX zcv1df28-`@17!p*iL$S+q}Gianx2nq6&9=Sj&v!UpF}Xp`L{tsw)6Yzwd>b$gHQL2 z5lRGdkAPQY$k~bC>d*Ih`fs$t-o4{CDmDoV!@`>bcoWNi4e1l%67e7%A^5YtzW#-b z4?p0vl;wtieSZJ_brKyN9i$<8qQ>sKyF_+&x^J1kHh3DO+dVi)Ngk7jUSv!DfV&Ycn{`-~|`KG2OcUVo8pK$j3%dM~F-?_XY8QOH5 zoSdv5v&=rg<06rkV&KuBytgrJ**`Ku)85`rX$L*;zXsu*KBvl0U}~^Y?a+i6EI;{l zC!7Tza6zdC#WvI)A_Et2}G z8CS3T>8?J2j`ayC6;ln3*qP;J9;6x3duISd!f3g51WM7Q=ALQTVmb|+wfxQP1=j&!0cP`Bs||GQK%n z4FHyUl;UWESCLKQ>gv550JPz73ZJ}sRqQDbc~2QY%)l1W3ZWR}r2H%&^(i(uYvHe7 zzkc%cX)c#c(2-|F$Rj_6h+5{?s)-2WPI?((xzWudyMPuJf54GVtw=IT$8+2;Kz3g}g@ zFi5&9A~$#w>amils;VRef5o@P?ERzh?WvH%`QF}Masaha5Uo7D+h_dEr)NFKjEk)g ze+|_`qEs?9>8eW*B$Wc)-JwH3efhwNg_Y=+`c7;i?!6Kv$2p-6tL~?y|O>RWT zKdFYPcs8mLW^AE4mDiXM75U`J6HMFB#{m!iu9~Kf;6fJ4rdm8-3-HID5A2Ickj z_;yIii7#HzB0bdR?8nWmEnT`_b~ZNQP)ZSWS_gF>fno4LAA5TVk%pc~c3KuF48x;` z!W-tRbKjzc9KJDh${$h;wujrC|nDm=!a}v%Vi0B%Yg5*3_14Z{bJO4{Ad&}zMeaeR@og>- z!l0~gMYAfwCmG^SLcw_C?&H&aemZ@Q9Bh>?Gja|N4qB=Ed2{uU=mG-+HAH2eG)O-p zp?~8$1sKT0#H8)R2c_@17ZkV72N+h?`uw18508idK7crXrTpz%&VK|XR5rzjhCWft*xyC6+344_BW;zD{Q-|C54og-g7jU08M&D zF2kmkS+gI-b zSXU;R{6vC}{%}S{MzU~ns_L#UFPkk((u+Ex6ck!nYl|#fTY5>fp(J7SH2y@Q>gRh0 zjgZINKqz6OkP|I>cbFK^BSKy385e~&1tq8YU>yRmXWhp^UA&lRlQBIGWPJ*Gdm;M< 
zt_er-WgxwPheAV^oV$q4vN6ViuN)@wt7-DDeE zXoJ5vblPtDT2`>HHst}lLs_L85aV$fFCh2R99awu4MjoT(}23xhlDSZT^54UQ&O;J zzLJW|QWK)EXe1{SnxLlM)7s{Nre ze%UI}ng8dwqTqCA!+fIehB}nRH_p#ZQmTP|u=(;rXlc4t0bvN?fZ+k^gZynIGgIHt zb2r~Fodtive`0WO(B|u>tJHCyf0^%B?*1i#le6}{(oPdOJth@4I4HlO=Kw6HL(WV7 zI=nhhKi$?d3JI;+WAaN!v+AlB=IL=Gq1Y67NDfK@dcG^(`rf_AJw0jw|K8}8T-AG- z^*Sx>5>PQ7i{BXI6K+4tl7JM3R#h!ZSxlu8mvf(**$Mw!22elg^w*ASuVqW1>6;i8 zk6;CyYFG9A0rDZ!XIb1g2wojZ|0p%yezlapgIGBS2DX9Jq1 zI(*~@(A)jNgO30iQuu2gEPlH>-bS>!^qmcq5zkeCa+tm%jZyF4KY&udFU_Jb4ZS8C zC+AxetnzAvzUzPByvJL>UBGJcR?*u) z>}D4g#oZk#wT31mb{n7xHXa^ts-K_VECgpP|JY$-^1i?NzoI8GS!}|GfXqkGjPagc zhE}r^7U4VS51o&EAk#yY=77e%NKa=<;;P?t!msn+3-DPBEu&_c1qEFB2T&O+>^@T? zM8@ziHLCx;gU+q``zqXTs;WdmV50^yHP(Y33-1D2Ss63v=tb_TXWP+Yt# zl7NDgp1x??-^Aoa?_o;`pMo0#_wU^+9`GE6<`tiq7@Z{_lw!sJncKiD5bpv*G_;^R zHyQ)yItXL~4{kcij*atrU~QY`i{BeX>FD&?OuIkJZ`#6v(r1_gr;pJ5RkMDJbn$@dxV`pc6B3M$Ybzx?Y=lluPb)zC0AK`rXV~=_5`&)I zc~knz$z*8kPKz8gukaF0s-0}52!xi)|1}4oMAC0NulJzm$rIB29bpNHM_F0SKnWEN z-rfp1KV}AMGW9RBsN<8~fGfPbM9@w~mX(#w#q~cORL{F%Skq|(rN(*jQ>M5J9U_pv zzzYB=55RIKoV+KgsrjPk2MCu6ImUW(<^!#SD+4fK5S^Nk@=?$XT!%IYk?)YE2S`T)@DKZEr;;b%93X^F1@7$%zVO{iH^aUnVF!)^0~KGGTByGeZWL*a}#46svJP7LH*(_e~Zll0cDTjjt+og8npB7A5C?!j=8 z@kL;<0jG*VX9*df7)bW`Zd)WPK#9EuMFc5{P5yg?t*3wKVH@6jUwq4jusF^Bdq^^5 z6BgDJGL2|xS>`n=R(y1Uu^)<#)=N=>($Z4jqg`$!!Nc{SZnAqIX$C}GTsb+p-9JB6 zT|#!}f|vP^`OgI)fa8xhBwY!NaUe5r8%;ga z$mc0xg#?C$$`=S}IoZ|(9xxa3(F+I~2(Su}&wPD-tw6wvT|-VikOK%u2jo3+IdHfm zs;a73Dc)@lR5|GbD#Qh#(FN%rVMXDWu%Hn(i+RhU`J7(`c)k7_7xTxENQ5Sf>+7Xp z_ur1XLZgVN5>p}PL4n(!IMsA?x(f}9ptb#|r(-4z+5m&peQbn+O5xV0vr2XqU&%R@vUR04zW5hC;gWaIo8Tn(A7Iplyqxq9=${t&`9wv~c8_#h|O&H+vU z(jy?D44nyszrR1UsF$Em-wZk557F=v2P_8Exsh4f9q2C|M@kt%yhq5hf4u(bE(H(~ za-JJgw<|Kr$^@WQc?=+qji3JlI1jp!a0YfWKSn`Ak2A^0a97meWpDa$S+=ULZyj`P z?x#myAEu`*ftDtF4Ei?6O@uW3w{E`M=!~OjheF9GDJl6+%mV<1uw$1mUj|w2FeMq3 zXYyAW88|U$)+)#E%Fy9ukNr8giTmU0@2r{uD6C6EMbF)Lmvle;_@NIBgEAnFp;Bi? 
zYHAJy6qT2klLGgL4YWeFzQCKeZ;2p!Qjvxo92x{QH8t&!cPRCDsA~#cJie6`{IQQ4 zYb^#{Ojq=_pN!7bZsB2H(Ej?QGDIXc@Z%q#OLd&6t3Z^>Sx8hoUKWksHylA{1!aN- zQ5zt>(|d)0%!>keO$f{^A{)-k&gKYzwbA9J-k5H+gd9TS>E)IB#);3d;pbe7s`u*X zWkhB`t{*C6Borci_?MDS!nIgD;Uw0~F93M`@QR9xu@T^aI1dgF37}&LPfbk~^q50; zioS3=zXu#3Thsa3acrLs^l*`GEr-{g(l=U;wcXv_Nr7rec|#o(&|s>>HMLlgZMeJ> z+|rq0vkR{&SArvCZ|4%PY* zfY^#MzU_iM+74xz6Ua6KIR5Rxkw-$`nD0{KHq8&k3eowf;ZAXYKAVN)0(mn$meIi@ zqy?w>eDL=9$M$w?1qB5qV7tV{#oM1-2EQ|rI$8T;KQxr)(`-z$0#G3Cv6zl-)(#{? zRmbU;W-CWWE}(e{f{u2{U@6{KPQo%rf=EUHQm`^|;{bK3B`1p;4hDnxzrB)Tg91#1 z$i(UC>E-@=&Z*KmuOu(;u8ioE*VfX22!<075MT|xV>>Xpai=0&T1xsRMUBH?!61U_ zNe%)dHqgmAJ{wcYj*bNy8K_7^6;8>L@zI7;l7Z~IiMcrukfVlL=NXpQuRlcc84<)* zIGU9VlGV)7;WyQf+MW=?S|be)AWo2X#RkBDgHWXK204%|sDe)r4G-df z9-|>O>G^XS0H|?tcI>*Q{9$SOufV@D^J5Z0-pPpU;(Qgp>z3Y${lajf=WXKphk&2V)s?5`Z z)SaurFCwBZ^!m0^ufLHTYwlAUuqz;6bn{>~`9Dy?|Bqzz|I06aF-#E$%uG8dWuOOb z?(KCVJk4bA;r0Ubrex6T&Hnlo*(ZNY3|bf6kovzHeEHqkrUI}XQoaGzbwWb?-sJZH zqyR{d+G~HuYu&n`dJ6ii-#Q! zREr}bIho?0bpxsm&p1Gc?z)h3dF@w{Ib6rB=f?`S0LuemEMS=N?>h3NZo4o-3Zhq1 zQi5tSKfKq=1M`I4u2_SSn%y%bx0ErR^Ttt2;1*)>L5&=fO{K=mX zw$jAR43rxu|3Y|UgKGrgp(!bpqvPY(R8?2x`eS;*Bo!AKiLI8e2Uv$Ib$oP`g`FL} zCeIwWi2=GI^r(3F;i16XBC)3H8O3uW162;HIRceGiXgF|Cjjm(P$I#$P;krXY6Vw3 zoB~@=pAomn?8-_Uuoj553~_~Qf)tU`Fa_nt7Mu&Qi|Xp?1?IKJt2^3(nt~n|lBUK2d@HZ6Hf-!e01=2Hq!b)lC=|po%hjtS zfEL<~ErSdTr7;v)li;6%l=QT;|1uoLSKJjF!Zur3eOp#`5fsV%uugP!e3%^&*GEOs4zuhqDdhz3J%gwuNjK9TO7lqznP(;B88j%nuC57r zdF%;@t1?$_UJhr7{ngF1e=id4>Inb;?+mmo-?^`6gAiV*9~`+-lSTX!kFpP?F9=HyXxo98;t$h zxBeX}yHNPuC+Z7VsYnN#4jNWBE-AWdt6T?z8=MULQK~{#*~aWKy&N=3FElUowp7Op{gDJkQXwUlOQ2IlUR`34#NZJM*8< z%+K;SA`N}zDW4K|qA9_>v@#vxhl9|E>v|eB1l!_HYfki1tTK=D1w9cK+MQ3Q{3==&?aW{8O(@np%^mTbf#0 zy3B3xtA^zx(fzNINtnr`Vv}{-=L!<~7RKJOCt*DUovZ_XB2(x|6N+krxTx(F|A#{& zQnSkO$fb7isMu88U(i65E+HWy#0G%$+hD^in5==~3jFY64L^`BJwRkYwSnkc?zdx~ zKXQ6{>H&qa@?s17#4%=kfZ+zcbOpbaf%zF%$pqg*=s9wqO<*{H;lpQE$SHanz1jWO)6SIg3XDJ zqEl&0GTk<+ULK6Sj_j{;QH%<3A%Ob?1ODDqNlQq$03J~Ht+`H{Au>|ZQ0tEm5lH>+ 
z1Uk6_{kIo@X^Qfh^1~yd<_iVoY)txp3RC8*TVpEh1!Fx5QyonyUOjm#T9;n&Wl`6) zQK#HFWbswZ%)9dzlRN5al?g3bwgdLBJgUlgILp0_%N2(4$YD~0O96HV2ujc+j?EtWPK@Du(L$0n`@)TBoj|0kKsA@p1*}+JPrk<}*6w@P_Mtp%Bty z&v|WEoKeWiL=%aR)%NbEln%f7O)tO-;8(1D?%u;_8zqlN-Da0JE7{znAJ&l)MfB6+3*=oaQ zMifM#ivir8hlT=Sno)`z$+Pe?VgewV1dk6X=#wCG+)3&N$8#I3s+EIrZ`6ErVZ`Si1-7)Uob{a z5T3nW^qCN`V?Ab)qXSoa7^v*e1S}Ik(x!kO5;hsJ)z8e#ghHdqyEal`s|uZ_6~w#7 z0(ib)Gv#qFFyI3$2CI)X@Mvo4>InH3Hzb?{h~mM62f%23_@WijITy{tR=r{(nQ||u zYvP{YeKH^B=;q@FuX+9_2Th@jXhoNiz{z(OoD9$Qq=F-&3&p1NZ{En=Yh6MJiNHYF z-(S)qpf>^va<(UlEf!<4FH7MOAfN&Hi%ha@pgAC_Ug{B0WNzbq0bW+5}!o4)zGY98Ier0 z)BBY${MKe@1r%=3>Db(vD?C?Agtp@ovk?0k-DtU~Wq9q z;0j=(%~H}ynuzta#pl*B$}*|TBk&6r_$(i=h44PI3sNjLPD-L6dpCo?4Krq2C3GIz z`24GA&7oaB=GMhu5gj|V_wzbGB_|l`gyB7!x>xpk-~Xh`{C44&j9* z4s7?`XA0Yzn!b~TNv)bgK`b&0UgY;_>XO|8f|3JQ+(d&!9Gj}*ND=IW@{ZDr`}z!R z_=*LkK3j$tGaZoZ+o6EEBelFQRAXdeD(iMi5~JSPrHvjU{EvGXN)g5LC3{vTV|cbj zk8_Lg>Z{b`_@*uTh9jy=Qe=j@O>eVbMk3P&ze!g}QE!HeS=y zzegO~>-v5KsusMD)H7n^)1!GvkO*q;P$sd!jxJCPN3p9E@c(_jiF#sRG~L~!;b*rc zePf}7gZxKqd`2`6!++d@fz80J!z23GJM(+<3lx;H!LQfN<4(nrbdo-A(W8fO|MR8x zl|L5dCAJeMqnq*ax(cQ(RE^~&Z#g|IUJYFRQh+rF;R3)_@uh^nj!CA-s_me zt9&}>R_4IY+?o{PT?nhgs*5aSXIB7Wq52Dxf)8;hGN&jO)Tbo3tABlHqC@c2-Vbir zn#fF##s}UmB=%ing>U~m+;TNkUJ^A07E50n`rIALqTiZW|0A89O<=3s8GlKGN8CG? zE@7Y2AX%{DW&{leYa#i4;a^2Dt&dl9MsO6tcvKN z1Qrzc_g?+kuF2$D-(U8H)7|C6D`FBao7BaN>9HPCzwBm9#_Z$oF_w_1z<4!YF%(&N zfBIq44V-&VrST%z1~v3~)jYLEHpu;E_XPprvS zyoqDyU~P0N{(tVv{KB1HV+!M!Im?-W!#rMSo3=`emefwm#OZ>y!U=MZ4bvCs(!j3(RJb^Y!1iT!6Y1)4SsK>k3m^%yR9B7^;~5Q!>fQ^|mM~#iKY(9-2?) 
z{PN9TU3CpyHj#V3?SiKWIXpTCXJ*3atcML>QRA_5CD}Qu=PN4Lt(RHFzd51%JyGc0 z?rmu-R#qNPpbDbZL|DnJ>^VaQH~zl4#eriX7bN1!wo6OuSF&`ZXZ!i80lZKFd0|1| zQWKMpwdqC20af{(AHSzmHXLm9&nl3U{o~)Tm%%m3+T_F>#mjeXmzFSXP6>%1!}nf5 zJoJUM|F9>DP>cUgeOuRhTZP*%-Z%;}-P8|A$HJ2kUqnIuoPF`n`Gj_jm#4q?1vk{$~f`Tj#P?jnYn2wLY zg(aE5g215JJUAJzgQPO^=TFR4^BNW&9zuA#A6&zg?&6{twX9E3DEQ|7tb`hk&bPI~ z-8H=sSa(sdjEG$xuVxqLf;L{I9CZ2nang!!W8S~7;ro>E;{oy$Tv z{-@ZF&xp$1ai3;lov&Ox5hLm0%Myr#+>mxXq83LHAW@IqR zAFql4yMhUZmu@gjlG4#3GeyX32zMnD@Y0~*c64>|q;_|8wGR*1^auSI7$~>xzJRDT z?_}LtpAK>-sab*fY?te0n^R6x@5Tm>vHejF$6L%S#fguM72SqSs z!fZ}BjQ8=@T#^rz1l~2SSkuyy8F*MpkGZcB>i?T``cEL8r&nK8#0ycLx~v5*Xo&8( z;`dFzs*R!ttBg>gYS?*pnjQ;9t|qc=SX1AsSyBlkFOfgu9xwgq^PS*}j601FYx1aJ zY;-~0E$JR}fj~|yQVfYRPPb-!xze>B$PHH}pRNmn%NMcr!WWLKEp!xVT1Lf3Z*;N} zpl^6>SY(9IiJ-P4l4XVMfgk1E8mqg28z7T=cV4=m!Z^12LQFJI*{bKv zFU*;N4HPz%l%*Y_{(j5xBwaBhrhQ6us$mR89QYX9flLHrjRDU&%t#@EImnjJoI=NL znhId5|B;e(=Ot_`jheS_`I*lbzlSUhZtZ=v?RYPR0GV$7?ul@D2;t^j(uv>Btm1xW zsP$DyutR!w&2L$FE<&yct-)jampU9vu2gWC(gS_W6Y-`fx6qx;mo`28c3GpwmAn?y zyz;b&OK|-;ZVv(mg@;FMBB7p{Xydv&Uv~Gn!qS7HFwfraQ=HF)a#fgC=&qw=V5ITN z1m-!Aj2TZSrpI%UfItbvJ!CBG4TkeI!v_3c8g`O9f=po}tnRGr)KYqfW%`!ISPJ{n zw5w~82_(hYpG)=7=)(3g)bSgXfbv40+(VRH3*KYryAu3`;)MfpI7x5dLy(zU5Np6s zo}hOKBWs)B$iB?QRiK46_^{BQ%s1^>{ON}zWIq~p-KZlAZFxQ%Uu6^=9VgpcdEqRi z3`zzUCzfP0ZQW*3(_VXBLepNZx|@sY^&v$=WSQd0IjOn}(mgZyGGH=kzPf6(q@F^*33>Rdih)TthKnS$f6^ zN{zby>w2S!G5_AcncMiBCBevI`E`6$qtQb*RsZ$|cl01Mg4exT@u9}bBz?jsChR_& z;bOP@#bt4K;TBPZxm51kM~qq|NV0J*pWvJMaT82e>Q*;-El;k$JR z^h2oiPc-rmKttPtK}*DC-t*1KVs{O)n;l)Y_+Va~qJ07-=X&>LK`sUd=B{}!9d6p? 
znD4LCuHyR1`^=gttq*3BwdbN=7{k+i!Y~;+v zt&g=eb!HdmFOr=J-yWYOCo_~irf$pop}3e9n!xYwrcgeBSNERY>a_w{l5&hWAA((| zc#X0FE`Z3Y;_hCay9)DWqcFMZ*J)<$d{j#m&N6e|m(?3bWu|O~EAP$%kHy`2Bl?J@ zjQ$@VsjnY3m_2?PtwscJ++4uM4K`u@D)WSk^XtjFf`P(6&>g)`k*q*`K;X1NCX`?R zYZjy!$Dfr~m=uB^13X~i=6=^?QzF=e-;|Wb*7?0r2ss8jiR6@)-!84hegEVKSB}Wu2G|-C} zs8Wk&Lg0~Vz9$V|T8jDJ_Zj1yej{#2WctJ6hFD}=KG{HJK0a}5+Y>RG?+e{O4))Mo ztQ19Og)3;Z%D@m_!H>;rxJ&9}hyH_k&m&$gM zJ_c2#ctI^Dx;IS}PqOt^%ikzIJi&65f8a&m3wA3w7+h0wQCb}OX~d5Tgkd-&!sOP; zu~huq^k$gWMuuI%pq#CfCI8aE&`Gd@t7Uif)=7{IZkW<+-R{BH$5wC1Z|}OgCc;^F zG#v@QogZD`AJ@!lSuG=Na@J(+lf8ex?TdE0uI`nagK`Kp3a(?!G_5yLOVP3RFe+qj zK5jGlIhCsdwA~M?%t45bj|(0cIvi7wq_{FRpZ`Jj?6wk_#$@iYDZTBJS_<4M&(Qd^ zFlsh-Rvy)XTQltfZGWzWiqZ=T;x!0hyu9eJqC#RcDi_6q?|XJ4b^rc-D>t``j2Yzn zH#vDE!dZ}~K6rR|fK?(i@XYKYn=iZ&8zeW733RXdKx$j=q0{K@=ooHbcUWN#3*P3S^8 zjN;a|(S1cP&g#Pb>A0#RM#cJeHrN2*0N#4K%yWfHomnm$z%_cPoK2J<#Vohfdp`Gm zXGg9h2!^0KAja@&M#6WxPxrZ7$vH}dB?dp@TRNJZ1r?O?VE<=PC;xg)6QV;JUk&Q|2~WuJk&T2dl=!(YVy$;e#A7DH3)o z?9V;93hOdJ5!YW*L4u@pO}4vBv1v3Ex>Tu~_<-&3TiHX_#|V~=d2fDJDYDl6SyVtB zx06HgVt?fJmz5usjC3YW2FAL;=th9PfLKxAO^l5-pyS>x=#JRH+}j24uxOHA8) zU{mFp>6xU}8of{ALY>voer&6Vh2!jKk7x9!)C_wk+J{uOPU^RXS_xl=d-j1A7H`A=^h<5TD&g=1d z7IuzHjioA98}VxFdG^+m^_(U?>jW=lUP9hsI}-N^txw9VxyHfD!^9}mZ!g?GBWHEq z-L^y2Hrp2#aJ*1LQ(d&-ubY?2g5T4V^vt&vcBqaj2^$69BIoyQG$)*`3pj^;m-hkH zSi_(Q!WKw-uSSF04l)0s5YXLoN0^va5CsmQy1+wWc*YGf)K*x+3j?y?rj7(Z9iq{| z6!I*{=V4$jhY65);P4^xm}1vpg#RJpdF<)IhpF+UA8#+hlQm-Cf6s%V0Eg*OuTdHa zP(m{p4W56(q5M}CXcvL3d;fD%uR9O>g~o7JcVYGm^}>hdCYHq!KIvmt<+W_m;^hxb z9=$Ii)upPs{4xa@m9{G0W(KyXFOB4;<;)+cnGJ!^ zfaB2MR*dS&D#pu6Pr>nLGgN38y9l!x2qn>aeijFPI8SfB9ydI+0}f;Y49Fo<5lIFp z#9U#xJXLOu1>Rt5yDM&9e}2B_>`G)#gxUOD(8-qIk1+NF!zUQT^y0+uP=trD@1QKA zdRkZi?(P~i5d7WR;@-)^c*o;mBLqWi`HN8Kq^19fpG)6gf94a+ohuhkvI@jAetdT$ zyixZPQdWaLBt_r&)?WC-+Qdtm`09$8l{+uz?sio~8b+fAneG*%aVlCbR2?;nA&W@% z*jWB=yBq(^4w<7{6%<(5IF6+;2}T;zaHzN7gXS>MrjdJFQs1CdiQln z`YBupQzAoptC1|uxuYHQm-7|=28rrU7B|kSA_oFSq=xqbN41%D@MdrBlj%$k-{ELn 
zFfWE?fWvSGBPJ=&_HNG;amrwEnN{Z#jtk1T`U?73ZZlrw@bWqq@sy8m(- z$Rhw_4)Qc7$gilje9h-6HBx~Gf-rde?^-XtL1vY~v&RbZ7I+3}g>0~qfsu*k02;7Z zcZA~*QI3l{58}fvs~8zkgCLEX0qkrS)R2cu)1x0@%U76K_e*QFnJlI)8(%h7(=bKG zgx1DN6OVY;JMw*g#XlGn$%0a%53`*{{d~8*C(!T}Bk-J!`Q%2bh2IScUVO}>k`tO* zw&X$n2v%|orVGDE6{?%NA}XiFp=u$ID1p{jV*weQf)lg_Bf-c9z?k3%U_6nr6@H%N97aIcPR$CEc?% zCLtb?fprl0QHoOyB7q0);7_DryZ-p**f5M3BHc68pjnt9C~G+)eDM1V#?2FcQv+#8 zLdbL;q^7sRnKtl12*iY_oh5(!*_IQIIbUJz7Y6Qx$+%AFs$UVs8{bNa-|bxb{sn1T z+Pm|`kt4dmir=<(we5RZ=qjb(ipoe?XpR0)f8ChWMNB{4m`l+8-=0}{#&J|GBEjd; zJoFuAqy@p5j;Or-PZW}$a{WVXQIJhaEn7F1@vtrYeM9sO?p@tJl@gyU#k@XUd!uM+puebPsP=&=b=})*lHA+`dQP8cyiDuwUje^8 zxJ*&tCxM}r{QfntqY8ol82USK&SnGMEg5)FKDP%mpYY5%ZaHub-C+KRLaDHu%l=Dm1Avtfdu$ zT!dwgr@Vp9{menJmp=oIBBh{+fFhI&esHAS`Zr|`qXFaAh5^W?K~)8Y;@^YH{#SG7 z9nN+C_x&%Tp={D1A<3u|4I+g|8dhXPq#-+GMoYF-N-3+%vMM7PUn`=BtgJ+`XBi>x z=i7PyuB-ERUDtga_kG-d+{bYiI{Es1KcDw_y+e<((SSoVpN?a=d;XP}57 zinl)$F;}i!`42^mJB~mQ90wu3X(yZ+%oVchF)}ii$Unk6It2&T6!HKZGrbUPf8W2E zDMPo`qWAObT`oHrNUCW4z>Vk@#c02_a7XTMgG;Qt^~~hB1HI#mZ}VTd7C{?T`mJhE zpZ2%M;y%f0bKRc|`eN~kq7BK%p!84x=>!0mD+D=kYs=SgOk%RXz=>0mTmb_)-h5S-BRLnC9^bS5NG%RtZe+Cez>^wH3$&u2@FX$p)Jg?1Pvw0uIYK~@Bma$Bgws>0p{t5Xv=RHGzOkY^T z&Y0L&o-PVgt_;xuz!%F0jXmt}uM;g6XIRT|X5CP!VsbX(doB*Ap(oX+fW!Sff5}5A4^+O6xrCS@P zj8J+3^I$~1mCA!OI`?3eSNk%}AkB#Q!I*R5NniX&8YAo7| zGpW;7@&ksJ%F2pTDhYYz&>ELUEBQ%E+x$ywV{y0K(!v5>_Grh5(x7#n;DwMypJ|<( zKM{Gtmq3oXfd0^w>F-1K^*X3W>#R>P$?!^tbrtK(|6&SYY51%NDWf zw;$Mk%t)1dZLTJ}x9HK69Ro#9kKDPLx6>K^JZ}|Lko4$dOZCEm(FCQ|qDz#_xkYo~ zG>VJqE=bcoJ_}(BeQ}@uG_nDzK5zD0g2@40J?KlR8elB)fsmmc8zruyr9}jrHeUPS29y8;h!4j<$6@~YehdtcJTE@X*e>VNyftb%Yy(woU0 z7Zp7;x@Y(9RVaZBEK^c)nc)BXgel0+{>47T{8;HCgjA!K4=t{hF#EBCRXg=AcYFI; z>a~VDVT~@dzjbIbN`_C!n_V-E`fCg?_%~Uy;knyjUIGHtRb8PuTUdypDo{6M zdiLy11PvnMqis(QvZLI?C!_y+PbKzUczmL2eEqHOs0aelr|H7NdGlqAL~MS;lI}D1 zB^gWedWOxIeUh2>cWR_f4YRy5bYtt>VOa7d^GR?B$-`>xQ?tBss`&~{KTVOI{Cja@ z%d_o2+4wTs9hkknCDl8MN#YJ$oA#zlU>n$pDO`pIe`I2Uz)LvH{wil2GcPDhK?qBz 
z5T{@4e5NAjHpK$w4d4wrZ;qOqi@>I4eDj6-Xh{NeETezK(4O`OQ%lL?GQqp4QOK@-Q{<;!+`6@E+sCY(d=>OZwlLJ^(kXta zakR(L8d(RTHnVEg$mMIr)&zIMhQteqq=`j_7%LHrnxc|N|bN)ywQ7GyHGQ8AMH^^N_Jw8Jv^z40Fjq;P?N#H6y-NkKh4 zi`?`A=qV?M1Qs|p2{YL5^_Vj_KX4r)^FUr%U)bIX;ZKcl8(Ru)T&$#&rJ~{@FXnZD zn2yAw3H5(5);w574Qn(v)ac4yV1GZ*#=Y{4Ls4v6Qgy{ArfCzN>%~uv3!-Mo!?=@S zP?U!JMsPWKFY%Ii9HuN=$G3fanvyf#@qUupv{_y&HRFlE;df7|cP>%~4aYppI6sZ= z5E5b_EA3YhAn)bdQ@xwp?*TU!O+Jh z`JmzP#PQo8WkSdsa zQ}A$+3LtW0V6Ab$bS7@&Rdu^|7%mW)8L1T&&n&KPj>2J(Z^sTU{EL=IW=6*G)x-L1 z|CzSoGy{So_2tVVxWn*F4Wdh8{RD>3_QIclqrB)igC1ayP@msGqPr9S0-l%p=X`_3 z)_{SU5}P!Rya;~0WL+otAa#VTG@k1qy}I+utu>`LEe%uWJDGM(pWmp~_l-f6ed)=k zW&-WA?Dj^NnQ}d3Z+=)A&M)z}mBTxJt)%<9P#2}S6h8XO-@?|p-Z`io#tRPdQ0CUg zJi<}V?p4lg+Z8*RpSVuY2yKAjA=Fv;B^62497i*QZ2N0E-`#EwOLC?Y{L5c%rIYnFloKDQa2*RU6 zJW`Fg6|PnmQlSU+0x-9$u-Nm-$>o*(xT_*a904xhHgICDD`V|Gg%AsZNO%<1!Mo?{ zuArb$4QdQhCsg3Uuyg6S`U7Kx42&uK7`t32#Bk7|DZ8VAZwyg1MF>yw`YtZg6oQt; z+4?HZ0D9@>3Ro``K*Vw(25t%Z;i+_RtsNOXBu-+*(+R@A)Tr z7KC}O*Qc0X;2YviZDXD~D|f46)JSjr5_w0xQQimd#>Q16lRMw_zYy5o)yx@}$7XNr z{A+9jj09If)+;3N6$eH}Ka;v2a9gy)8A_631lx(LQkD481CaoQGC%u+Ir_;Hk{6>; ziKqaFm8FFFhfswcKvhEjM1n%qq3W^BO5#UA49#R{Xh;Ah1h94aSJ%o%GMAWOh%Ceg z>g!%fT&-)d!rTCV;wyfu?#nYPNDP3~0PU@f@^lQ#Jwe3SY?S8@JfJBwl;-`*pr3$( zl2{q=G>tKcD1_krbEtI~y!T9ip7Xq4FI;1^HNhP=4c6wsa4{)zoasy8J|4}bZPvY>sJ+D-DgQPJQdHdVy^Yysh%f(O?n}m9 z`D%G{O8H-Clu=96dHL-ro!>x3{I-2xpU;kZl@7GfYmsvU?{wT(M(1=9ETci&BI7I!Y#~q!p$XF>P%;5k^W^C}TfVI2XaNkN%UtJNba-CJ+Ks#8jiY_kM z{J{VMvXDF^g`_Pot&!V`JGaWm-@h1zQte8mqeq#s2^EtR2cZZzrqbZc>)|Iqi-(`+ z;PQiBN6a9Lg;+}=BuWFThkOhChpPF<_(dK!AwSvN#(6W&p*ZTOw6TTj%Gw?E$_A=j z^^csF=bN3a_UOuQbm?7ndz;-Wp3SVM~nks+S;Rol;y*?Yq(0z(Jr042LyahSjGUFB&!h>MHsn0LNI>rATSB_$;^ zw=(5me)bh;HZ-%!K?j{DfBPmp@5l|~7NKU{>6i1Et3J4qO z%-&EFqxf$EXcfJc{^}B|v_^n<0oAb~_FyCz_rkP`%qR_PtElILT^@4YSaFeL`c7p{ z*Ng9msuyik=tvb6UK__-EU;@2Yr$8asiAh+sfE0d@~aeU`ZT7!Hx-{=5pt(=Pe9gB zO?pht&!;fY-OD_=E4|*)zAhp>IHxOeSGXZ1V}*#z8`d|l-IT~UN=RX0gN^DVO_Q|r z*l_ZXjgT~z8+nc6qA;n7;5oIOow6mB7nmMPz*y5A&|P+WUVkdbwKlV2tX}CtU1CU$ 
z)Abz1MHIju)blU9hfE`-P58`61m*GEgY`nzjEN`ll{?WikZDDtB|UcbK=z4RuRnsvb3W zug!=i-`Dl2Uu7c)wIcgZS93A)IKKaj|!+tD9$fF{+?+#+Gdq>`FGCD?QKUz#>nqO^eZPP zM`i*zvj_794-V&8;!H3bNxlbV>zhb;sU`$M`9=)|_2me{F`rskxW9VCGfW7?C4@E# zlln5Ge3W>ItMc@=RfN}tP^A;uYhSL2t1j$^Vd?=X4ur=259SSgAL|lCLv?j^sN{Jz z!ahfSD9R}RG9Yl(cVWc=D7xTNHSL#)3tkmQjz#6ndJ9E0Z&EBj^?j)MTyFb1wJ)pu z8BZ;}_3mzdwe-4&6s)B-a+3QFot0M%30Jpl|6NC2>_HprNcB^uGI+F?-IsUFMhHKd zH+CZT{lMLd4+C9YU0H!Q&}aZ#e;|_n9rJU;^922wGc)uCTDI#QVI$aF39S)T*QuYM zj)vr&U;OP5H!;2dZP8Pk01P)^5sfqyLhOJzi!eEfNDS|V;)xluh-^9po>hQ-9VpQP z$fA&+y=Et)kNUYo={oVioTYqK08iuBH>%s)U0h?-AvWx_e##Ht!QRZKg{B?cGt+#TWs|5 zi>59wvJcaoO{0-gyCKM>^#p`IdZ6e?DjWlP6j{J0u-Y{u_9I8R7z*wvU|9 zj+26-Ap70>;EtYFZc+e$&7UpT+uui3jAk_T;eI=vz4czsudg@JJ!xpI%Fo!rP{KA*zK z?!$c`g`G|5>e%#nj$jF{3HgsVmaN)bL31=rYIL^+zE!yk(X@W2WxG>2`D`CvN?Ygl z6tEm*+agy_{IuVQoR zXn#nnOVqM&S6?rZG)<)8)KP2C$ivi-bZXHP`TQ@Gh9g8TBLen(^y+UG=V&pU67E(k zYIi)%W~C(biADhC+vsp_;%IC_ekKHS&xe;yyN*&vcFRqKMb82t> z<$3*r5$BD^%JJJo-_6|dp;uo4+3#iP2?{@JIRTnQVh5Vph&(kgPQoa#Q=H{lrWQ}rt z4spPb)y@pB7h&%09ZQJ$kz#Q27DRj)vruY_EJ#z4QYVzNJ;2C6)n-$tFS}RH5)vA< zBZ2a5XJfU`1AHyg<#_?RZ?%(OLYwdIb4NK*KshnCLTB#!1|z>i6Jg0lA}P6-H@mmy zi^=Ny{f@=#oUrQBC`E z#xa97@nR}!SK9Vg=nTq@NZguQ+gQUq!poR5la%9djRO!30x1KFk93w%jok%9hb2NHtur3J0Rc(~h<3M%sc+y43B3Xp2YsH^`DkXD ziI77kN7dl0o@7g&UnhAP)4R&8NdMukTGt+v8wT(E?}#e}=Z=hTS< z>>a1O%YAomR{AYJo%zPctgGDG;6==~({U9eiT(_={e9k%+iuMzoxd5dA$x1J$EWfB z&wlnLP7(U72Y0P`ccnDkR3yM)_w={>JFS3A^Fkc&L` zF_PxROUAjHn=1M!CHvNC1jpfOoj+Vej>(MWCV=<2$dOM+_Y(BSh9#MHCVuFLp}bq; zK~A%da-nKk5sv}m#@aX zS{zxmEb>Z(V`n0TSV*208%7xz8WLKR?g{NLbE)uFv2q?3VZU|H@cosaDG_mJ4vx+$ zt+q;h@c4ONx`f=M_4P}wmg~R2*Q@*V$pgBIl<=~MOzx#QnJimheKgnY@*(c1yYFb7 zk$8rZOZb|N$YS`$yvvo_8hvGg52{{B_||Q4{`x470g*8sn0}O#+Vt+o;v<(A8|cx? 
zQ`82#KGp4TJK7u^!br?a8XdihNza3Ume-o2ZlGkv!%sny;6KikBz=c3T>EI7o(TO0 z&I8k9qbn~?d{KI{VdLnhw8h>>6Q=K6JtjAO0QzETmVi9mGpZb^?psqYZM!w93Rg)m z15~=ih>Scq#S`|7&IpgWg1;pWgm-Jk%Lnre^REUCb!9=?mgoKw`6)7e^*i>PxG-Io z{k3H9{-fgehM6p@!!*)pG=C*FGfY)EM-G-!6saEuspUJU*YeYOR+weS~n{ zu(7i%L`D>SntY@#)VJeb2;AeDj`n;r!w!Aub#k8@Y--xF+vj@i)wB6&JA&J1pC^B; zt1cLImC(I7Az`)Ws8&uJG+IKAb_)7C4>A2rJ`!$pFDTcERm5Xxw_01`DJ}gswjnfs z-MivKP$z&eC1Kw=(rt$`zE9ZsF(}>^os%vT>*V`Ewtxv}vEP z;2y{$hV$@U%$9Z0RR` zqL#%x5h#=iupB9Ux?`%plZogTO?X&0U(cmB2IzdvpB0)qhlT=iyQ4hj=SV98L|Mp=dm(iei9ib`cHtCV z(dGGuxc5!`?AudEXW0j`RtF;7pm?SyEHZ6+ZG7F@P?x*+!tVVdAuSf|0~+luM^p5- zh;h*9r;FRK$wp7=jd-aCHHUY(C^$ zpjXMd{9F3rPHQHlII-I%w8{apgIw8WQs@ChkbuBQ;Rws4sv-P_+8#(CIOX3EU~;da zX%?D_zNm1aY!L*l+{>qa(=sGwdkS1K;R_j!eO3MuqUXwc8@V%h<==`LPdXWwF5%m7 z0E=Xu3J%d5xcpE}3_xw%7{NWMGNU3!Dbwi;hi@HnD5L0WGt<-2Xo)ZdLqCORQn02@ z4SUS{p@^Mekq;vpF}mQHe*C1F&!Ta#U+X3;(Ta6nu7z@tQq#;;fnb4<`Azm|l7&-& zR@}8UtX|V5;>s!|LLY*wRy%xOEPs!VTeoaw-H)}=<7q1wHm^pX8-GO^$Koo(=KN5H zP&s^9TSPP&#TDYeL&+Wq*!*ZWoO=PCF2;FVdx9vq%o~zdfMz@JC4XIgef?DXHRi1s zzpHtdbZFEqrGO0pQ=i7)-~)EJ$`z&L4eW$=7`gc--47Rl(v@|{RYOpR__uO2`;)w- zKL9psOuw)abC*=3iPD|)U)a{Fh7kDAmE={s@Uj~?8bu*)KhhW!X-*p05IhlY8c;qoN?)y#_n z^y>5<#oxWf@mI_)I11HcbB<%DaI(iByoMyuele~hbcDkRRX5vy6I_@2eJ;oi24E11 zhIgmyvaqUuz{#j2bRu~wHHk5>Eua8<2HbewJUT^bo#wl;rIN4tJZ@?4*9&7D(mrD% z8~LeQ{z*8*`SEM0dGDyLx$+T4lMs+(yEXKtuWwN`(7~M<}SA-BM|+g~7^G zSNVE4uOl=$T>Fq!82nti>+)W%jr%*`=wX7+Dzx$43fZvF`*jRD-f<>GH)hdw&eLpu zPC<2ce-MpmGo^JU4Vxz9XMDT+K1`gMke?i;napeQdl2s{s%|2IB(TOMcb72I4Inqy z7SPfXrzNQ1v2Q%m4)e|NH%DRRB-vs}Mtge7wtPpcui;zJ8I@^0PEk=z^W-)3AY*cy?&HTZ5%)h2C@w9OxFr^D*~%J_aw zuj~_4rUL`V%i@&XJ}uRfxx|<~58ctMD-bjA6r_7d6U(6yhX1QqlKwn0T$HWmwZd3X z^wWMr^g#HcBrT6?D#m#{6$y1H!qOWF=qcpxvk(ccpOe%ZCx+X%=X<51;CR}EfopAc zP=?=8a?pr zhnnnH*7-U9;-=#L?M&w%ACz$2!f{E4U!#zVQEF*7~9WbWnvHE=AVZS%{7v(Ep?BjD#hyus{S zdyr^VTJM)Q?}L?#DBQh%5>G?e-(Ir0p=l`~uB@xjD#I7?d#UFF5J_)U)v}U5;0LTB zA#i6gwfPp3Im#wrRG_cC=;ygJh+zz?qpig8^K`8;l(i3j)k||R$xm)DldBW^!~d_S 
zRgX<6Kj@_K;Oo*|Gh0upw>8CZ2|3zdgXf~B$0qX9jfBXM%s($Vddz;W^s4?U*@7!! z!Kk~>FDI%agT*=|zjTFnv5U^a@_qd;lmobZkh&H_dXP3)vmF~dEya9Ie`nE-N-Qqh z;iuFKye0XMcclFksf<}?@wFHWd>tWf^w#c%Jj*S5z44Za4j9}2`9DDlicPmfL%#O3 z>OPlU;}ynw(AO?s$YE>RlQnJz7cLg$ig^72bl|TM+Q(uu-YbfTk9)o)NU6=p{HXFp zwSgWl-VYX0qV?5Qx3t9mAW^WYo3V05_#&81g)j8P>unUzYnZ&3Y)Wp`AK4n*V~$W~ z`9JRG=Ny#}h4Y(bi|+YVkpdpY*gVpYHLsHUQzd=IcLu!O+CK>j-XhPSHk>dBDxSU= zG`eg33QFWguBfA!0=On2$Jsa94;IJ?e^VnALmsOMwJWNut$*z54SWQO9tzCa5F%t%+e*ednC%$<4bFD@G*xEJo?pL$! zicvh%^0*cBWKJ2l>g=l9QG*Gv_#d~12ghw2)HcX`zZJ?s@eEwj^YzrtgSeuWcl+Pa z)Gr)b|6|}d@GqgQydh-VijleUVWjY+ZBhWfcjO4ntkqxThsUUYP6Tb8$>`f@bF-$i z4%?T()hX@?8=a87O_Aq4rmY{@FYGStyYc5pNE|Y`y)CV`P}Abg?D9sMjp_lohsFgwotkAZL)wF1ArxHS+cC<>jSPz1B3xqAjNEDgS%v5f9o@@&V#>hc#! z#XCOsT}b)Js658ZF&OQ#^yqyslY#Ax~{i^?GG@HBC_m7k~NB zS7*%)zV-0NyrG-dyrGL1QZ%S#(;51o>-3ixG%z+j6WO&ybjzPt@2zEa`cN87Nwk%F z3+`N(qji5wUMEDooyJ=In8=iOH?IK7Pq6RHHG%@*@Z$vMueo?QN5zrJ1M zzxE^!1{9+pVBv&O}>Nx>-d4D-+Le;`J@<4+eF#gdy& z)`Z^A8Whi@2Q+VAT`I7__PMhb>ugiOKQ5(LF!rozb60nx+t*DU8ZRi6x6>xeeT1yrs`vfU}Nl%_kpo%%u8;PM#T z{fHbz*$bxFR=pg|;^1IbVA+qg%OO_=ysYod zfPjGV9gqy44};Dmr29NTW9qCAo5D;#AKUUM>@(*&2fJ%HexDjWADY{}L)y)QP4vih z5w3QD=tSlJ0JF8DV~Fsl>gp?itZzP(5MDTxERurJy1AA+?~8Wog1^UB}t=P2#(+ zq)fIR7o#dGYTrE0{x>=_glq9%2#WaDI7qNitfiYkQxcSI_6yx1eUM%PK?B(wO!c@$A`|x z5l2jG?p?^5__<+M)pGT^$hs3zaM2Jn7_@qkC=vROk?KE`6|foE1v`BE_AL@}XHqZD zn;DWxZ$@&Mn{>m44E`DfF9|u}At5(N$LAkt1&3aMwhW8$te3&ywRta=i?i!G6;&QZ z=O`I6V#{EdkIi7|*Gl5d-7U%i3>Ht0<$sB>xt=kBX-;4j5Xgwwtob)QTVX*f!XrH) zHlL5sy+ZyvD!iGVZ1>@7g0n{Vuxe%)Dz`Qr;BIK6$H*?wjUNAX*1JbsZ*W=6U+~gL zyKMKX3!&E*0_PYm|3dIvTj24B;*#D9l&vf1dBu^v0xltKURdEx-pL=GmHENrHg?3l zc3Vx+kNdqa*U7%pw_`gL1Xc2G&E`{V>T-HUeAYzb4HGA1QFN%S5$GXs%^RUsHa0aS z-Hl13L1gFK6i ze4{JQZjpfyHPK+(i}ytN+cAF%aP|U|+!M-cio6(CCnPJR ziihTzr>w1mCfi(@(S)7njSRUz>Nq63HNIW7ZmPCq)HO}lV@M)d*7XJqDT^4CB5~Y@ zY|hJZT&ynHT1m0%XumjD2rJ3Y@F&hD@AbGbj%zP_%>7NgebDNxm(OE^>gG?pE+$D^ z8pvLv{x?4hDXP%{rPVtgd0gF_qJ(OLTzU?a2md;tZ2$O4&*C4;x3;WL@8?FxU%PkA 
z&2TJD7(4&%uwXWk{hP{U6&4hx7MgbaWBu+cFAp*Cu;HkG?C$?DON&BIZzp*jLu=oy z_^|dp4W(Owed}<_o6v^1P12I7BKLLwj7s;;+il#o6vOsyfqr#aNJ87R1#kB?tc(@2S>)X)&U`|b zXM$=v!}MKO?m2yBWsk29k73lE6x;oa+T%=_sl$3cl0^%&TJz-ksu zKQcYbJCRNZ_#Q6Et~7AABGbM{aO_U`73lO0r^sO2V%%@~n~Sow)QbW|pEBt6;VXvR$9sNqj&L4c_f{3=|RG+x1J zzZi9Hk`#q>j~F|lr4qJT7p$@f9LEy0fITq!kZ%cEQ|u-Z51`(eAuK?16-KQ0XvChk zY?lCe!RUv|I$?{eDk~3Q{Q-X{>A&3t(J>9;&@`OZN#|0^2)cGaM^CM)I+~UX>nU2I zk{mPivuZJU*bJca-xR)a!v-1(aLZD1%H+adj}2{DwC|&xAeMrEK}Zuc{OBm~X#NGx zCfh26B7{i=2oh0ilLib!ZMlqS?0{b<+~@SBGN+*}<8O#|Ov}4>36gPyLi`lgC3qa9g1;DL|e!^|C^-@eC+RS|pm2=PTcnwVeGS#QfpI;iR^3?Xd z^`QmYC_5CbP@Tm(hGFsRgk*=lUcI)zq%~Hu+5E-P69f9e4##6VL(-$$ZkmnqiUkcy zw#JD~ZW*2yX&xqUArnm33UZ-t=CG@06?95?Z+bz#K@i&A~AVn9%=QScp*WUW^J{>|koVSEobps}D$?VklE6SfL1V z7ciTq6~6JePnoE{tqhm-BPwN-3=!BP9@6}LrmH*zf_)g}XvhMCuY8&BCW;^aat$rQ zFd&ZYxnyG_fBN)krRztuIZ|A;R%v~k*^0jJkiVeDZtz5uKOyR*J3~WdX6DG$0 z$EBEa?Ivv?daqi z-&{)3cn}m6L?%JW#RM%8h_o2}R?d6KTw0@P_Pf+}_M(s7)hxdyg|-(D@UCGwXfAoc ztkzaClWM0L@Hs6#ysVJE>BGa%jV(rp?$sMG*Bso|?8FlMy3jv(HM^}uwz|0r%W3)T z4&E!2Du?IGF3bzejn=LY+&SFf$UAFQIXo%b@N3A#D7jyHe)K^@ZS83=)V6V+>R^~S zYM1)@^=Gk@IgPwVI2e6Ka{X?xU%wGwckZ*VIq1Qv@4Rc7FNgd2YY*IYn#?pQMKFhaIQ3u`v{#5!|NP+1Mnv zZZ*zqwaUD=*)S0*YXgA8W=Bh&j&59xRC01+q6m68ilTcfCnx7DKgXPeN0qBiku&R+ zQ2#v^R#wN+v#qbcKLiwUlv{pd*PK6)MRmB~l1fK)f-X%vRe^dqIW_gzjT<)#nCMb% zPb#L$qSP|xl8Z01Vr}t0pUA_C}1sdliYAcrc zZQ@wHIuRDkgtWBkp&u@}<`x!O(9Ez16n(h5wpdG`+eVy^`AR}T(y==hPENXTNMts* zwzisETk8-Lnudnf$&;IdN}nL67u~o~Ic%qOJ@{2WTP{}7OPIH)laU zQ~w7|8VnQ)g&N>>Rtz60i~I{|@j;#{{D1jnvH*%eu>clcrwpd-c)I%Hfg$g|hQzZvhj9 z`XMPUP8m(@e0_YXp$AVwgb)xIXg=JQD%6_vG_w@Aj)6sbP1Ih{LJtDGYi_W1#eu4>T+p46bgr7L3rF9n? 
z)L8ThbqzR_xwx*jwyf7)1WI50mRDF~E>w-W?c2oocoiH%C#E30e1&KOEi?~8Qj-09qFGx{FR{8>{a}qi(4J_xJG8gt{=6+# zaog;~?WcWx<|W0&=a641FDtu+`-I(|G^rqn2Q)!}DpfDl_)1e#)4=aXN8$u~(XaKc z*$Ky)G5x5VlPMyiq9J$h3f?S=!jP7dlx*z8CA<(jd3$>;Om1I)|5_Ax>snf# zdwVaDg{n3idtUAVP0c$nmZ4eZQz#CP)z#G{CMVw$#77u)527nm-|(=Ghe!VJy!^T8 zLl9-uA||s(ld6)^(tg+o$DH;|e-E>+t*PO^UlD_-=X1$NoJ|D}$~X=j!_T$mwcv?p zX=}%TYSIe>iq5`$SC%m{)_wlmyL}iuf(FE9b!~0Qh@AT$oDHuA-2C_NL<%KKnvSy1 z4t-q&-qqIC{r<8W?>`2e|0HsAa~;1x;v69Osj11_U*ZguVM2TMw7@0x`6ABkro2w= zG^YL%@G`yqY7+{sUAwO<-e2gO;k;(2+>>vHVV4mS6MGNie*?Fve`%)#vP9nzmrOi@ zjQn9ohsm4A=)KbB+|3h_ercfbl;8s}pOyxdU~EG}AQ|C4@t}3%J2x~slly__@+_XH zX(n17p07_a8s6SLH#58w<$uYy+@D%oEd?JSfc@NgKhQ{7x%9dO4P^w0eOmdC+@{IW zaZjEo0GptC{KqVV%_r_j zZDKxG6)I5_XL;ft2DsoRF)@q6NhW6I3!uxP6;DG}n#@pEQHrb(8yDA^y%%kLN;|6& z_a%T3AS@-N&2&=%CN#OslZRvH1Ac3W%@vc^&oZImJBQX{+Q*JXB2U%_pHAo7szWzp_!Y3gQKH}oZLw$&c~dUl$B?G zJ454vmF#;%%lGeRFl^7G0Z;k+_jilFeuNvv{X06wnih`Z)Fg>Az0}T~LHf;KOz9}p z(^07u7A=A;z|#d!>?bt8&tAUNNB5U=!iz|1mT+{_!Wz&&b#Y9htiBW=o>%JgFMdyFb9tJJxfW<%{7E0a7@OELJ^|7LGKxfKWyyB4{dezDA%Fa%E$_m zmm%%>PTY$jY?aJb$1vW@Q>PFXpf^QAM#hBo;7i9rJ#8(mo`9Wg$KZVO_46A%yBxz>% literal 0 HcmV?d00001 diff --git a/baselines/fedavgm/_static/fedavgm_vs_fedavg_rounds=10000_cifar10_w_1e-9.png b/baselines/fedavgm/_static/fedavgm_vs_fedavg_rounds=10000_cifar10_w_1e-9.png new file mode 100644 index 0000000000000000000000000000000000000000..005aabbf67523546b1df821aa73774021e9a37ec GIT binary patch literal 38228 zcmbrm1yojD_b>X;A&rD6As9#tB1pH0C>;`lAW|YNAzjiKgecu1rGS8RDxj2r(jiDF z-FfEnd(Zd(?j7ge|2gA0#yj4j;`8ji_F8lPYVHu#+t*2n8Hh0$4CxI;Idu#MhZcju z3L?aZKbgHw_XYlU%}HL@NyGMmlgm8^Gt8}fPIeD%ogP}6usk+%aJ002B)}uYbD4|9 z!pX_bQG}P*=HEZSW9wkfJ4{UY6h4H=PVtT-219-i{fCt!nQe)|L^s@!lh$-i_&w^T ztEt6Iu(I5VX(D^)&-#Xw!-T!U`o`qz*+&eIEaS7}EnkP|Tjj=Z$3Do}$+P5%&rOwo z{n~ozbPnNEBgeaCf$c-n`}Y&>ohrxL^-B6B@ipPv_K0I!e8=IJA98*ygex)hY~OD*$q@!x-#reta?{r87rTI>D( z{t!omFCO|CN-c)}{2Atk-{=usq(Vb#Q9QJuo=+hmyf{(hRn z{cW8eD(9#yN6wr%qq&xumDSkYO~Lw{Q0UP}x9(H>>DJunvoT6NZ|?};5)iNn34Jj5 
zWk1__s(Ssa=*sG;&SzLz(-yL_7l|n- zEv=7_4(YExq|Z>lgpGsKQ{`;^^VhHGoz>}A*Bmbt8h#97_C6?SJ~Z;$HN(Zn&&xP< z>Qt`Sc!}v#{)zAJ%uYxgZfHc9-N;lw*yvNi_>`7jIe@Rwt#-YfmX=m*))C9!HB;}4 zvooKaJTWta=i=frQtkG>x!QWLuw~e`F5;|g5DPmyVOu0))5d(?^Tfo6D2ctCPE)jI z7oW2b`W13Y%W2;2>FHTnTa!swh*;Sk_ipOxQJdKu{$Ohg7ys(ADZYWBA-eM4!yi<3 zs}#>Qe#_NZ9d#LxaZg|pcdu`4#pmJS`TpLVd}*XIJ=T1r!v1@u<3)Y}ff!3amK4Wd zUxhvQu1rl$8Qa<#E!b00Q5ipaM8?L(ru^nAX}(TTP*#TnJeGvS#GVq<>oM!61RjuV z{r=(qWP2%9+8;lYDGauQND1dj%Kl3oalF?bJh&huBlFFV7-P|!LG+;Wm0GT8b8G9= z_Yzb3YYyiWBF}`(%~>P}+ou`qhsv=!{TZV1W3eDnNHVjvEgH~E`#jn7bak$W4LwSZ zd5!Gb^4i+8Tg&4{g?ngO=wK^qW*;2wFWb4eT>teok<&oo(IcK0FJEevS67S8EJ_|9 z2q-Hnch`A)@9gb0HZ`RijJ^(SZf?d%#gwxcRM=r*x@$e3#5gfCGY7Yr%q+sDt!jiV zyAZyy;Uwm|g0UGY&DFei_Uzf>vNEk7UPrs;GmHA=4;$b~#&8p{DTLSa)*bjYHp)cI z(BNHEQ&ZcS3ga!d9^@%~G|KShjZl%Mco_Whmt*x!OiWa&^7X}fTTrmNGTFo-N()Cu z-frb8W&f%9`FRXRK;XSnjDX#Q6s-2G^95S@+87^L#p(7KPTlf{@oQyj=?eGedozVy zS6KEo7qj0P!D>9TvC-kxtRwg4i`Y>H03ozLipM>OD!f?KrK{i zoRwhvuTDovPW}A+at`)GkvVa^D zo$v9nqGBRKXbh8LF@uU$*6qa5drd)SSy(WrB+Y(#Q*!?^mF}H8mY!eXlxQiju;%$c z8mqpf{QAlbJ7;Ga*mSr80s^Tw<9HfBjC*nZoSh|xP6G8trq1h7a|?$>IZ;Hbpc>Yf z2ovz^*)ysrx1XOVv3yoG2aQg^Pnxzx(!gQ8G8+*ZN(DQE@A2>N>Rr=tRIo7-`cX{c z-`0+IrlVxmXTRX#;NUbgHOcNQO-?`fpm7TA&;&B`ue3%A%E~lCm-h{7$>jAa^lu&Ye{Jp}SO^)@sL*P(V*4%@# zj6d(YB>J3bIA=aRXUj|)FSi-Cddp*PZ~sdC$<@0atMaenOiN3!6t5>fkTIh_Lch`S))Eq~g z7V7@!VhcyK5jLc4^(y|07cVr|q92nMd|o`!f9qx}S3`bLE%B2l`~2P{9xD@ALFCM2 zH*emAb8k9P?_1&c>vm<7Gh7U+=&h}-c6N64sHw%r=f?8FHf8qQpoB{>zJDKAQd06@ z_g8WZ$4f<>4JSk2J*#gyYIDOMiuBxJGdroi$rJM2b8?#NmgcU31|X>Y^wTFrkE7jR z7DJ_%hDt5{UtUzvUR-4mvV2z282s`ZVLPu{cN`$79&o882|p)YN~u z?wCpZ$yRfvg%Rcq(I`3nNDwk!l6DLk^8CIQ)kB`@_bU0sVQ0XUN{6vLV2D2(6g?duD?9>qiirx?l+1%~h`IWy1pk^Hi}+tz$f`qS<_ zEzX$IgoHB;Cr*^*x<_1MXQxBGj zEquFF<^jjk75d5K!U8dDD}@M#v(R@wfBuZ|IXLhD4BOP!MnFqT%Xj&*^oK{Ib~`JR zR+6aLY%Gs6KP6*Coe8k=35TYrpKeVk{>wpF4B7+QvLNIf(_|PR1xQ0gt-hA*sKmvy((yTU$i57JgjB zY0f7+oaEev3whP$ZtD+rc6RDNeIkUvN62ky^kepmZ1b)pi_t25}|2?*jB=znM}syos%o4$&wt%imMTzo2QS=d2Mb3N%;e>+q0y{7I8 
zdsD?sbzxD_>BH^u!SCYitgP5gPsyJa6mW-!hj&-IId&$B()py3@4(^2!oXFwZ(qD{ z0pDl#GVRQ+1g-PwgPAy^VrXzztiBNj1qIFZ=hHnJuQi;l$TvMbeQ5F4 z>njc6rv%!$vh&bk+S=Np7jrG<|1vN@3*XpgTXz^#U0todJzi!_x3|3<1y|15cW`)! z=*hua2QS0+=k|6PFE6kD1Rs3)@rK30qG_nS0VO4u8LmDIp6Q6o|H-+KroR!-uD;=I zP<`ToT6`R@kplETaFQ!DZ2UXoFr7b6C-f^<0Z zuz7qlGtc9boe74XloPGiyRFgZO88tc0Lah1t|W2vtK-gkmt;?wwT`ZyUgNiKCjrm4p>o&1uli!`uFK=4kr4P7Ii=MfEPAkO z+&4K^*LBZ{i!;KtU2F|Mh2!YxXy$Hg%%Xhjme$rd5RG$GM9^^FDhph>(%c+EZNK*O z#>_aLP5Zgt%wO*~zm-`Jb{FUip)NA;_6{m?LuJ-I&;)1XPpe*We!Mt93Y@P#fQWjf z@dPi+wQHwtMCkpf`v1_T7K0DLk3nxM9LRsHUdA3xeBGKHZ{ z&CQD++EzL*(kg`0`vO)u4M(O0P7vS2?|cD)fs?Qo@Glubj1YhyTizfZW_mn3UpT3M?$FxuG&nXhKA$rly!sp9`X*r!}&aeap%OL2&Sg zvqHPQJPz=-Smv$mKZb%_ z4SUCGw9&)U^DkilU<ME~L8d3boB z0#Uuz&S{nuR?Z~p6%K60bY-G}Ucy5dodw|QguoZ2lO;Sk3?9JFxa{y#k>vDM5`gqL zi0g9emrvAek2N8>4=9Y0Q~PawcdD%Y@|ZXPK1=6t^SWP9V0S?M=+0JQ25w;5;D`6= z^XCTGq+v)GfnH2QwuO!=RQ~BimoeY?_*1pJv!_W;@#B`1l(h56>J%C@a28c#1FA#S zKQJ)xm87@C?>~RE+M&%eKusbC2*#yXN~oTD@`LSoD^Q&L7b<<@bhkP?J6U3s%0!qe z4I+Vfau$`e?C$MN!6m}RW?w!22~3ooi%WA~>@+=n{qXQ(hFphY)J1Hc;~N9tMP9y_WC000mwR6apQMvEc*=(4QM zDa#XwH~?I&)vZr(f3$GHfkf07ax!GY3RTbBTvSQYoOQOhr-Yw>{6{@2Gc%JYGBQ#I zeiLm8h$aamCH(mJD}HV^Hd#AIM{3YP@Zyy(5c;vaHLAOdVL^!Ke3Gr&A+Ybj=;)I( zi*<)vZ!l6jut9E$*QKHF0Q{Y~Wb}>e?!fHSl)BZmuDp;!Rc^gf3P8UB85!r!va-s+ zX2`-w%?%bM^BXBEhqFLEJ%2Mc=ePGvJB_r8ipu#Kz(IhX0IaEI&CJXYjg5`@0rP*B zoXm?ZMz2f>!}4&f`c6Erq@)p0)tAPA!ZWh7^An;fD=VM9dX?jF&|4)F!6>Rdb2u<7 z(3^GZbSe;k83lzP{y#t$W{xDe+{u$hTO%3s-=4Q4&hoVi(jKrz-f)02}6z(`SqwKV2xWJk{vkTZR&8rHTPUr)2Xu12ROO%VXxlE|b) zmym=+GfR9wkvA=OmdaK_v{wDo;R9?+h=miZ84br>Spu;rY zM_tsK){sE01K2Ur?|Gl#a{hU(Xmo(?>uF1WIG*f-d8p zADY*>`AwP$V~p6tyjpmc!|CBZ+C4)#^R3m5jZ@m)qMJblGT6+y0p!@K&ZG`{8o(Ph ziSe2zLO&g@L7ypH3wz`5kAqkOfSUX(yNmPlNTpSm5aQ#j2TBp|^_@JU&zik_5!O zIdM!JXlzPhAAq}ee)$r!8TR_%T#`wx7OCJPG~6l?|w6W9vvO<`}e~uwqpi=%AfPz(_&CMC#x?UtDF#)H~h-Er`I&5OX z`1j~Xoj;CmVq@_q=1KK$2(8elp(h^dF&J8P>;C=wnt6pN>f2e>K zEI;1%;^-=qm@CDdJ9n%G->kWA36QXlrKh=HJb(V|#f#Ucor~PoXE}UZf+^Wn`cxz* 
zzX**t1BvR%*Wo2$5~G)WN~TrWs`YdErkmE*y6(VH%QpA&w!#}=*z=VqxblVobX*2D zPQq}75V(IpVj?3TV|V~r_Vw%QX*n^ocl=m17wth#>kz#7L5?-|h0$n!?|Y$oq~Dz2 zt@Z&zPe1OlPKuGwj{}t-j#KKYIL}dsmve5Oq*T>jxY$jOm^?f!kWB#&@~f+>uk>7J zPL&Tsq!J(~Ip3p^3ca#&MftJoP-j3*om}Y8j}dvSM*K1?$18j@4@KxXxpjAC%vA(yjN$$Cwz4xORL%j-k7e!LO>}$XdHQNNvE_t-Wb~{Pz&gAB40HL#g z{zL%b$n_*sNtPfrHJw(MTn6bQprD|jHBhBP^oD}k;!7ScJ9aBG_s3W*=K3d`bLSj& zdd>SkE-s3$oc8qb;Cmls-3wpAOQE_XcZ08&Ar^7lAQ8` zZ=VEgp~aa!7Z;mmbUpNV&0%k2-pc4Y1ifRLVqP4XT zRO+6URVYYl(RaGiPKV|h+!=zhg?cI!J54W|<6p_%b~EiU9HNmIUe|u`V?pku_Gfcz z>oAZ(Ob_=qgCioe+zNJThpHuK4kfgKyS_IzG4VrWjfjZIs^~gsE{V(U;n`nxno|X0 znWOwPF(DzGQS`BKy$==#HyId5R@}9+K_m^Z7MfXxWaQ*+u&uOO)5Skh38lQ!vVyMx z-;ZOP)Le^$!N_i7PR;DK@`e187NQO?0Vx4y5|WZ??Vd5Jai;-|Nd3;k?xrRjOG`_R z(Hb!Ht{fF`k;8NI@xef$x(?6?*#N8jaGtI9=|&Ccmhm-{y8a;Y>f_@>-0wEmohtlf z>#dR<<*8F4KoRt9JnjQNYj?W?<#K$S`HhgR_I%+^-SHtg$Riy4t+kJCe<1BkC2_G5tuPSimq>2m7BXe5?PV!%V#?#=IZJ?0S%)C^hFA4 zYJc!d8$fgePj?4Q5Jb2=b|#;;AHwEX0mYIHBr0?#uu<(n>puYZqg^{2h`K2Vne;-B z&HySxx*!swK=0-SX-%sTs}|%N(6F0f;fx^&$es6iq z=+dV}oY-Ljg8{BhgOAxB^TCWiUN2t}^ihn|Oj|N)ZphU_f;T`s7NCp3l%;@r@Od4$ zn$_NU|KKb@4iKFDUcRI@F)=9y`TD+@Slwq-1@SQzs=&p~ zOP@n&I1#aSTKGmpeet&DZ6WJ>_k8hA(6X;boViAYTnQxXC?`uWf>;Dkg1H-xGO>!@ zJE$oj45h*?x&BIYiI(&#wdz+o?{f#tMFd1)LxQr62b>8xY&|(@Y%wAD93rh#-pBjQ zfT{8m9_)f(*<98VESd${+rq5_jKF zy;F1_)StPLN&$qF;mJ>frHL%&m3Gde6p*&tJwOCRsfF3jcz=*Du2}ZsrlzI_hlHrb z@5my=k(ZU7n)f!%@=$3A%GE_6Z3&XzcL@mzDL|IP17T(o;NnT}KT@^FfcYSKkO+?O zN$~RmK$<3k#iRjK1y5R4`C6sZygHKbpDAS`<{jXm09Jws zcF#%BKyh<5IZ7>hWTx{5pu9DLihu{&8zN>ao16W@n zKydRbD=Pyrm}_IMM|S$#1X!#L*#dAJ^3bjz1o-3!@(7>%hQ(P4iM!{!;2r|u(Vu|s z=LcK|Xd+wn){u20$a?LDU2C;<=RJ?E7#^pmrQv#edrNb~5QD1Sh=g8)YX@+Vf-!fT zY6dk2VU=U=46>Seb{+36@*<)dJK*t&gTR{rTL*i#qN;k8FPa`9rF&KkT4#>V{?vgo*<9#;7UP=94~NhSb9opj$c4 z1lJsX5`t8h@zOW~OzAw8L`G9-xw3J<+^|ogE}OR21G)g_4L%7eZ21*^@M?Pb+=hi7uvYZXY(?k#h zG`pR}=Y8Z6BmG@YPB6k01+%OwVXbDuAD_=%xU(4?T-x&QB$oe&5R}RKzsYU=Z@B3H zdjYWj_JjJ|j7!`%ttzUjkR=4gPS|l)Id@m5KrayX7xJ9Ge*KEvwC8W$=;!kP>&_gU z7<3f1}w)`01YS&0X~88{??xzXs?_G7f@t? 
zQY#5%33Ldy*Vkc3Gp<0N#KM3!l8~6ViV_@1blc6o^(F|s6&5iugZQlm0_F*T6W|QH z2DYC=QMt7CzP^6n_4Rf8rIBj@C9cO`pbeXZJphii)krpY3xHSzKnTYHwHeSl0Rh>W z`qQ@6YLom}EMSrYFlzq=8WBJ=FnEI$&*lBjrGhYs_<2HXFb*bu{=_ZO{UzQVk{9z8 zJM8Dr`=9Rf1K7#W7#<#G1F3Ra0?acJDBZ}xA@uWskW%&G)^K~FD$ONo4ASp`5NWT4 zqrGqipw(Z#y=Xnhx0ju-3kQ)m}_yf3ApO-{aHHP)3h|HBT(@aUk%(Mo69Y)OtPv z3C*NGPwNwK4G8yPVW7~0qH`7&1Rq1sXN*B)qSSiu_757Xv1&K?6CeJ1_WXZ7%Uy(& z3%N!h2cxi|gai{p-X~6Ikkv*43$RSx0qY);A1j|PCIcTKfY?}z4;BtM2QM7rZlK*R zr=^m;sQXPDB45SD#Zr2D3~eYrsTR1JXZlPf^@-GUP<-m!@>jSnXxuGG3zub}zHUEddv7cW*M++PshM|2ivF zA7%QZ7oYeVVQcvlV-%w_GK|La-~98JSopMp(mWcano`{>Q!h)4e}sDcy+LORoUqq+ z+X3!31BtN|y;DA2|9eXW_TlokJ}I*s)fTW}_O*SSsq3>sri>OZ_{4~&*lN}||2v9iXb2)O?mJn$hRMjHkz_7x`THolx}g+8&;ghU(x6w?*PHUR@+sh3AQAKZ$B*0@ zB;Z0mE~blF+&us;(*%q;j$QZdr3e6Y?8!GXdly7Q)>b@d=xcKWoVg@8-55_iC*&*9 zei(76Y4ozgr*yvsJ7QSn!#72r5UZc*>RJhP0=fL zj?gcw_L^X8R~Jw}r+N^c2{6w(X6zn6ZvD7i8}{S9d0gc&y1+AM!Ye9-0j^|jMSmCMF7DEj$DY)T-7*am2Z5A<-e*ScImb#;haAvA%KfyXFe*a$9AD?sul6I58!U6$Cwemy$z`dz^agz@ms)l01*QcTwLCFOkS{HK*CWt*I+l0bH@<#(87su>gD|?%o^{!5-@tzrY{(WA zD!=TQAtWci4me$D)CKI{PaPdi5MFK<`wGJ0-*l&lh<4T2P3<1NGwf!DBqb6oT?Aik zFtyt_N81c`>l7@!1t)0GM~jm~r^iUCs!{;ReG1e9@>@7yF-t=-wl#{G9D}T)QzM3; zx!}T+S3mtMF+M&7f|*kg5(SbZ?dmFwA`*}ofT%!nyzKL7cZxwGx#7W%~co4d$R51nRzJIF?gTn1VJV>@@dG z&kd}7l;yCnU{}}BnEd%O0Lf_3QU?z8Xq^q+2B+-)5Q{}(eSM4b^;qTd&VE#U{dkz~ zV_S^ZOB#(KX#SKvjDIS8fMqdp{_9JeSaXm`o(oCnlDzlOzlful+(UN^2v6ndl0e_SH>vv|UT4@|3F&Z{`z zl#|OatCSa#xocze07WC+S-jY#UAnYWt;k% zi#nSZSXmRN#PRwsl5ky_kWY!E#GUwh53WrAX;Rj3Iqtz2cC`Wqzg4AghJRv-h(!R4 zuZ+Mz#4!OMKI1o@q+?mX zDSQ8wC{~RmftXSMjhK+&x!sn3uXMd5ix6o=@tu+j3Rb^g%3a1(26d`b-6S1vo`6w} zyp!l5Q!Z9v%k?*QW6xcodF4e(UxH^hPy9t14wTt<&um}s&L#Tic0=LjX9*tIMQD>|3wAr3-I_~hc zJJ5btn6EAwa^6Q@DCLW4OWn~4EH+M-RS~<{>xP4o&ndcHOnNhJAhSzMT)gDL z7Y4`zBH@!^UH{G<2?o8x~re!&(drcHwVKTGT?70;;gip|^4VO>OddlpfgHMu9fWzb)xFOHz2ZCKOgq zBVM~S`KsCM%*99A*0Gl?S39M0ol86a*5 z&Xw8?7e_~b_@xvNR086uP9bJWnd%Up%YUdf|D%NFwMWG?6;sv&oq79QzoAI%ghL-c 
zu8D2~!@;1xd$rq5CkJ4`Tm27Ypcujk0e+TpydSi9kjj`a6!i4L*Boba{?JiT`5~Qp zNYAf`mAqR}2<@fI01HtF*)_RXz-o=J7OT*_c zVv1Hbo5RI$mn;4Db1Uh~XU`o8^^kvI?6j(g&1g&|oe+g~i7&izmTv|(0DT!JhOXT86F@__SrgnI%|P>*k-{UK;q+$=UpU)7(y&RSo@%>L8p5Q5?J< z_0pRq66ohM>g&^DFUKt0YuX{3>u)4;3v^H(+F;%?^vYSZ>NjGg?6y76@&Y5|Meyh_ zbCCg+)qW)sE~~S$oWz#tRs+Nc`I#`HMq;Pm~CJ{^bze zfXt$@hKxs#Fg`8&h0}|5k9?`!=6zDHQ54Iq?tE98un)hTFAKMzIm}m_GXs%2iKSsG zX$Z{72;FNiB-Zx;f4t&>2pSkTd)-=KCSbofWk+*63r(}C|B0wpS&vhgH3$j z0WROG#2U-#b)M@%8&?X;^$(kWE7^H^-EWthM1`k^)p(_hSQ~3Q>|GbVe?8M@>2oey zh+BG(Sr3w0Fr3)o_~-0s8yl+;Y_2;rNBRCa+fO~c94r?;Y=}lW;BM(7hSH*N1ELN@>$>;Wn|tu@gh?k6rXHCa+Q0c~d@{!6JBo zbw*hQ)@UrpZQK&-<~>} zeu!43*pk>Oz7SUBAxrWyATG*&BRv9IlxhJU*8}N&r2T+b`--GK*5l6A zD6j4`3rVfnj1Hj2r+A%1&{dxT7j#z^u&iN#5$Y6D{3m|@4hLGIqs5q;aE3d4r9(fl zyZ!T-sCR3Z*V;6MUA9XK^EVHydzdZO_*ps#j$VEC9YOn&SBt$^*k$Pq_`dl642M|^ z7BWIq!WYG@A&ZaZN%+7y%q!%K@h{6gsejQ~?nxZ}r%&9?r_W~F|Ah#f9@HT|63IAJ zmw+|cnHK$1?xsu0xX`=`8Z(2A2)wNVG&dAX0nbJ*-WHLcI;rL(fAnVSFc!5 z!0417q-IWknpxw>Wzh;R@n!O27-qsM`2C#srD=QY`gg}kUswQ-#XSk*AM}dem@|f{ z|D0s@=PbptJMaHp2(LgpS*#L?QD=W?I#Ag0-SY@bj@5Xzet^EOM}Dy1-An!m>p+Z( zVi8N@Hm3ZGvmwCX-qc_7Nq#w8dLMLADiyreO1-_cnU2els4L(s{$Gf>khr?+yr3y8 zA|k^PlLq#lu-(M`)cq5*Tt15D)PLxeTBJJHB+Zi|*kJ8~#e)Oasr5NKI~zO!A~>yXc#C10jBZUm!CD)AZc;}`aJkHZQx{5o<6O= zEk|oGSaY)IP_c8Gd&?>8D_vNRO<4?JO-(aT^zuV#ga|V0{nL+Q_g6U`PzvsfZbb33+){b`6GO)H8mFm`O|8#m#w_>vc<$oZ!q=x&DJ;^4 zVd3B#gJRYGW{+HA2ZQqJFmMqFl}1aJCiz9j42ViiD6;~_Ryh4-Y`9_+S%`Ln#EU<8 zeGQN^f*?HXXbf=vRT$2U)?NBfytyf6i$K zTBKgl$j|h{3rczi{P{|}s=|Tw%x!+DF2ag^Uc1Z#Zg;pIIpN=9dr;vy z`@kcC$%md?jf)ZcC};rjEbTiU%V5>mIXQh3PC27Way2S6)Gs~VG{pJ2QYKgiIzJY| z3UApI@7^+cb!uU`@Fag&Db5fGA zJcgmjEnG)NJhCWJFas2>37CX{vF!$kvX+YXyEnD95D#B)2`CG}DwI%cTPeFpnH-zH zXeBNwQg=dE2#cTEr>Wy#-7d{4)!DcNA_FLNPJh`17se96DhUAXPnL@oMf&w(1RPoPh))3~mHMfHX@bITEhf82DMc7}|qy zRcNdU&3p_D3{C0==5KT#xYP_R;aV{zFtE+N?ZVC%JcA1n7EKW#-Vf!z|JG9iu}tKlHD=)60gwIH-yhjX;E;|gPt z_!!8$hn8NEPhN#2gDJEuEq&2E`v?Yl(>CbRBV!~Xr`+gEw$Aa4tMogUC6Qy@#PXz` 
z7`rsl+w~9ois=<8w)mFw{N0lmFc1v`NVphCDsg3~TlNn`IyHVBp%@-t2riG{S`q$; zDJl|;=yPG3uhMf)mX}Rw<7%0fUH3`Q{D(SoSdP+I!J@)8M3YSjHJAd_fq7)BFy7h7Kod90W?iUN3g#MwzO;Dq;r&6n(@j~D0P z9+;;vQ<)8F`1#v!R5EVv?d@@Nh(*gU)=%95(X%mx`XY>kYJh8=!Y|*?K*E*O&i!mM zk^9%GiyZ65dDgcKJ!FT~X8=y{(;j${p1?1a8O*v`Rrp}k;TN-UU%rnbiLwZZu-hUc z$(py?tFqx(Uw3jUZP(nBD+__s4aPDHN85MR0_-q9KkVww0ew_^!Pn-{`aBfw8<=+> zbIs4IGT1Y2nuQjt1OG!qY+ngt{C8p5hQIO5>=mnB7QKkkalwiZ!BCT7nIv*wf-TdZ zQ~HEhnt}p$DSR%(Mx{IJ@HE4~U4&RNXdI2bz3;vWPKH{Hnjj_xTZz9o$NatnmZNu# zpN)CH&Uy00i7=)w!ih!Nz0*F+LpZzf_4(u2ytsb}zb2{D$82_X_Ih{@Adlk0s4Qdu zvm7U|_+?B?SfOTVKls32AUh4^7tOHI6rAo?qbeapH_*&eXjSC7(jgyk=A*y=;FSHJ zdknn}#8$O_&JmSt-RqyHqP)Knywh+(%2A#@Bx5m9xKgHHQ^a(B|4#42O2HGBR30c~ zGjtLnC5;eyI{`%vY>S*~2_V-tVHqMa=crQk1@r?BPN%;}N{EkY*|*Kfh(EQEnwm+; zAjqcxNVw%{i{lA4&&SDYPfk=+;3-y<1@L0yzwJfTM3G?Oa9lR8zjW8bYLq6glt&es zxXaQ=PgaFqh+*m#YFsQFo#U{4@?I9+tz1ehKB3;HVKJ0USu?EH-v5s0Rwg;u&XRtw zBt@1IGX3q%DP9H^rbl>JxA0i7F9hO{dO9Kal_!%qssY3I2xP*bBk<#IYNR1b*$P4LhJ`yD!;D)h8}*?4(^uZ+rf+9Po+S7aL^;|W+mkIx zer-K$yXN}rwSPvq*!`If3g8W&0!Sj-_7A}v8IzpuYsX_Md!WrZ$m26eII;HF z_?V#n&7@DoL0=VO63%d!bw_)gDEPjZr+VI!QCIa4gsocPlSI!6yT7%br$Q2t+;-Mg zzN_7`)S?1cN(B0JwIBJCkgIF?M-t?!Fkj60bv9cnNIxXLBIZ?Y2}y3!&W-NXjQ%F| zviz8W+><6Hv-L0^03w<1a7eta7az9a>p55L{5U$C=iV4{Ud%69-z{n<9Xjah{NbmC zkm6xA4}f0g<%L^$zlG6&p4Z;|MdWS(=*xfSkh=%qP5G7T-mNaksBs#c%Bxv3a}ZPh z(nGI_2{mTp5v1GYzn0;ic|7$fhu8OM!6mVWw*+gGbl7DLtO{FGrjMr2G-v-}EmDmX zy>X}FWpri<2~LRak0CX&Ig9W0gxk#Koced6$~A%3R+ZS_-pS-@&qJ|AxiTn|G9y*r ztmgvcG)*KsOMeO(N8sS)Wc(P?9v$z6qlT^%d&=y%C zA;Vj71sn74v)|%<&|G7XpU!`dZ>6~ zlqSK&k3B;33jn~VrEBxvS`4Xhuo$Q%*LYme3vmd0@zS!a8+XWCpF42 zRoG2pSJ%|!&xofIb_qMmTwjDq>}x|oGf7Otj6-T{sh;8n6mZ{7ERd01g>*u_KhtXUWKW}dk~bw_`s_aV5kkv z2cp}W@5_D;K?xAgR7~#x_oIh5=Yai*(ufa7Dkvcy<`2_CXkZw+ZXPZzddCdVSv2ws zt{rp=3~-qiFu9N*Koupcm9NyQ3$LsJGdLA6!YinZS|@L4-rJ+PoKO%x`7o2g_2d_; z7?8vG^Jy?1_QkZNdRLYRU8A&>r9Vm%zVZmb0kvS^c>?w+F@JLHs)WzSqS>1ggLHWH z-%}1g1VSFhKX<;5=L?F-LQ?edldXr4SEj{GEGT0sN4C|Yv_NiYC{6OeVVb8x`M 
zKx9PoycE1sgaI`=c$ioi&?=>Yb)iQFVmq46L~oRUan~5%X21Y3l3pl}14RND7aCLt z`FeGt0SCQTWN|yNRKv`=4Fc2<;VjMzWy^Drf3GKgS`$-0LlCNcB5~b_kX{e78s~c~ zd>7OYt85s3p+w>JM`%0(a##TnYelaOsD!~6BA{_-o)<=lKq7cc!huPF*oc)GqFsQ| zi%Zl>t2X`&(?Mz)z0f75Yjbn6#AW#|rXEdK!TWO{9*2>-fBzilDVkXd@V5o!+kQ#C zRV}RS?20p$EQG;fVNK|sC^^(@GzQ*kWBAMm%??EO&ySYty&A&i{ena0K^VY`FGg^w z7p%8R^9&7wzXcBwvTf`(&qI0~A*>;@ua%QD%Ks=<;Q?H&`5y zggrsW(+rEYDPHPYuV<>DS6L7u1S2=S+VkPxO zvXkuMu`m4l95K4E78Fc@wNjf6SCgw!N0++7$4ysfwR>g`W%M#Vf zpoeky_FrkRALhq1v|KX0A6=MSXmToZs>B$0zKT3v${Lv4X`s97iUond^S7@-Jpbm% zM;CHfZ-D){m;@1LrSo0BI0Vn0KSx&}#p=fJ8xoQOdN1CfZh2y!)v5zc$4mCR$A;%p4F4U-Ew1Zrg*HrkwECmc$u?My3) zNsPFig5z$nY?bM+Cvi3wb6AV(wU9|~yDF1nasaO&>!BWc%d4fqy$Z;CZoE#CLFiF9 zq)uhHXs3bdBCj63$P0{&bl^=8L*{~aUiiWhgo>t<8UE}U1&ZGxMt%cE^_pRqM8Vjl z@xr&~-$7o{j1l`574eRF zfMs{*om{22j0`r)al?DZa$>k?!&Zl^i_U^=B#=`2QA?G-Vyu z%v=w%H7Y3Hga*Yb>})1}EfyG@#A9EiTuCHMA}PTkXZ6?pen?=dcz?wRLCSq$}G1rELn{ zjxt1hUDKNWlp+OxInKyKCbj!>Ph{+e0~4O$ z_wcu~fl~GJcr3h(I|8~co8Wuq$xcmAUw+U@kH|Yb0|^*lKyRS~XTW_4V3gDH7$dw_ zN`r{?KL05c=z}t4r8NNK0+c{Ep#e>Uf^0Ed&JB}>XtMaSx0jd1U3h;TQVZcN5-?@e zj8f<@bU_?@wcH)idj@c9veDD;LGvY5|~6TW-%e&{|R#O zz?0K|x30rvZNvBP6~7sC)k~R7?P1~;T^5W3t)l%wW2%k@ln!WP7Zftb2C3f`Id02A7;W%j&*EggBe&vpOKYc ziq7lBLjL_R9j;gA!&I*O^ejCGyAV$p$`pYhU4MWTqbYhb3fQWqP$CqSl_7zF4=?ls zbyM{*T(uJf6#SfA?Rr%rZ+zBr}Cdk(maSsgO)%PGxFTrj%JFGbvLDMTSU4g+iI9 zijp~LFoec)-PZo?z4q_?&i>=P&RMVZYOlSP;rTw#{oMCwxUTmnHWuYG;Ph@^If5Sy zQ)bi;qV@Fu^J@c`9#L2l@9?~ct6&d;H$1a_xjQMqy$SHjTU+GK+!*UHd|Q7qF+Sqx zZ~Vs>BwLX2#b-IxZ`1ZovmUq3iApPySl zyXEBDM^YCSm4mD)IUWxJBIZqeO*v5?^X0lO%G|5O+pEIKIK^Jz#%c|{pBip3r#h*8 z9PV2OKtpc!>%KnMu5hq9Xka{oODt{{e{9^i(dA7897~X}nOeSo0BBQaOpF19y&;XX z6m0UaBgRGLhQig@UhC1CnIz}Mbcp2L4p;8FJrv{o5nxc!)zL8&U`s7vjSBeXU3d|| z6nqR9fm}aK7{$dSpM#J5=aSR~>)$@#TcAyUj0-`ll}9=@cgw7VgWuNgbl7{N4J;S= zD+XDF9Y?#Y>4w*de#~5My zc+i$I_J=|D0u}p9s=Q)Nts^fw6Y$_W5o8gIS5H^2sulZyF!pj(kJwNTCGRDL*s|?_ zDgnhOYp-JzDwg1hFtV}f!()y}dZ`NKhbk0v@q+8eAv97TTz`)W>_t?e2%v^k&N5>( 
zq)#O2#@VCM*}Ci@|MZh()>R4NC;T;HR1W%Gmuyqh@7d*~-FJ#2`n$wICaP0Nv6yP- zM~dnGnx}ie_CBRoEITvZyZ38yrw~V&w5>mOIt{O+_YAqs=LOR`iKvzk#t=u5I1tK4 zViY9QD%cy%^GXgnIJ_Gjy?`(R2#Wh|(goQNak~m_^_%RLLn|d~dko4QM+a}FpL>jg zH<&JWq~-39Kj4n2BR)OLKXUMELo*JsUTAbQHf{Ip=xnT7cr&mqUI(+VwZrtId_of^ zI^bYCxEu&1#2mD41Z_8a4ToA;A+iO>DGytwnWb=f4x_+pVNzy#bVOY2S^ja>48%dnvH2CO|gdQ^a6TRhTCGI z3O8=%0qO7l!+mY$rh zKF|(Y8$PF&1TaM9+hYhk6d{T|Vsu#V&c-qD>XAnW%VpjyJrgUd9#Y&g!Iygk3;mr` zVCW|Y23&q5kU@4qAcn{*fmFSfWujY_78AkqP`HmxjPuT%kL<&(%grA!_)HWm=mcH~ z;^VX%;5g|j-+D^$%e2_LGLZnK&4Crs)LK1(r?%A?DAkny3GieHuwN1QP$m#i84aO}>LLk*ECsYJf2 zILeNe7Q{RZc()v{-GquAI*rD?%3-a94@Kf+$dE`Y1(-`?3ya)0^Eh!x0QnEB7Zk;& zLrUNuQUNJKB1b?)%wO(GhNvEw2LPOi^rHW-xqXn=uL4ttatx@wlV{El)D|jd4cFAq zbPk-MAbKsZx>RcagdUk&dGq{@YU$EwIto4=H zRih>ir*q$8d1EB^J7u^{8ho8i<<`8k)kuatPp=sNHYG1@w83Qk@#oNMrHfMN`bL^$ z5~%!}gFd{aanz#dhR3taUmI~Z>fRtf@2d*iCrXDFC5M&)2H9y-QCh0&=H`|((wwq^ zz2~+qs;o$-`_og%Z zA-71Le#lKB4M0#m2&W`b(Vp7I|CcZBdRMj6NEmw_{%U1enuLL=i13<;ql9U!Ed4sa2mzHES6Hz5;ZSr{bwBWxqR z+Q_J=%P>Qu$8;SoQIe{`Ex7-^r_I?pbA(v*SeOQ|UlI+>mUBP(Y3NyM5YfuaYHYdn zIFODaq~XU8fEX#7kaBB}&6&dO(t2k5BBwrvUv%AM@uOv>U+Lt!!!NSN2MXL$4CA)g z(p@+zedGC$yZJ|_RTb%3tOMxf9mnE$sQaGRQD1srYd9TSWT_Vud(G$hcGJVjIjv{o zuMMuJx@c5fEZRc5T7PfAz2t^!4o>rnL5J<~OnG?Lto^;$Z`AtXN0F_xEScfSqgsgSq`vXMIu)wRra=GH3Oh z9(LpmqdK^vi;|VqutqgN`2MlwoUS??%4Jtvj-m&~zwQttF+dTiF(lC1i2HXmr(@B9 z(I_1pEJlzL**g-<+ELWF8WXYR3r@SJ?N%taQ}iZvzOMKUDw_j3jFbRUF32{|%ncbu zrn~^BrFlJH)1h^w){IREWVV32ckiytrCj%%2%eklSuHB6S*dRgdM+UWk}fWhqkR81 z#fcpdI}xeR8hL9XV0lg9 z{<*6MxOBzpG>;yXsVTSYTb#MuNy8Rrmu1l->zGg=*w;-);@GAy2KwpRJ%uDd5WO!n-#eg%95jeBS2 zd6+b^AAy|GB~SR=yISG=kKjZgS8s5xv;0z3tR_enY0hF zy;w|@z2C5yefjwzxcl||{HndQvodCJb2AaAz%EJfVPsQ@gnjuRervO7cS3l8ft3#S z-4LXO8E|;J9aNIed{ZWfZY0TMi8;>6$;o^#RmNDL+3ujSKn?zfqvB%k*DD_XCGau1 zT_C=mzN*No-`OssSMXkZ+P=MCO7F*QZ@n?-xpz6F7G53PuQT^MCfs&5Yvg{QT36-R zCski^X9q8qp1Aah6XGYUv&NZHgc)88$?AspsN3XE@AUwnl9ra%Ztg=@qQ=>(R~sX6 z{yOfjPqK+UUHuN*+9BXt)<(ifo8bpUyxQHf_=U&mYqFgm1QjNFNjn4@$4 
zAg#>)z1@}jG-mhaCYl6i_8hJ%lf0XiPao|LXx821UC55@s? zfp$WqvS`WMySj#<&@lvAMm?4~;U^Bk9AZA|F;Kk;fb2*@5*Oc&BCA~4Csa9V5M7uK zewE0UI$=i^c7e?UL4#jEm#+}HJoIp zYvOy>x=GJqW`N6>Cog=Py0krR;COpBGqkJ4;8#Jui{dB;fi9j~7-S$02)ry$ozu?} z)*qU3?_+ms7)z~&xEV%L!U}@r<<8N!8l}GgVz9G;gyz9Ed=aoIWW%huUa$(JW*(xfoCvT0k$OUa zwi=@4zW1gkM>AlKw#W4VZ&2`R1DzEuFQMndWb_5P3Ok6MPa;i#BZ{KMan)#>?p#`C z2&ZcFd|j)GE7LFdG^*$rhQZCC^_A&9^Yx{lPdA6dJOnB0B&#N<-7yS_s{Dq|=!{ zCH!y|NDIsJC^ba)910uz{l7!ZC!U#^X)@s7u9RZ0LjM}*%;czmQHiJD2Y z(54`?3s%d?v5NS8p#n*j)H6s*-ppvug;EZAcI4}!tlES+Nn3^Y5nVZanKTr*F7NpNIfrx1yzp?bMI<5xRs?>D&=f<5N%je(vREM&61UNY zIQQq*iJeL7=@34g!~AdScxr#CZ(_^SkZNWq5y?|9%T?mvWND_3HnRCvud^@Pe@ zGHb>TN4OAK+1a(=nzfI(&TJ|?W@oI6<_khDkj-ilG6q&xS3UZ|L&KcUbVfn_>1?_J zg`zRhv;63B?;g*n+`ZBtu4I2aH+NWZx_Ria&{QMeq6R%=9-KR8KrtMNsp7QeF z-QU)--{E@Vhkc))KUz-1W+T~d5=F~vb{iSlGPt3Li^a6itdH$&6%`yG2@|J5GQoo!^ zl29;P_&D^&td26*e6nt@u;sRzS0+v?V-0ysXFqH+7fD$7nN*`Yn)t=CoQhkKf)tI26XuZ}_xVxs7jSKOE$8`3g)k zJj{H&bb^-|0{(O-JV@_k39u+W7g`WsuDye$QJ|OG!yJF8CwPtCt}3`t#a>(&^r1@7 zqgXWKN|>%#^W^6+^bpSRtV;%HSz`fPRVOiOovo!@0c8RnxbCaIlqr>w1SH*>c$ z)$b|I=#lV!SDulzjHP{udqOR-CLqlqSG8kw#*(3kqU)P}<00+VD36#`Ti15qBlV$k zr|xq#v$~(0qRxNov*q*I>`>j^M_gF1`E<2&YVC>KFLi=>XK_H5cI|Gd299u}Ie7)M z!*rHaXO2?pzRRg-jOy#yiFbEwVKX59SQnN8I*OR+Vb#_Q+bBQwa9+J8m?f_Coe9m? 
zIeuJ~(iPXVBV&%A88G)$^)tNP-TA+sA_yjAmUrO5yyD&`;~7b3o^VF0kN@YBW#W#* zH+FEjALbuprcwXamLr%`yu$E_Tl)O9{KSGg;Z^D8ig%<=7fty*SJ4vh7D{}^9N5t2 z_<8H`w;i7X;>=#DhoGAk6{Uwt1A4lmqlqi?^=P#;aV6N0aAxLNYgH?w?+o%PWMwFZ znQq%{XsC%(VN<`Xu>dquPeRKiFUKi75j`sDx9Lp^^SJSgq3U@v^Ftx|wT5MQ8siVw zC=^tA3ofTF6|j=fi}T%0s?8($`O#r-|nz2?w8n(xm4ZV#R7@r z<-%T;eSyz^{Rwy!)Fj{3`SAPXkMro35zZkz?t+`w7@wM$oRqo$G0vcJzifE&7?}O9 zMA;CpfUlvn z8<{a`Cn$TA^tkvbdv;k|H)|pbpQcn;f*YG3vfsGU&d$#Ke0Jjp>!z~T@Q=La52Eb+ zvq=52|7^jeH0rQ#x7eERlyd*wtZC*gnYL49$Ly9iwL1$CTM`TL$P7^?g6)q2@lyo< z(q>`*gFWiL=a4?iBXLuG=9Gr06*uE=0yx8yzrxsed6=%nmODJB%kJpd+z>i?;aXjq z_{x@xyLD%)XZ(0he$IFJc5~ILaR0aWA72m3y#nPkN>MbBv{9Du$mr8EBVQedFc+al zo-bYb(YMh^!kW{!ij1|S8@w*};DoIi<+$bZO30k8ty8`3pP4VRx`xB-*6<>HOl3tY zjE!1jL%&W`As7!XM$lKz{^5T z?WMRDX%b0~C?x!C1#7Xear=_k*3{h-;sgV~>QMo&`tV(EVtMqmz z(8MJ2+7K``d`Yp#c=-qC@-2sZ!~g}|Ii=>-5q*%OwSYoUwMB$;9XMANn; z4fk$Ytk|t6u=rGd=s;5$kWnV&_uYxeVmA|$GtuJOJUE^A;2J#KB@TFgYN=^yo5qwU znr(Zy6c_#fXvtGPFK(zi^ZSL*0SP=CO3^{BxowsE2IMnt-;QD5`g4!mFXW~@-`|1X zbIDK~f|EUDD+>q?)~THAC_1nirKbB&xBSiSOwS8+vPquV{YEbH)&9HB$O?A~Lf{r| z?%yAj8L~>>$E9G>jkH7J3@3~BP`*3@Bn@Sd5Hy3l^m9;o8pKWR$QMFLNI++VAF>AZ zW>}-!gNy~y)Q7LCrmd~`>5|kF4{!Sm9kbVw-@I!}`FQ%7xBi}!W;3|D#gw>vE*GKy zL26Y{ziQ*5TuM&P7plM*gfRRb*8_ogt^TvaYwe5zVo5aR9z*zk?Dx-8q=tb9ZhpMB z5mP4+vwC7^0p3Mirx4brE@~eLv9J673iX)?-j{6ug_(6#Eb~d{rc>4r)APyh11Dz| zsvlDoZyg{TCxN{9m7Crg&;kpCm)N%C`XVJDP9Y%{SiY>QPjccVX{72$gB%J9(^3DQ z3rYmyPLfyD#n63gfMyb)rezd7p?d%_k^&h?G$7PH9}~llR&V{MU1nq0uKZMcnfsGp zi9#;6Y(^e`Esz4=PW>L3eKVFX8I23~%Mw@+;A~~H zpy(r`Dv|zxLqKtTXTycBD_dLJ8QY#)GQRR$R>8Ht_@^E1%}lJp467R`VW2{G359w* zAd&?^i+xd)k?5r2Wi5lC$+p9m*=4yeBK7)$%YOvom5D-!Bpm?bXe9 zfMzDaC2-d3eHQUc2y#@Tsb)q00Ld$Nr!Oa7 zi-7JBvCJx^)$Cyc;bSp(nfHit5vdA$hZj{=-05oi`tQAU=7G$mrrge0g^NC4o8M}uV(z<-8Ui(i?qXhjQ6ZK_+R)SE88s{}!X`;&-34gKz5c1KZ@0{8CNh6~@Xm^2Yb z!YhMYHAK|Y(tnaEqe|3vO#}3rQCf7@8C6k8guh28e_v&aSFr`H3>OVfhU+0Z6Q}OZ zF!R(JI0-NBFb|HoNo6Q6>^P#chVSZx=IOkVzn+>rE1P{3f*j4qg$r z2rmZSpF>7jn{n~qc1?y~E%>>7#v(<~Of9!H9+o0&kJ)!_2Tj}_QYQ&XSuP3&#W=*I 
z)2>|%@yZ(G6;t29g$2Juu19mXwbK`SXZaB5=4Zu)B5^#Kr8Ta!IL0Y2*6&rK9Aqlc zpXerYHjJOF#SI&1_jsMyuElZbnyH1;7xMUL-)Il>HGYL?gWbe?yZS>zT8k9^n!Qin z%{bp2RZ(rtUqzap*VfM4?!k1VG{`1`48?;&iZx^-=h`|43OcJR>DEOBx5U*p#43qB zPdaspS<}l&(J%7vy zEb)AHOkHd9HkuZv;1!Ec-pxS^`D|y-dpRbJE8%4qvJQ=8?vWRjm*ewvt#s^$vG32e zd6_59L6;p46gOun4ke4?R$U|$6@xNX#OIl@=alCht5A_sIePAf(DS6^n@c|li}q^L zMe3x0&X3etPJHj2v?qewSm){l`Bs&#Uw_`C*i*E7jfr9Y^%dP;_dmDz@bS+#Ne>oA>M<5}mzTuw`WXC!(qxFFZ8s;#4X-$6&QZR2f7Pw-Tr{%6y-epmb>C5yJG+I6 zGfw1RtO{MTo@GG+SV!WH+$$n~XEArj{m9#RN;3QD*_78QwJrTj$b$zbo#2xlb(0p{2Fto1j$* z%@y8sTE=oydB#R6O+?0T9}KXacHrFAzpXZJa|{P(df;CQ=CD6$Rm>C@2j}qU-B!v! z-xM^ZHewlnU?>NL!WfY(VupBqe19;?j$NDS&x@K#p;hSiVBm>E$2O zx_PhxNsJb&ssxzro_WO=AlAz0G7=E57ii(qkJPDQ#y%-(zGIv?&4+e*@qQ9_9voX= zOrdO8!4+3z%EnvG#>J@~mcy{Ei$^C%Ev9Kgbf=FL`6HZ)6E zqJE{7{})MN$)#i@%lqWNB?g)xy@&AG)S&Z1?6?$7A0K6i!+?{!-F(#Eo(f~s01_&( zCR0|;BeDJcRG%9B?8;6tY?7++SCZPevXD^~Nx4pQ=HaJX8tcpNA~TWiuK(hFrafcu zNYn+#0vEh?WwC$bE&LrNM}(Njhz3VS$N((jTtc)+>gBC~oe}|R)5-GYjGk+8005JM z`OK`(Tx}SlE_JveX5Cz4Y~*-)hG*YM2G++~-YnW;xOCY4Si9oU&l4gP?6c>ikLn*`0LX`bVgK-~X}joIi@&`s}3(XQVdWOYjoe{8EZK3hke3ku7^l-8Sx5YQ+G;K6ExGQG%IbPlmryrym9v1w{DC2e}eD+Lfm=m zB2vYgW?X@4#gm8^Jh8rqMtSmm(yCymnHL$e8>{@$yC~l3;^LYml-~BdUl3-eJfrs@ zSgGv(I~OS$odc>pECYo745w71Ioyy>;$-%CI}sV0Z{;joQsGG7$eR;=;#!D%ai1 z&c?GA+n*x2s4Xf z2Rbq@07Yi50EZ8K{{@CXH6=gwNH=(%0%#N)_uGke5p&6STQe`^Qkre3cU{%41CR?{ zbgk??J4kihb`Py!^ls{?j3QGkNNlH}ZUHZ8BG5|_m6BTakAfyfo;`m2_-gQN0BcwA zZavWO8T8;pW!L3+3EHk}kv!@h0$tb&%tn(`+hq4nMLv8DN`EE66r_p|Qx6oC%GZ!J)ZU<9;h6UWIz^ z;^OQw+O)VXHO9QuCG}x!5c9lnre@A}=a(;cS`|!Q<$<=8-(^qP;6LF$0pRXjidA?L z(%Z0c?vUa_U9TDH-_oZJ#|2*C!KOcG-=rCw&-S;IO1I_a8)^ahry5ggz8b1Liy`@e z7ozgaI1k<1pBa0f2M!Yfmpy-h0+$d+n!09Nhk2hJ`)ZS4GTB+#+aO5IO0!TYMUI30 znPDqwSf#iAiy4FiX6eWG?=UaQ^!)Vc;IKNE3!hx|BEUYFEH+TXD1FwQ7a)b>sA(58QlH*xFv38mO=SFR~H^`lU!NJ$t#2RarKYTTW0N z3?6PiNZ#aOrS+Yr$J0KHA7R}sNiXR8Z}-QHi^StnH9CP7dO;I)pS9h- z`EjwEX2l&=b|seni@k)4p9tD1|L%J%Zn?RtAE zi`Ru;hK5yUq0;inElwKN1OK8<1z;smCs}Li`l;>On&kTd-vFna*);zO!*@gNntd5< 
zIX5I&X8Bz@x&OX;a^;b{ARo&0NjsfdDCojJ!AB<;{>juPir>Xd^6y_ceo8cf`Cd|D zpsDfhs$F_FMI~fxC1P$owBU5PXZZvu3y@SqyJE+4>sv(r{rkl6eQ}A4hfeRW+SM0>y zvH7>Bdls&Zw!DTTiC?BcO%;f(2H*xaN30gJ^ z+$6stD-tUwjDH|XuI=md#&-($6kFFulV#Q!UgLnKEZGssegs^--;b#ZV6Nr79}yuQ z*be*~kBP!)nz>U1+;(jp9rI~=T3S(zRkEHN)WZWYOv7i1P0dan_|}jvd^mc2Da7;8rLvVI|w_<1V>^Su3B|KrL{F76m`^*SYPFWuR#$K zk*u>mW<$vhB(TMAMOPNi0yYBIpirP7hJNzoeE$jJ3X1vx?ktg(lHNGD*rWi(Yq2FI zhjr;Ul}a*FfBHuZbf|MREOGh6ZCnwp-F+rFgkpBkBpRl_)eI(gaeOu<^)Gq0J*c>i z%EcBeXeX1f=IJjCh^9q-gY-5b_~}R-`yX+RzH#Y9=PiXY5bSL=UG=o9&#r(k@M+GW zTUfMEQG1^d8G6&-U~zUM69V&ULCc!Y+amOTjBRNZJ$-FrHYFuBl_hB>JwWVnKKKK5 zBbPU0Fd2|GB5Y4PWGhe_p5oI3^9-a`wswWmHpxIYl&YhoWZfer88R*D)Jv%!a zo<`6wQfQIgvq-4{*93?eT+AE))Zp0NL6IFNOo$g@?4YS9_WG?tVL`gG+9?-!@y1x* zcyNnnr+^Db*D~J^DakO+kU2%f9~Bj46t)Q7exBbO3Y{j$_zeh%2K0=gq_1z`PF@^+Q+%ykP5b}Pit99Rfz8mK?H!WWWqv- z?YTt+*_@ODhL(uYFMV|Rvx4$;N0iJMdTe(%eNh{uWmG2_$ za|$VHlbC>o&w5uS?)}2lq0n^KM2?661vb5) zm778Vl_HO@YsFGc@R#zl^e~a(#~pi_#q#-=7^r3#)v!tlWyjoQZ~+GH?x5SUjBNn0s*KZG|HP2lItaU)(iqN(t60cnj4}N6?Hf{M zquw2I%>rdxJ)w8$BO!MH^$-_>xJGd4tMF@-{{pf8za%`9+6=iN?WE#^zl^iH{yMav z6A6Lq5{Jn#fMUI-Dj-Y2{z+bUi31$8WE?R}5-6ObiPb>NsW(Qkh+)y`461Q}<&01_ z2&f93gHn&zyzo)Nb0h0ZP9j3ofTAVkmHYn@a(HVJ@jm|gppwSGi#JPxl>HIfKk4Y; zBJ*9K$L+ptYx`aUetk4T>LHlMJkO%(WKl+oGB+M#DrFJc$bq&PX;2Ak_=SSVY&O%k z^clN$pRU?+5?dIWMHf+0U)%B2K?$>X;rV&Ke11QMcYOT#Q4tz0czAb?l%eyCOct59 zZIaVJQ|jeVDVoWUq2)_Fc349x0odD_(pQk1*!JMOD5F}@GUK@pSG&vq`Rq`6C@dm! 
zTXv{#U4+e(}A?|IMHhA~$dgy=|KvR}hc(kT-&fD%+*uo~U{IJiMY6b0EKy0jO z+11@z z9l+NJc8HpoNZ%0pVCV}OoJR=&VM+OLF9%fB#6gU44uvKD#!cE^XaAOzGC^HNMC9o6 zxJ@4T>m}#{Q(Flr7I6uQRnWCT$p9LQzJ*1i*B4&^K%P2G3GPK3Ap}&!ofy%G-IPqU zMJ5WBs>RBi*Hth<8%nubzK`;EeE7cU&TrkHLY!YWYnqkmQjcu~jRBj&_Vz{UQ%kRm z**q!dF8#ZMOE`O7-M@3*_2g#Z!MuSo01;AwHM$C~mkzAqZGeRkS7lWZ&f2*@F>uTi zff173^Kj+BCU9d?+IZzK#))H{!UsSGN_YDDhWZfkBoX|P`4*_)faIlH#^xN7A3`){ zI6A>8cR(k#swjHz@3#ZL8qCc9_*49ajpet>Z`Gz=JZoUH;?a3sMg0)|XfLjx#aZ>U zuTuX!draq2=A^8;V@E=_#r_FV0gr+Z-NQHZMZ?9|=yPmXmjx}1u!M@RhMd8`!7XoF zc-r^Rd$IRii<$cEIWYF($CkHa4Mhqqn-ta6qX2;F?C!=HPlMJUG4t~(*cAAb`oFjq z!$6sxpKtO%bq-c8{Kp08Gv9sqknf{hvRW{T9(uzNtHQl%Oqa!=Aa@@3Q6sYh~Ed-KU#*XWF6j92_0Z@rm@$&dVcXHy|w!uqv!a&p#4_DrHtSiGB0iLczc) zb|~{G0g&{O!L}G5(|AEltYrP^2f~Ez>E#uK*DE74)A+!FLLb?TQy5em*VWTQ0IS#+ zFN$x=&CN}0y9pg8E)FYUrj!CoV@7@cksY1Z9W@cy8!Ln*-WK@Uhl}D=2@o({O1y8{= z1!?T2AqcDIEne8WxG+Lw@dS!)r7>@@*ClnEe6W8jZ`hDoQnH+=4lxzhA-0k;$-U@y zFdh>0@CTubAbae!v)?RX;_G!gt@}-@HI+&fq;=PJUJ(%m)Le!ggKI^x!`7iH>e7C@ zynHWa-S+V+{po)6{B3KiDD;A&w}GjzSSu%&R&nZu&b&O)ofynPjaF*|?6`Lzi>?gcK66#& zKhuqB5*}(&tcc}@)vaOXGPvTy6-tNYNV{9BK7T;zOt>HnOcqib5uX= z6#3sxPP(^$nw*S)`u$lG$Zam}?oXaRrG?$}DYT4zx@8=$dL_~O%~Q(WjzH0QEe?1oJwn^ z@CO_?VcL1k&PNxgsL==bhr8R@uRN0y9}%bWGnzyEM8nv zOvc6w%Q%_m^aYv~nZ)mZ`u3kY)3iCI!JaKk_L`1vHCyN?UADhm*H8`?6(#X&+k}r- zI!1r*M1aB!kvH(M3W5+uG5@5TX*zoJl=IBS=D@3U-l3sARBe8u%T}G2W8x=!uh?n7 zuPaF~hu52*`TyZRY@WvKuhnbT=uhxtw_kq;TYViX3kx~H;k{jJCe0HR6hy2@xM^!w zPKmU+DC0AScGbsx*F|bP9IElMclqN$bbHinp>HmTiL6|Jj7G74V!s&vvQ$2(YhjzwQG-Kgt&_F zsmVB9Op>H%o;a}qOX$YlTZpKliTMu)uQ0eZT3CUtF3Ln+bL;lQaD-r$F>NkS4MfhK zhgM8G+k_mI$h8uBfIzUrnfs4}2tTZ?t4rcG93PvBFc<{`U=Y}Gvg@OqoRgD7q2R5o zCl)wTGrl5@OQUFddMbhVVQgs`hJ7a2Oxowfi6PJ;E$$zegCBKpaFEPe0UY$%>kb!X zN&r5#_7(9{i?s$fBK23patlx~BL;OL+%(&_uW9mrRh8(r<&8~EBpT!6;{!Zk2!TfY z%8fXR8I~RjOm;V_ssu6I6|rwx@c=?iEMeHx`2__ZLvHRl+AOd{OnCN8apHAbTVQxN zlbN~sjr@EjoVsdlTh3T>Io#Kb}%seL z*JCiLJ%;WL&!Wi#zn_buYib(Hp&bhC+Ti>5V)_OKBN}JEedI)|V7WGR-^h$tRG>7@i6ZSAMX 
z)>H#WXZ5w+UCU!+Qbg+3Ug2DvbN=)oI$O_YGzvuw|udkDjBd;(1Sb?^ooeFyO| z3=15u_V~w-+?1y%J`K;!&!^km_IDxKn19d-*-%3z?NEbnZIg3EABD5_4X>~;8)WR` zbEhkfGTUlk8@KreiX;xp5hc>jz?taT?NM4*HV9o1E+Y1=JwenC4~wg*sSTe)2!Q}K z(RT&=5Vk<5=2J?^7L6#X)vXM#OaQzAI7Qm}`+q(A`f0_=m3rtUK{9Lf3ZuJ>zOhnb zr+zFhQUzG{L21xjmrxOAsAN^hdqv!Ggl-C@jU?F}S7kHFx&@~2w^7q1q29d-12psTN!@w@7J~+RStDb zr4=*mQO5$v@5EcAE3t12stLIOk}sYnH}w*ew>u-zG61oo9JFya0F z{TsZ2M5Xe|AE1pt!;EE)%Ne);dqsXSLkD{RFIHvi7p#y+0&C*QE@{5@)yye?s7L8 zW5I+8chS+&@g61$51jkqQCeQ^fM~;M3~Q={o|kN^usmQ#62)-Z%F0SY(ZCd8V^h;$ z1TjKa#0LSlc*3=NOCT}&A}Ao%ElbP3t*>5Uxa*~qln9cPK=4ME|68SFo}OA~&#J>@ zL5L%;-`JzTiHHcJj_>!qCY!>(eSFtWe~^kb$aV6D5fpLYL6~cZSriZ@xK0kXk9`?@ zLZo9jV7@f3Z)sNBb^N%hmzNj$WjHmkF_O5>s<8Vb;T(61F7qC^M$}_C3>gQCmr@e3 jh~mF;lmDOp@JmEz&)J%|-k(ww{IyMQmu``^UC{pm?Jdri literal 0 HcmV?d00001 diff --git a/baselines/fedavgm/_static/fedavgm_vs_fedavg_rounds=1000_fmnist.png b/baselines/fedavgm/_static/fedavgm_vs_fedavg_rounds=1000_fmnist.png new file mode 100644 index 0000000000000000000000000000000000000000..313c8299336fb9c543e44dd3771953fe90a4307c GIT binary patch literal 38840 zcmcG$byStz+bz85mXeSb1*Jhj5NQDsK{^B^ML?vakycs>1(Xnw?(S|7B}G7~El7iO z*LU5%zxRxHeB+#PzJET(cpe?P-Fw|@T`{jY=h`8v5ATr>(h;IiD3bdMvg#-l7Bvcm z5r~fqe=>iMrWby>?Ifq`q+$2e$<^eAIZD~Y$^N;W({n3R78mmuj#hTIH+hA51-Mx( zot*3)Mfvz_{?`k5?Os^$jT7Pr!J81+D?D;Up-4@T|1h#8v#e044{Y~kr8V6WS0`O{ zHFTKqHtZ<&VtGPuB;tosR_W%5=7( z_W$?0&^LZK~cYd#_7{`Z>fZdzS3v!IYf@RwjHUnU zJv-rzBxvsjT?hyX3DI+xV?%ob{``5uAn7gE-!xiB3M+dq`l}E{9>csn-}|bp ztV}(=uC7jddFyD`dU4Tmox;-C*w?Hp{IaMh-QM1w;`l9_U)We!Sm_xVbG3HQpL1&$ z=mpp{9?H+oHo9$MzNHhAQCFu3BxMeV5B`0)y-@8ii;KKQz^pUHW$o9&N>NkvUV^AI zr;?Hqzt@q|+lc$JvU6Clmz+Y3cU)xC9bIcr4`IX0 z{m!&|5z$-bPoJ9pov6t+Y!D?TCN?%V4~XT_pV|1`#%V4uFQ2MkWy>!h;P>_|N#p6B zzCwn2hFHzpl4_4#PEnUt{F^s#e*E|`<#C;hh|gKQdeW-xV~f5-zvN`*=g*%TKY#w_ z?_}K-Ufzdm+qd}nr6QQb$!KY_qZ{3~pK4~SVl*~3y8pdnJ;L4I-fsLOSL+!wpT3)$ z$W;o0_**u)rM=(2DXn18kdrqwAt944&DG-a+!?&5Tl$2+ zu*th%P#{mcfG|fhcj!k*aBy}uax99QVfcC4za 
zt5GOiQl>ZYLe{23>Gwq(W?rj%b8&Goh&m|<_&#EPZ*sUbr=XOTHc`e08@~?@ z7m@{>7FD?+&ZefOEGKJ)M4gw@T^iuYUbImoLC&R9*bdtl!*sg~|H;#*GmsF#3*haY|dh?d{AbMN>NFc7^`=`i8@TNxp(hg`LiFn&0Y|; zZ!2xapG`b7v9JhQ87)Gh#O-j4O}ohOEd~~D^T-JGw9mOXwzO&;Hx!7(e-UN{)x*=% zGn%Q$sHrpmQ5kx15#P(pOIKgNt+NxCfPet@PIL8+-IUnXt5;KCJsTSv&92j4LG;45 z*$i#X&8S(M|Z_O|!eRHBFSG;>m45Tin8 zud=hVii+xOx^2$L6?QSf<3jMhg7seE=O;To+SPbTHwq!m0+lIGr%0R2r!tg^%M@}Y zI5bp7SC=-H&*%a@J^ioRXEP=Bz-tcSDxpxn&<`SPAD ztYfU)>V4-N^a%_GGih19O0Q!V;!8KMAAI1|g6f)a`SN9HwxWy-wm=g4K$Et>%eQY| zf|Ye9-gex8D5J@_02|7G;|4tH$DT^4e!(54BBG+tH0RUrzmLv*+G{)5t$coVy4gl5 ziB#T=ot@!DD2(6ys;Y!X@*kxQdau85aF7llq#~oD(n@~;xp!g;T}qEFN zi?h^fquXw{p`l^0*yOIR zp59E;`B~fE+PF~Cj1ZsX$p+R1W{EdQq5jI(^g2BJ0zB)hogF9cN2Tey;ZYxQweoWF zb3TV432ZZ7`E2eKT2isqcXp@sDMk|dTd$Lnm}F#R$jHc0(C3sAZWRpdvaqIZy@kGS z^@FeJ1T#d|AD-kg4-W~nljf)^_cW=4kC*d`ie1+Yp`73^EiGm1Rd9MA{KoU#pLFd| zkAND0hQ{>~+8#Bp0e+kkE*~G?^NH#bRUU$jFXQ9%P;k}aD1?QLJU4OPym>>$!0<-= zoo!3ch;tgfmMPuH{<(82Egbic9-FC!8@cqp0Ti4;GSO zS=M}`CPVlw3W+6!t3xhA!PzM0=f)-8rylUPC;QVrzX}yUKbg*--_yXz1O2Oh}o$$`? zu7KNyF=XDAD_5`pQFS6k8e04;oEu+EZTFKKPJbsZAq^e=0?*po`ebk14oO?U6KV|} zyPo?#M>`xyVS}{f)-6VT`SJw`9sBKhWu)H&+)_MrtP6~t-wl{jf!^uc&>+4%S`_gL zkMvpnIfPURR63+gBDiX#$SC6BC(+-~+bw^VX0T2}D;Owzd}p}uG5yzAp0@8NfLfQu#UEEYExkiY=~mf(s?1BjS~Xh10uSC{7{TmwqK0A- z^E@aX`^uY-N6Lf`l~Sfaui~{ue^So#CfD^O^!mEdv)!My&Url+lDBSY7Z(?k9qq3h zq0h`Z-;fiNkOV+}a1BA-;zp4nrCEV1+}iSyXVHiB)qeL!f-m=;e-^ZRR4Uo#;NbA= zw7+i&B7}3j0S<|%u&_|3>}hZG79e7^Y~7O7$d4bh6^>y{z(%#Uyuw3v?3P#Y! 
zq>qh_McUK%Oyv@vbMF{WD=RA`ou3V7V?df}=IfG+d+bQLxm7Dp0o-7O2N@V0y*JzW z)?sNVgW`7oJJV;+2p>FnP{241=;1BBa3{2RGJ5*FavOLNvMAU&6Dz9_@6)5-U*2C+ zbgyq{_zfK!@I{*Y_B_9!;Onxon}BKxUgSfUc|$9J>Ehx7vw#$Yaj{7oZid+HuKt7L zW3IExrqjKIRA;NPVv|r?wx3=D(~M?OWemln*h<|u2tJtFyIN#_a`IAfncY#Ulnbq$NNNL z`)m(lSpW$DqLJ$erP8X@$b9d*M3c450 zFqqxv?$x(GQ3KEBv2z_JK^!wPGhJO>G^Dbeff%W4D71YZwyA1zn?5(Y>m!xN@$nkgXAjxPKF-O>2}QGABO)SNT&zFc zo~&~v1pw8tIG85zq?;^Xzv=>vobsxwWD;iwX8*tx46otC!7$v1hQ@|(0`BcBQlsS^+)dU-il#MaoD#jwF6fUofY6S#v1 zvE0&tQX#Ri;OVH{yo(GQdUj9p^YgbDvp>YghYk-9V}AXha5Y=?vy9(iInQyjXW!qO z+Y9|&-{Ni#W~nfHo*ga(hlRBjHJycy6zI<`|HxBxH)`|@f$IGkfb`;YlO)8G-J44{ zaUtMnB|Jq?XsD)0=YYr+fMbR@H-pfk6?M8!K|!Hu%R5@29|Qy38#*CE$QR_vkR`*U zXEL0vh8zeg1@ibXDM55S3f?2lZn_a^lu-OOx8@!o`3%vlek>HNn8CTuhV=fAA5o~% z(o*!wN*ICwvy#32qoT-B?o#jJ8G(XmhVh<;&#)bN3g~^Nu$<6R+lgvFC~VR?IyAsO z_`iQo+1l2&*)Mr+e0FjOhlKZcsv)d5L3nl{*(b1~LI`!J>}WfDD)v;r+>*TAQ~;U& zVOTMRT%i@QXW=!dj$B{af`8xK(UH;Iq~RU9z3l?USI}}mDnnoQJ-hM-OdYc@|HIRC zJ5P6aE3|iZrb6U$j>ijH`D5W-$WHHvCO+-8&jiHZ0&zxSisVEQhE-Dp9^(1tCmIZIC7#yQ4-_aHd>6LroDZusEJ2@E0Q9Y@zQH!k3sl zdlm|aAPt%vKuzD67%CKczBd8Es=#mXTl6tJF*i5aTm4yFR;IzZ0vU$1i{+7mFgQ|a zBO~Vh_31945~8=}_RiUaGcz+G{rQI78&)8)pg0Ap7o)nT(|qfX->0Qrv7M?9$Fuw731ddDaB^}^pOz(b5P4`q@tl4!rA7DnGkhi+pictm?=qH_ z*&CCt(|~XKKHcHkf+ndDsc^OR(~`QJo`***fI*J2 zU%v=1UAhGL#=ohl2_-dF?U2T5BrhL0kpJjr3p|Q~qN2JeGw@QE0fQ_Z96ATk-FOl_ zv{LZpWqVuO-E4!J(v9UGTyno)M*VR{|Fj8rT_TY9Qff9m%A_ZngYy^;DL0+Rpjs8C z1;t4lz<@k};5jo0TF3iev;3K+%7y0$J;L`tf*#hd2u&-dP@w5#D-0!74ZK!T(F0S{ zRFu@g&UcmVZ)xE(C-s-0of}Z%NR#cYPxGP3{Qdp6CWMbq_aW1ApMO1LfH{}|9tbEV z>R(lMd=SryW5A67pyGorD+g0_Yopiku;_aI)XI8~_xW7D4(x`pogLrK>Q9ZR*i)aS z$^CnK(;^E(SL?n33nixq+l!Jy{vo6k)e_*f;p%a1@8s=O>w7LVYSLt{fngvrEzzYF zb1Pn_&;6KssQlEcYimNh z?-j;-Y{CLGIXeJd3>4@K17&r6h)X*^7&sX;pjs&)R-r=`n{{0n&eKtuqnAa6+Wlao zAOKPjb%<_%{TdseVatQ>4EE_UAUNryU#| zEUv2R^25R#NWV{+A#Dg9kyhy0MP$wd6xTdC3B_d97pfhQCCLkiPglmPiiU)ve@>f9 zx@|}mY$pg=$C@T55-!IJ!dq70=3Szr%hie)0<4p}#j|%U+0!4OJZBQ10LmM2#9l`` 
zYDb;DFd{+fbd{KOMaQ2wJ2`E1&=_4$T3K599mdz#{87Nn{&34&(B{`gybBD-+yE%K zs5>h!FRZBXh!9#pR|w^`fs6+<9hFJqFXsevZABE(+q5ewT*wU=9czgR$o8i3Sr zb8~~?`F_Ey$nD#|VeeEGU%I0A;QclCz8*3G;%R|X9;|nF#P|0FRGA7;2Wf6lM+Kmn zX7GymE?dYvUiM}x14cl}%J@a!sDMRdVyV#`<)#jA9S83zU@kwc2)E(l;)bprt`Vg) zpYHDNs%0or#Yk5rd+l8U`eUP4csj*#p$}(7ixSyhLCeUQ8mM1T<5Tx4_=|2F@2+II z83F?a&?rScLot-v85KHBn2fN$-ZN)4pAs5{z)w7 zn2B%kG;zC3HM*Zf&5EdK7e*XNq``b6o^gDO% z(9|ZuNd`qksQ4WMrIPvLl}4I8@bSxW@ctCp0l9bM{h}! zZ6#h_n6!O*;lc%es9%a-5BKeUm2)sjdcT2=k#~aCcevl5bA5e05kle#a}}UY&SUL7 zZRNG*iLo(}>HT-W1##*W=D%YiC-*}@#`;tP=lVDlMwFf1*jB9{AF;D!#81NQM_>Qm z+JyIH{ZSzzX$OeCCOl2y!2^xw{dPvEfT`)BA&Ry@lD1U2unmCN2GgVgS~V}sW326g zO0$R2#FXdz#Dqb{*RYzJ+o;{5y4wxMxBJh;=69Pm(JikO?%!9dnaZA>+~;TUd0V=| z2-7_batj`ld5_96)r|+M?2HgR*#X4=_R@h9X!>l}TG#8;PwIPGMzP`S~RSIr{YW`7-e&*0s?RZxLf7R43<%bXZ zoRijF?_YgqdN|S`((n>%B2|tEun7t#wYIIfuNd9>4S+h%PWPt`jAj$3S3gA09No(( z*zc|b@<7XOE2~Gwl837aTY$(eS{r}vo4!^w!UIBpT3p9r z9B0o$eRW?}@vemX*41#vJAsh?SFT^j2l~1L$obdsVI1L9jEn`^?(_5W9WK3jx+Scv zSFUIcO?ySq?0EMbK2ohxVzuF_@(Tco24U5xsWVG?1O+cbb>AfI{_&%t+28H%XRhPnt<94&JBXFnAb=?%g>|Yx?o%~Drb9#iviE-f1uG;># zt|mf_t~Z^BLwTX{I1PkW>h9rzh+~}VZA3J@&E4JCr}tlGD8?9@nY~|W&02o=E^6{9 z{Vg9useatlW2w!_5gD5v@9DYSsq)H7Z37EV&Fsh()5rkvd8&?pQZBb1 zy?{)grRHCYjax94k|h`s;>6Sa{kW24%_E>gdh%@o=9ZR106B8hYFK-5Nk)^qdF&x<7jPLv-|O=(m(00YW3gLpd!E6F?kk z=4vP^5_+GXxk5N&6Le?Y55Dz^8o8|l%6cw|Tkpjiz zQ$WcGrIK*VhA`h^k3`O)kb4b{M!CAWo&W)^I31cS;(!S)io0_54|`RA0Si7tPeyP3 z92-jk&iil%*+u$ZheMnQ<3KaR7_5uM!=ixfQX_qM~N?7=!8i#rZcUfjzX9vS&Nf} z1qC0bbsShGVjjUK;=Yy6s>~S^`)lU)G7v|s0IN$7Qe=M%~BMAY2oG_`ws*4Fl!^X2ZZK(!*DAL9hFO@-PCz&mIQr1JFHx zFDwLw(eR~$S_);p1*CxuIOiE)J$QBw>sKH>LnTsg|0vL>fsN>OY?@&*L*yA$q>2fC zH{U8uq_`|beii98H8r0#vK}5xpPvQ6jMNGx1Q9k6a0XM~XGC}w66%15F0mfvMaJi8 z@8befAK<1DYzKqB3<&YX&MVpmbuJ_zkJP39u!4~q*}K3CjG;h~wSlJqN*^+T0hlft zK^eg&01PpPvx4^yC@B%BudfdkV=`;;$3X!5AT$YL`dh?&Mvd*DvjG`qnBK1<4S@fj z>UF$U75wInrqW2HzR@l+7n*;KMN~6{c(<^y5cAl<2lU(pfB+>$-mVN)1ZJvNg@wGe zE^G1DK2Fei<>r=`LlL1E^jvtIwx0VjNXnrcLE9RM$yLA%e75?Y35bRkWWY3CcYV&7 
z-11PQJfWf+PvUOb(1EBK2>&5o0fmPTJE2Db<{*VdD@ECX!7Vd)i z_~Id=vIAsH12X6U^dE3xgu=;xhGz~)Dt`jY#3v+_0ZkSO0?x*hY>ljNb<|CeNE5}} zDL&nCl?SGooSr@mPLBkFQ9Ir;rPz%7>}Z7#Fq1AhKfFwR(`TlvUqYjNHD9*`6RLd& zU}q3aNTeh_HoC4)A(E1*nVA~G!;KYVz|pJ6r(;E`a7_WY1wf1oj7BSr?IkcNmfKA; zF^W2={`sD+#9F|)W|gh@V6WWUT|7z5ofY4o#h~Ve93(dtI5V(89QX{ir4{rBt)7=> zCp8F9_Zcv%KNMV5fIBALk(Xt_a=?PW#c_>>TbBiTq!x_uG(h$9t&UgmjV>Li5`XWV zPpmA3Ad!Lm3jhUx5CWeHU_=1Gv$@(_XP|!vprEz1w#oo8O9s&R-O6wnkP@u`t&qq8 zRz?R3JPiVWKE?|-Y1#a^-Q=H{@Z-s|rkO~i$As#`F;!*eDn;?z6F~T1E(eVX@FDe^ zw{MZpAb_q!1%#ny&k3~4*FXjk0Axof{s2(6vKg+hv&%zFeKfYQ;aOZ-B4A=-;{47{ zU2eJw0|vIRu&^{#d8_!&`pg8aeQ{`l)SrPW@&|hcWB@PJ8lA8q!>L zq?c~U!squnkHZ0DGcz+IlXr1tCAEAwL(WIs?a9%3pe1DX|4vdRCqqmTf^|W&XB)s4 z5s+CF?WQF|!>Q{Jg}LyTy_mN3G9-?7wOm670SyoWJ#!IQdTeoV@fIKlftLx`0PT`9 z(~^e=z!;hQ8`{gdj~)>KnJWXW=*1_xdf54zhZY_yBRq&?3tU)~v5CoLVg?b_;P7zm z9!gr;>*cnSdc`Frr_?~6X(^?Ur-2}%u`(Wlkg~!`SuH0WMRgZme*lEAwURoBwydHgYe@I)}oZG ztZT#hk2HXtFiV2)&~BQLpt<%ikqlU_>&OZ4^76I<_}1!y21YG$x}(w3(jtveI}8jA zufh011W~VbEYz)HsG-|*?AlW}S~q%*C?QAH< z;wGzOC80}AMEao41!I4Hvt#pF#_`1qSk@gF&9l~WqaQ#5A;iv*u=YP)7NCP1Q;I${ zfUDNdPWd&CDfO6k&5`b?E96vE8E%LNLbg8!`K-me>=fA210yXB_4Na=I;6e$dSLGaS$iU9?ZI>1zvbdt617Txoovl$@)I=Zxf|5p4`xnR+eqrHz zrlzcQGDx%)x;077g^cKN-?(uDVRsG=4(bcu=n_L|1iqDb)C2%n({-bMI%_x%@o?cl%-SXd}c92w(3 ze_j?86dd&eH(SZ`pEsbo(SfoEN=YZMsh49E9eep8aDc%}1CxNgtE)~rDJ?$=+I9dk zuYwW)4IOM)#6W_5D{F*NsvT@6L=Kt%%$v-O`_Htq5fd2HVp&jySSUh3ZV5F3dkzZ= z%jgLd!VW;sntFl&0x}LJfs}_fBMY#DjEU)jj?SpT*LdjICnqNt`^&N-uKarj9+!a9 z%E~}lR%s=rH|ieXr2#_Fak2|04JiBaFwY<)2RM8{-T+BKj%^)GmBn_`5)5}-@|$t2 z0H~o*FtYaFjDWsm`s~p|fd|NP)0M3K0)THq^aC{7ST0_P{jR+|KQx`#^+xD|*_t^N z0CEsQ(er32E7ZoH#dxCHVXVTMCY(u}(86K?bLGX_FCml^D2+s}H~wwGT%vZJ%;Di7 z(*G@ov&o@jWBT~`q^GAFgLsPa1<5%LV1jP7{bdvyA|FuUYbg0Jm1}x53y4($b}5|Ub}GIp6pBF@KfiqxNNex^Foku4W8;83k^$@E z7=~5?mT5?(wH$1N=Dd|NW7z(sg^V-QkkY;e3}*jNT<;*~N)GI$2xqFU+pgyiV?$u7DnJN>RVkJb=lckD`5YF1R2<3~|J{ zPpdm{e2>ico2I*cg+kQ9jKoi>owi)eAav4a(CD{DS@D_PW{ob&Z!;D5whl 
zt->6IMKr`4N35Wt$;aP)SSQ$E-$fZRC%hLJ>l?*p{DjT!3%;cKMx$0dhAAW388b2# zRquUxOZUD?v?K+qV19(`)XZmFmUr8(kFVge!SXeq@)utZn;PX%$%xxZu*on+AKqzc zz2n!m^1+vdYhv@NhaDz1mwTkK?&Yra+n7PC(`9x&9qYtA@9tyxxE@Na8FW1g{!@0B zWU}bUHS#H@Rmj$M_a*!K06^SyJ4-Ln%Mmm`lg+Cgd`2`zciYL zt}Kh~>nHb9LpxKf*Rw$0IMVU>!J+V8K!(K@J1TqQA^(OgI`WF{z)jvHyVVYg!VqT zjBYkh%dcNY466gx;c}N_y6cC3Y9!@)TA$6&SN12zPd_2Rj7*BYLhF{p!2O3x`Nt-dKOhntH>3MOW3mwK+91^i<~aV)*;fRav{***9Wm^YUuFCHO@~-A$9c z#3kWJe$p;n{+~OJC%xwGu7_WF{{>4~u31frhF4DY9qLIv=dThuQ}=yyuXam zq@Dd<*A9=*RuY&;ru~9@NN}k&lZv@CA8j7wduSx_78q7wFHyrr$USrYtoOuxUv#-v~Ly%bH#Ze{ifuL`BgS6 zxgYaejeolHR-zOnBFAc+twut#pHLk%`plv)8ZTT&Q|a?U6YSx{r8bw_i_Su6+LaQ1 z+=^cG(n22_Vpo^PoY$97=?*G1V%C;NQc}Ru)j~{+)nco?Sd;a>=cm4?jl;^CNGH{i zUO#TB^NVWnz3X-OMq66LcX}KZ(QlPe3y~Oo=b^B^0^4n`M3&yx9V731FFda*<#U;w zy;dWOX-|D3Ykpr^7&fy_YV5~f#+4KZ5=Oa=yrl)fyZPVnnTWx%I6jMZ<%#@0_!cQj6gn^xKsB7`+GFNmL#j-8W` zIV+z{OkpjSU(BA#`0jF>NbT4A=Y%$nG1vZDzaK|FK zQ0$d{R7m99L0+(|Z$li0XwCSQTeqr5SS!@&w+wExKw#TgkjRqKvAGahVd zsTUs9rtgNx6QTphbe4keE8bO77d17^;227!{2rOarcuX4Engd|9iAv1^y!hygJ6v3 zN;{vk+U|E`zj1$|HYLt^6nNs^;j#5sE&OeTJ|8P*)qj6X=&?-Hq@Kgg8{{>oaw+>YQ2C}J? 
z-5Lxke2sx0!y0$-fDa{dBpw-iT_x{4uKs0oNriTvyzCq)77?e4$a2-2?#P}E?%&RZ z(l{u5OtP5BK;j&w*+NCIc+3aZ@(T~kIp6s%whb`2eKhX2f4Hz@jGpu#X@yM9=CEku zdWA1imOFW#;Uv13R+yxY!$g*ZE&o}I5k>bXP ze}g?yi*fHM*66J-bqWC|Smg;gf($)>)gZjte0d-yD5j;v9=N@w;ke{(TXkdbuAF0= z>1}Pb?kAWOh8B%WE^MSy7hCSB)4^_E`K#;^tJF~SK^I#$VexC7Q5@ZM5{ar_0W4U8 z#8uN?z2}Rkk(19{XRTP5wRsIz3<_EMu#TH>Ol^CT?+kx%jmVuSUNnbM2R>Od3A)Tx z!|R0X?+M(Z;x{vY+^r$fcG=GxOLuB#%etHU6vav&W^Bf4cUtW3y5Szq(mtd>IwOP2 z(y>)=+iG=EK)$2aJ4}%1z>dd$2k&rsk_>|c_iNp~OOL4g1;DP~@~?k0>E;{MP)ME~ zTuDt!Q!6qQ|7VE+I;qedl=Kq&-!cgA0)sVTN22C4d;``DOU+6bC6e2W>cJLMxqol<&g0@zybKH1mQa<4LGhb-PVKB6ZV`#IQqLWg zzF1p4^(BxYrZR8`JJd zpR!UGdXBpz)bh>iza(?ST-L}v=_+_)65J#&=vP+Zuz5=I&djf^%y%VWuAhXf`&HP{ zD-40U8FIDF*;CAzYkDNym9~?ifZ@!52i(}&0w;kaLaTv`6zm1Lf0D)AZ``t-2!{Q@ zs(AJTF&HA8CouA<;KFCQoR!mn771akdO{v!rUSpGyc^^b}k3nd(ky zNbn2ve;x}k@_$b`D3VxfEif+N0En8WV^2l6BL4&Oy{olMQZgFj9g(u=oWMXZ*ebe2Us*HVyQ zH6?dyH&tk7jad+-fZwtzw3{wo7yq!}zVCi^oPBPPBPPc*x1|++jpBFM+Yb)H^D&BH z(fG}EG}me9%>-R5WQcUE1syB=1ELIn8_HTy+b@x#7d=^KV@@pJ27VcBGTy(=%E?}D zl(HpjT_$7x5U`-m%uNg&-7W@g!I=A z(`ihH%gs}p&2uDD3mLwzEJ70RkdqGv%{SM)_y1RDGmhPW57Z zSzcsDiVF6M33UA3@342~m`D)O_5DMU!_wdb1CJewChX}~p9`_ye&V5eoHfDlZ7Zp8 zEQgK%=%T#|iM{7#vP0&LI^h8~?2SRLQ2L}hffw>M8sNsM{{ zM^XH8-xdZhJkU0fbQ;YDH$1m^*lN6E*uox9Ri|`W?nPrjmCoh zc1BR^Ky9s%K2K;|V>tH9t_p7Z)7X0IfN~+B=B%WcjT{R$lvMk+W+F*MSKP*e2n|!j z!5`wpditREuCs9GG_clk*)(TSbncm1>WOg7B4%$F#$R#2yElFH$Ne6hzZ@6OokF=p zAgg}`s|N~I@c9@S5Jih41@x#xsTGKpH}t~1(_SRR#Hu;!Y;ZNp6ogJhy!>T;3tkI% z*ATxYxGFGECMG5jg;L(Vu0F--F~~4 z@qa&}D?W}V8B92B=shfW<_pAEa6N`qfZdE6bXbtke7#46auti{Vo-%IxW~sJ}$abj-;}waIYP>0HL*iyTTS zH663?#}V}&4_;be1BV6q71k*})f!~@bNkG~*#wx@&7d?@2{9q^n~Ffo-_HW(3rDg|&C}=fD5Gv|c>DcB@a;r6y?FRU0t6XivkKv+C4agIU*C~^^slzoEPiI;8OIDK*VetrnE}aSR4}KA|fjyMM z5&8Nr7OI|1f9CiN##qb`;^y%}HKA3dfeEcf)#MLhRNqLPD>O}5IqxG~3K?TIWu#>f zYhAGq_V1uOuu#np^!c8beexCW|7L%U^S+g|dY%hF2W=4 zQ$ot-5;~6Z)xJ($G!?K&Wc6VFpBrhl`u8$=`hN4*psj_zg1NAjpTn9@$gwd&io$2b%K#b2U_oPvfD@^iUB~mN* zeH%j*UE-9a6r6?7v^y-_cF&d9d)e75*#}@QcL~!iT)c22WKl7 
zN?`hDVw&Ln>F?$IQg^S$8EebRt2?^K{`5X(U!Y`H-0@~LU6d~j4IB|+Zw2(~2?XP7 zbz52$ivIMy3f}rhS^X!fHMv{;|4gP)Fnvv~a|2lXNJqzdfAo_2$^HRO--J)NZbW$Vv8%F_h|-aQ`YVaB$po&CRtyoZJ_>W%K%{;9vzx zN)Nzcr)H?Bb04lHmiE@t@mpRa zE7fWAaFxL>u3=V9!x}IlH(oL@CO;CTSY;fg;8Tyb2%x~m&~TY*U$eMxIrE&r3poBy zAY6GFi*RXPc8JAe<)pl!?)zR6(?SPTsw9<4TE2y)r-X7MA=3KuHjCikNyjBrbK@q@ zZ^JM3BGyWph5s_u1ujQ=BrjdUl5q4Du)v_PJM5Y4qUq@}nh@8`W?j*)loD3KLp75g zuiGcWr{y5qG8ojHiM#RBHf7(Vc^Jd(5FdXP>-k^4^T&6mk49btR$O4NZyp>NGcDwX z^#vokkk@rt)|7{?#uIGrI@e1skhw@T?sbH)Nj;7KC8t$r{FAk%MGV3rQ~nr+Wbru}8quKOr3Rah)Ia@Wm(u+qRo-Jgn` zHI>v)8!HS5@7Df)Qt0@dAcCpFJPUqN)?2sspYDV8LMiSB4*Zoo+;tf#e2l!<6t2dF zihbaHETgFzJ>zgte3EPlzR8##y4x?()f?e9QuIU!1-aELWo6@TNvR@Kb2;nA*5%?H znqu=5PxhSWK%j42w0EpK$xefjSFXpV89GywwL^sxd{W_lc?pE>t2{ zyuez5LV^D?JUpBhzN8{kDK7P$4~P=@1O(Fd_F%|Puxo4qgkyiSvt$sliCk)!ZuF{l zU1yTD)Arxm+e0u0+#w}G@Qjq~dd!Yab4W+?p_VT<(SQK9o)4C8Eh^|K>VE&!ywHF6 zdg)9}T%+^%Ze$<)`5GPzmIWOkU0{J#H83Kgf4nESV`Fo(1+Hqo zn9PEU4Aq{8rKxqpfWPd~%^2W8Ixw;T>haJxNrW3YL~W=A$CNG)dHW5EmX5D)J)Ei8yU_8BHyPK&DGR2}<|n z(PdUjL8G|@NO!yf9BL4zBNa1{yA_I6U{(V0zVMhBge-6=-Gv$Gb6y@17$@9bzT|Qa z=&=H~fr5fUGf41SJwK}*t{`?wxU4n<*I-)Vdoi{O)Wkba3{V#GuzCMGt}@X{5zuL_#pYo*K+(k)mr+$YDEI6v9OT$zFE`>1NT zsF>sq_7sz+PcwH|SUv36x-KT-p$xm5>^HNYhyUH>!a&V4qf^~MT~qvD>RPrSnLhjb z%OWDQU@C&48_XHO9X(pH3Rh7nTPk3Jbq29i4hC|f{I7@u3I&?r@Slqi`bO+7I13oQ zFb~$rW05ax+8omj_>@^rMUnBwgA~*wc!3+yfx%ip^R?yl9+d0J|IVwFbbx$ifa&Vc@cms)F|<=JJ=cZh~3`nCK)c0v8-39mrjm9&tDunAi9{5A0*imf?y3 z48VlN)jv>W^KN>w};E zb;?1!-+hKGmRomLbTu(WxiKds+!JDB@h1-GtLvG9YcdpK^|XT02l-PBeVV&Vcb1iR zlKzc9a(XO$H*Zoxbde&v4mBbbtV77ofCrDz(Q$hw-y*IJ0J#_c6Ls|FqeEX?$~*4D z93@xx>-~N6ubuxMFbYF|av+pXb#kwjWwt-QXepxED5)ZhC|G~DmSYcrEVnXbo zFb^x{K3vB!ybfO{(bQCAvJj&n=up`{qNaSg=XT}HPpw1a^@MT1H&5K-b!oCEwIrz9 zYJ|E>2k;`0!hg|f!^OQ5PKO-$cA;NqP7FxVJ9~5&*p#Apj#Dao>madiHkUn?$q_#Z@itIrMATVThs-W9?{8xZnMb1uRc3rQ;NIB!&_kf40e zqv{kCzjC`>^P)^?F*g3Ah7Qw^n(fE%x{023lkw9JXV}7R7En#V`tG1xd)XNA%Tg)#=|p%MmJVzLl0kwqfN2P z?KoC`Up9T!(vmo^=$zC;)D-);!Y?gD?Ke?aBaXpOl)*G%`n8_#?V?h>54Seycx^Bo 
zqDI`$iDXHzmZwt$C^dR z;#QvLVEv?-e)IRi)C0nZ8WMvkSlU)4)!nNY&s?zSE?^mqW18MSGbf9=@o%Vcc>+Ae zEoX*rgK6I8sAm7u%hyNN3s;v>pJKbaOr7*eL~h|=bCA`Njtx^DbWn`y$`-do9w0pE}{+n zAIF&9ohmoQ#aA*HEM@=9j>Rgx2R zQjrpD1rO~P&fw}m1z48YJ3Ds(jXhZ7XaR1i8GsF;<=|(6FZ?M^U)IaqK-OG8XCP?z zP)C^9=C^?dUIa-@|8-{`GKB|`St0GPXwR`w>5RaC&~3t$9Il7xI>Qwo?2wSH%mw%| zF=M!;-JyR6*2x#QxMDohwI#RhP0$$BKKhJ1UdQ0MH3lPfkG_D%35KQ(2L26}RTSA6 z-bN7-gAw*s37!T~^8cr{H-W~o{rkO7WJqKlQc|MfLQynG$WWrmnB1mFp%jXYnahx= zTpGw!LNXOmA(bhG$WVxgC>fG6ru{wi{NMM#_wzh^ul25Xx2$`0S6tV1p678Kzv27+ z{CMb0BCLXlop%|;cv;40W=atR0tGq*FXD!9+19i5%kaQ8r&3Wj8fZeaF`AG;%uz&B%C=?hs;7wiZTe%8hB)KJ_Lb$xZB=GQTg zj|~}xU+yec4XF2>PGk^_Fv(o*?bh_j0)7+Pm2Pf2GisSpT1_L0CcQ4wb%`pGq<;gs z+5j%NZm36&ETDC?YcMIC9MX(06^qRNxoq^rA1h<|oTBnB&+US%eon<{@ErVYJ!O39 zYpJis?l^_pT&g=K7Quc>ij7C_^Tzl3rn7-&1wkeka6*>m;f!IBNqT*+5W2SQ%fu?QA#Hi>1#!6!_Hr5}!bzPs($Pu(ZXOxODEZCl=@# zqrnehTOg!by&Nd`5o0L)WP%5kprK zQ*@iz-)(+;-K5LQuHuKIyRVbO6=$hk3S!tDIW1IEGAq~{XI4L*3BvpAc1ovlr%~7ZhnZKl zNF7T0J>hG?_U44>#|{=>szZh2q$u0$x5W#5xQe~*4c%yDg^8b$f~_>)lrN=t$&!~< zl{Uu&m7XM4`e~y1CI^`~@n{nT8kPio`3;l*q&V!Fotv8@eMf*Wy3x^s98X+EW?!B+ z79+9fMxwv7jvgHwdjXzRF9X&Y4VB$KxAT$W&yU{Sshs?4Qx#r1vxWcv`1i{CGn& zAhGgmvvK(FSO7szCB(bk2&R$Do@{@}iBdU&U&EJTP4{0a&RdZrNI<{}^mZfH3iUZ5H-gV`P*?Uw6@_h7_nZNl1( zK>bDZ!Q-MMZm%Zp={Oi&@RGUoYS53hs`Jj$_EOQXBU1@C`RGNn!$OUXjU*xlgaAMd zRUHM1n#9#3Z^i4`4f)N@&22l1zp}?})W^0DdgFV-z=VA6QABARM%9%g-=zIS#7U?MEm} z%ewUMGZy~RQ(owB0G>@ej$$7rf@IZb6N3(`{K&n(NXui{b-R+fcy&hU%tdfpJnKj*)%51fLVS5-XP z5>$l>llAmv;cEt#D|@+>wlgs@HqqA1H|+Ryn5wo!~TN{&^UFAKO9+M1U`dE8uk>;vuEY8a-d~TcJ2UEgO&eITG}c= zdcG?W*Y6Kxg@tGo>>V^%s@`h${KA9C?}zo*800mMrMPRZY+&wCj#fdbG<$a4=?bj~ zhW&q}eTv*(9gn&De#x7cHc{Do*_D)~x~j?-r?)NR&V0H;fQR~DN}A8M^Xpz*EK2M_ zsbHYl*T4?*1+YlypZ5mN@~85^)=O<%1y2IXHWg5zMn^>*WHj^| z1g6h8*OKA<%+MBCFPNb?Er-7SJFEawV+7PgpdiQ{y8*KR5@N;5l?hN&8CzN)U>Oz)PY*Pn(&bcc_-&nA->~cVIkyzX%Nmg}Tk&+JhnU9=4 zoih&07h%1L$lDSiT2WCEfk@(3>3xF|e+LoRLQCp+=FAH$?=vz!xcTIN(D`$O!JVkB zeP_eW)Ya8zQ47$XLV`-*O1JOaVWjF3b`ae#7%xeHUy$_#>}i5oUxu4)hZV6l(dDo2 
zy+E)X&CP~G&iAsiR^luAWjm> z378dfc+*rFl>A_yy}7lU3tDD0s26GN-CGW=_stWB;qj|LrN;8qsR$HrtYDrZ$0%g! z#N$Q;3Q!)0&cgMfR(R@zCEz`v(ws#t0Jd*6pkN>%j_-F$ce7h&+`9|<0F(exNW2EO zjmZp57s)6hpt)(p(GvagDA7tI>2v+3x9=^is9)L80BT#i|!8 zOco88AJ^{QJZKsHYRS)5oA8N8o9cyjd3@~>9Bwbr4(E;$kkQ9z_An7 zD*{)5EPj=wBr~cuOcXJn!shcF_m+Vol@2(H4n95a4RlgB);_tXFvQ(ulO;?Ua{nMq zuRwGr3j`S2?c*a|V!mE|qwt}X;YA2xYtyC|*hd6eO| z(dY<0IHAx-Y03|B3-NR+YVhh~1_lO*pA4l1$Z14~J=g;^A%`dbOE^o&iw9bB#9*Uj zfSjtLTnX_)`agx)jU2ns=3e)!)z^SLvxu9j|6xx_Ng)A&X5igycwEKTuV0J~yc*5| zSXX&KjS_w%%EOb~q!uljQFl#_38E|*n|2}+q<5A9QKU&|n26+%M<>CaNU*$(zA2~< zF0slJh7w%^h(Fi{zPF!#YB#ON4Hf0PxTTgyPY6kKm0r}`nQVKgeX)n4N!-2Mabtt% z$fwm?a@|%zkfYlouuawdhyLvF`H=-WL3LL?Z7Uy(4LujYPPLtq+1Nz?Sf^xB@B?<@ z(i3%`_8agX-L`)>>PWO3M+SxVHJ@52-_-ogG(0?N?EJLHd$ zWpvV=HmCSUcvWYi<`4E)<=G#Vs5cIDFX;>N>ZQ*MdyfAaS>h`_I5gtk7k!NJ{%fzX z;;D77Y@T+E73=&$mDP%8Y)h7AWIbbDmiEtT@o24E!&+;GE4y1ZH6NzgFggw%=AHA5 z_uZ@zdiu^Ce}W;v6F_6w{g6aZ@L~i5=2{UtCRl?!P^xd6<41}xqG0f)dEBDPkRr*R zd?#BW{GKdUK<_Gt+6$e=2XtpgUvPyMBX}r>1}!=vA$y?mJg{;-%4Rh|m43uT$zF$G z=r)VBx2V?-C(ga^-T7Vxj3gfq(9V^uvC(ef+6M*k-0U9R>fdo^qP;vnl-iuW6ttq3 z_jU2o{9eJ)^ebtye){G+b$^}z!knmo>asO^-7QTxD83}i25XF|ynLb|PJW-Ceag)K z(^VJ`gOGkEL2WhAoGFArHrwK91gt*Y7sbx|Mn-Z0i|2>J>_-%MH2``6ATHaW64!v_ z!UXrS4jdOwzBT*Hkr~RmelsTLE<`m1I2C{MCIar%>g^yd6MsIjeB$_pX4#LxoGInk zd8RhO_|zziMbqg)2+*Zcy8t0W8i?eQmOY=uym=D6s^zA8VD7a;#!lnP$Id#v#lDeX zEG{N7`!9+7DKAB7n&(dVrL1d*O%_-?wBE7wUy=BG-S$a0H}-dmxgUQwIT$ux=E&fv z=QZyaF`&lYMrdoPUoc?tzS-z5HZ?tMg?0@wNkJ;f0e5-RT|*tYxHMzwN`%sc*(Pt- zxBz`I<5I6}if0EXRb};lY>e=Vr0YF2mfnSMHqUj)1X?{ooY;g2b6HY{SU^&X!8M#j zH$fH>L^s&=V7bKqBsd)jb?wbQ}1MvG4-4bkX}3lH=L1ipG>!Y)=k@u*-xbY5um zai8<4x!E5F5RVab%siYMaCcw^Wz*Ll5jpS_y^_(d^sgKOz|2Q!D_ArI1qZWHcg|&V z=CQr=(N|`6c(gLMnsuS)_~k!dT&HZiJWnN7r^P6E9uKwN#JXQ$#t({}O2ixo0hJ-u zUxcHTI8L*N?7c_jhuND8P8=diNfhEj8>X>I!C4bg?Loi8UO$sg_c;Si$nuMfeH0a6T*pw&JC{c$!T$-?n>GNh-&rsbfMq~DcHgJisslWtpluG4|5h$#>>697G}5~ zNCU~<<|5V^X{I?iWL-XW;>0T>cynliJC+*llJV4+NVVy1y){u_CMqtj7l1?`bsel; z!$!1hLh&xMlu`#nqB(e 
zi*%!9PpNGuF7f0@!y<=67t>E(-s5*M%{Q*SPsJv6Q=AEoA0)+GLPA22?Y(Gd;6e1F zi8w0y)%o!O9`ItYevQUM1sl<1#G_}hd}2h%XW}*C5Audcv^u@kr2jV2mf%H~hvpC# z8tM-k555c4LxWyx9PwU%eosJftcKVC1z0xZ3&gH2<~_~>45mddJ3gpQrrqgKqC)O|YafZ9^}#9CvOM`f;B`U z)TG>h4!IXQDToCwEp+Fi1IxAW3lT>I&|m`9KQ3#6An@ zDdA~TzNiI1KKsQVc}MDhk=_AUK)UUvf97VCB7Xo8`VsQ(t=`k})JJTIC_G3aoiG__ z?KTTSH?kcIfYc`N8A*v0cVR=e2OivYmrf2{DhrGJ-dk~E*8P{4=^2kyy{vEG-VP9=b%RN}>f8i_>doHphSUI_96W@;}0FF3__!$bm=11&S^_1B-~E6B1{Alx)e-Y8NXrdA7;;BA#FUlli-VF~!zysSbMcrFstL)#0nj084?w{PlvF%A z-B=0-Sd2a_9XyWHE7fjU9GPnHy!N3W;$bEdjkDpsGOSkJ&RhYPS{1LEquZ* z&q`J7xMMKwx|fdI2Tq3L=hsV8H=#j|ukXDt`BHzVkU2F=Kwd0EdNrRz8bza4Pl&Hj4^b4bA>6Q77kSTYCWk9BE?Nx< z-3YA=J`>HhRGq71Su?mNPLP#boz9}<*`xs_@xzi&L8+L`N&&p~@HhHO66n7A+A^AL z7a)Dzx8jd;gA%Q(oHyx3&z1w7oa~)SBe_}$ISWWeipUa`fMMDL|c-wS5knfS)*>#29h|SE)as}qz7aWqP9UZo4|1yhX9iSQ(F7@Xu*F<)7Dz!#6TyG&^EEkfsWd#8Fs+D zBm{t%kfNfk1hiZ+_c&eDm>d|uK$3sB`E=(cNaF}eDrf~;=p8h8Q!dAr*qXx?YTA?H zZjkWEn{j-KPc}0>{?z9c2hPphE?+!^ostkF@H9@MbwT*$gxd$xgYmroebNs|U1Ea( z*;au}t4Q65BS{t%B*=e+kXm2R5GS9Tq6l{j2^m!UOsuS|1QtN~5+Wyd)%l;?-R*~% zgOIntgxrvKrZyZ;FD~3g-1THgY0w5)N96nlStbI$#ZvDOD-yae$@OrVA2oWMmXni1 zg-r#Tp^Sge^QbW@+2_w3F7{)+6U-}4R{*ExL+%tZJS#{5L*HkQeZ8D|>lV|A6N75! 
z(rO~L*7mZ+6Lkur3ixK2plu*2G?ubdYw0nul>e0K=|0+suFT0&VLy)u!FRuH&P4pU zwNVOfK#-|H_2;gutFxbYRZrhb6FU=4KmaeR3}O!IFw?z}Og;xj9>9Hst1rZ_roEiW z*BUe?8Z(fUpOL}?h)~F0g&uB2eY9ZTY?0!npAtoL5)6MjpdmA;3z76aBUN_m>g<|; zQ0G1nqs#F)5lk5&q9Z2Qm8k3;Dw+PqAem{ACXgmL%tLW%OBt~@m4gi5-P=oMaS`si zpvKEEctY%3?k&VxhlBdErFkpn{{ zd~m;pM%$4S_Pz}co#n1X_T=+4p9tZF~a{g@~w z(%1FaMG7zrQl2C&4D$Tvbh06_=+H>487V=~nV zC$`Sy9ev4Ld*E7daQ^2XM(cAoy(MbxlUrxB5KfCXh60S&jR-$s&?_1rlHZ2?`0g@~3)IyO9_WAQ}R2 z!dWrK;5FPRLMchT4`3vk*Ew#E@e80ssW^DR|1>)Kg+1@Qi|6%!CaLhC{gs9q-CM?1 z{lsu;a`Gd3evgLVg1v(vB{+nY;`+toeMafqC3mzPQkaasgd?3g% zBp`x6V*nC}`DuG_$ti)_QJTym)S?K}CP>dz>M$RD!M@JK3YGep8$F*sLDWGv+OnWG zQ}lDe4Utq87d+z8-$sNk@7(6(Shi%c3oYeyI25KO!yDoi z!O1QD#f;aDWfIOtbA$5T&l-2LAWQzA*f3-Mo}+X1A=q4zWuPUL6V z1y~<(GG4|Xs*tGAh)lf+Q3(^EW#ttWgyU|NXXKgAw+?0ugn7Y0f#H`pcU>fsb28a2 z_E3E+XoQH~3SQ;?7wDDa7|BOk9sE+v9v}&$dQWO+ZC#I# z=2*HRj8tL@g8~!c>I0++xe%cQbK+XaMT+ch+J8bZX<4yoKp2+=JS8^Iw)5Cs*OEY8 zQ-CaX0(-;5T|*BF(oC56JfgQfmq4z2CXhTJPB;B-isCxNr6`NpAJRh_ivEiR0%p|T zSyVe+*5R-4rvB9)$-J!*#kNU|}K9yXHnZnOs+U0v7bu17w`L}BiRT)h%-`y_46 zl}2&GyLo7DL(U$71?ItJv%k;f4!#oP4#V=NdxPUoX0&YFxOIx6??AQCzQEw9{`TDJ z(kUT~83a7MX6McLctRmB;NqZg8sN{^NP-7!aamWF*t&J=$a17C!AB&t88ZBcg33e+ z&+7$}Hau3VMGFt_(lCDf{~HoU;jRiUvM8Uoy=8r~mZ{H;V4qV;FJKwWHK96^h<6xS8BE=Myyg?$EmN_qIa)u z-))({Dy%Q8#AlB4nU;WZD4DZDt}W`x=J)9EwKWDNCoJfig0Sl!_n5HHxH_vLi^Rb! 
zbxd8+Yf=`V8-JYEq{EB>17>-LqKJ{?!{DLI8*W@p2(pOiXm)jhI+hdPg%OaNmU%!8 zkpde^>s9D(JED$mm4oq9(W}d?c#fUT=A_=8UCG$(^wK07YM7}cCre%A?WFTdVU`DB z;8A0C=4bxtg4jPjUm@pMxNE`5Xi2`@$*zMaH3g@ezS){f5@O6u^Cb~VeWD6UfEg$> zI5c514=wRWtv*PuKwk!q>Sd4l0)B8MoFK?AF(tf7(!s$3AY)oknBagKgHnZLpmcWZ z)%^R^J!0@cLG#b*vFs;{76+=XmKu`?Mm`d0b^X*srJivoRP8LhoE0IRjOZPgo}Bop zQ8;ZXAE_-wjCzn0F;WD{2(Tb8Btbx1%TeUn#$=UsEj7rV1%e7fyc(3K$?ulJjT^TK zMg^zeh6s|>EwCa8Iudu8pXs5Dv^4Txi;kbfdw7{g=B92#2Nk_ACvW)mDsmYH0#cnU zzXRykgKJQR3yx+t{}+@#o+AhkD{#go(6mXKacwB>iuh3VqxY;#D=3Ga%0<2Ny_A84`oA02tWT znq_tCw+rfB_tnIe3r>HRvE#!%0IhHvPC7Uc(k}LURy2?)3IQZZ=2z{yw6>upRx(~W z0@GE`sBB(RI=5yeec?N$`|EBzGO>H2J?=NpB(N_u?!BZ!vUWsuWC$K&5;shAe8f(} z$Nz0m+p>V^h5-ZhTE+1qdD;b5g;yG*N1NIY4kDySGA*QbNWZ;Mx%o&+4u~Ur-ksT@ zM}moI%K_WVkD%$IJ*#*EimmSL`F|;Kg3sIQeT#sO|MOAngTjiB4+Z*V-~NbN@>&ZK zW8<8G*V zXCwcv#ZJ$l8&Et24|uN;43jN+02OJiP~yk)-=yZBf!}vrEg>9u$BBQUymp`2z0tfl zQ4|}*X)Gf9w%}MFPrlxmCkQ4%L|}Q(Nw?zpr=gP!`_t&Vkr}>_#RE^tX&<)v?~wqM zO2c4ypY0*hJJX6qzoi#G$#H5>5>I*l#=|ntxlbD*)HBQ^f@-SB?YmX!)bHQF|1~`o zbz$_VG)zox?6SE7{uZils8f@2BO?0{@XM@Jwm^HSVtjqz)2l5f4$r#Da^!5fO*W2( zncbOw70~yP=M6=+PY7_+59KUJM;46KxD#_If&JJ1BoR8;cN%2wC@D`6bvoibG6hNU zJiOBeuAC>j1GNP&1Z(eAFS~y6Z+mC=5c8O}KVlMu#+P8nprY!P`u-gXQ5C-QVpJ42 zq{dQ>gydmbqv&`cq249(U% zh@d_~#YKk4wb>wRo>y$Sqk<8H@^r7eDFs}^ZiH=(Ffu4hn++-^ow7M|`=}OKz0nu) z1&zzAs(_Y+JAD5-QbNM2c{IEU0r2a%EnGxy1px;@h25CFH^82{|2qC2kj((ZNo`fk zzHnI0M;`E@HI2uhi*kt%8$>7R5ya{oaNKZ#th5ZB!GJ;qy$|oKdHDYnOy;;;j6nTjVWP~=G?;te?8?*m$J+_PQ z?~f;T(1GGLAR3gwu*>^ zZJPMhkaD8{RVG4g%c>wHe8f%$BEd3OUXmG$=*r@vqd;A5)ko8j;H6-ru&2MrTuIdU zXnhM8EmFs>n@}YQ$7DrC<${14M$9WLh=Pjf1&T3m(sfsnbQXe4 zLViYCNKpvi{vTJqDuOSMi!3GA7x@sLLy9KyyRaa52SD~&#I68{j8XH_8-Q4 z6gjT9Z30BG;o?Z9YjfVG8vTwC?dRE<(F4u7_vb&=9lQUIgZHczyPmcTKN-M>=`ko+ z6DR;^CPY<&UizUYdnq!lcBNn#5%JtkOM3x)25#a%Uv6&2ZE}yT^bl@+C?YtwG&FJX zh2#gjHHE+K$v2Y$hD;}O(73pM>jnk9vkfx(p{_ayR&4B6D4i-|sVvOQ9-=jP&MUBs z;UOn`?`kHqg{xiC(J6BgUzV5Cm~XlY#?UZ$EqQd6l{5N%k$G#?Mm_&iy<eptnwFid5B~{X3fGUx;J=g+yfE 
z&@G)Wnoz#YYo7DOm6OMWj55{G5E;}f6jvgW@E_4n$QGn~y$s^yU@c5XMP=|6B+wpT z7CuiQ%a?}?7Ee*L?L8V2G5jlPVHqP?DMTwBoSwV-Tskv)(nLbh`(Z1&Tw0m)Xj06b zERa(^@pXY}r~Na$aJtg`$Z>-Oqkp@ zR-5^}p^+Om=sWNw^@T%a7o* z#LvH(h`+D63$O{fqU(H=M2`Wf8yP72scT^dHszE+_>Ow+|+TuxAvA0K=~2Y zdsWr$=+%P%I%YBq=XE8Ref_Cdx*Kfk5lRb;b$pL*{&4c)VJ94CR&*OTPhG>ZI9r?yWy>p#f zrLDm1N18X{6f!SYRbvnLpD??gjf*bTq_PgTIUU@Q}O)hlJ>A zNB$L>md@M(-W2cpk*iY;(I^B41@*P3MAyQ!eE9HTH|UMB+lgHEuWM9Xuj;79)Vz%2 z4C1C~OKj3OTh-BI;2}ElEAtx5T_`e-eK?1kiIcpFxNu)TK$VQtV^KzvnorjQjN$Oe z%9>sS;r8tdG^1U@W5*hbnOW7MB?rSq~8t&LedI-qH2`7~_wo&IJO-M2b@OuIoGVJ?Z%<$r)O1uQ z4#tck0-z6%&w4MG)*+Hb)aqV+0<#JJN*f43h^?2h(K)I$sR(Dr)X z0&-sU5Hksibj_g8m^n+|IQP|jnOE%1GN^ppvPF1wljnEQM*o+h_B#)VS6-i~mpd^b z(-FC^60&O3>rY~CmI~D;MWkPwe}zWI-8!paVOl`BO)3e@ON952u@p9e3<3G|k|rJj zzWqu2aMvX*9y(RD>2s~9byC?b|55zXxWt?2;FUaKD+EJU@G$X4vsvY_@#YN-jfBdh zc&9F|7U8X-{Zy*P{l}AtNrxqu&-wUV*>e0wqZ9ormHPXE&sVL^9lzo>YTmW6XLM zH+3RGYe6ctd-)|wkvC5-YBeu|Q%J_ZK=O)_o+gfuUV)AW_z z?YL5qq1j2dWG=~8ja`vcc4;JPAt7o2 z?1>%AB&cMf^76Fw z1uo=UmFB-SRr*PRn#5(F7jx@*PCNdP;4Kkt)iox^?s?NtZN-0NfTNw61~G;P!g_Q($u$ld-^91U5y*oVIj3V`2LzB7KVk6jLt0AkSTJjR1>_hGc*>cpv!ne}lnJ4=J?=nt6&fO(E^uPXiU@btvI@eC!!bHn$lH^H+*A0k5}X#Egxk&PD`bd<2j#qF$>GSOJW6b|O)sK021X=@3`C3V% zu~$}BQoS;Q7<3Ge!MW@i(iVrBl29#B*QF>lH3@GzLD`#~mGvTHB=*Pu*k7K*3s(C; z61&n^AlXd@Y=3-2<^k-(Mhs)QtHl@gFCIh_%^HnVw2BkO)2m&P6Om?%S`ho>z^zDV zQB}y~QOXh9E$Itl9RVZZ2h}4^J;EiRf5IX4*T90Vyzi_(_9l&(xNkEHWPBoZ3~2cx z|7$*n$vVu#K2WZZD+qUhp*AKgFyV}4z|af`(X9dQAhjo4XjY8HMIjGwh&BXeHWM-o z)Ft{o?%~C);N_rwA;szza%pP{QF22W(iHRE=TG-gdweR=^2Q+@(?|x^%|p#01qy=o zk<|B*`wDzD2Dt&y`ox|`dsn}J*u&Dsh8b3kBED+msJyW2lMb`yTRS2{`qC(w>h<-G zfJ@wJ_t48vZ`Zuh&^4cVep@I%OLmGq?rG6PYs_YIPnU<8acOI(bnbGv^!;ktQ{@`yNxuE-h7tjEdd}^9Z|=t1gUE z>(-w|cADyQ=dqpUhdX-`QhGOJY-GPBo8x_?aZ#JmHLsvri;fu~BfHXHUr!#|azJxFvxVbhtN(rf;t+|E zBI|69)Atu)=p<_Dt%wr*qR%94ur8QCrpVw|`)#n^rnD{J8VA-0|~14!2b7 z-Av6&M12|`m%bhx^wLr3BYrLXoU4-=9KyyWmX_`{ytRL)dP_ndH8^QAj6_iZogHfsj2x4c*`!YAK?w) zn`~E}9$1A);5qp3a19#6B-z&1Mi$zadxSWV^!5uGzmozW3hpnlLH`>5aH62Ulka?J 
zpx3pMzhA}Io_bx|5#3jPK2j48EJ5p$XuP(z_KmIlS{m*0sex0rw!Y!v-$Upayi9s3 zTd@@u4!v({>%sn3{qp6`6DOoFfc$$;*ajj*-~2d+%%OFI&N8CtuDb7D7f89BeykZB2Fr4UJ>} z)o@ zyl^u#lpQ5GWHhYUv#vqkzv@pU=V7j{MPnc3RF%<_p11a#b#+x^W0-S@trlLoNJ>gZ zrULVt$ke1DH9r6dPo25{;6bup#=KPwNKO2T>}bJLp4>W!>SJrYAs1)t>jaOgj&I14 z7Ndb-_a3i^63&BNUjle72FLP_{Ze8Pez5BoqI0y<(e&Ox+@qLNvB>a%ynHUIp3t`< zO<1{o`%LuUkFl{R zXUr|REF&qb%-c34<{R)ZRv;6)od+dwXnOkEJfJyG&=q^f6+Bl4u3fi|2d$=-wP&=x z>ioilJ8}>-PNRn&0w;`dYUi`M_}1wwqa|-1%+(}R`K}qPZ~e`5y5wifw@n{CTduJSvQFqjez%nu(^ZB9;mhiDT<5jO+-tm)5|9-1| z0p@9`5D774Q2W!f&?3R-322P0q^Vq(+fICgtbzBggx# zx1qtZqSA_b1oFkW#||YWrAw9g@0*Se4r;BGl)Ab3a&~t1qy}4jk>$UwMpSR_%jFcN zW6AllfB&#NPLecHr_w^YcZ{$Fon4kc*f zKY`?zSdY-=M6vhR?nfE{Eo_e#C3U(FKkB!X74BVc{cak2Oz-+5??NLZcYApici6o7 z^SxvR*K(%bTg~>mhK7>omS`}E)EjfGfM5(=i6Pfb*Y>9cfV#2Qkdy1Rx$^7I$JYwy zH=`ZVF1dNas^7kSBP*z<$I9-trEF4BkuY}SjYgZt zN=iflYl)2={QC8vp&>J>3B+~=c>?KwV?OuaJr!x1AHud*#y{Vbkv#{)xL;j-k1sw> z%GpP{*->9lPnldDXqtj~m@#p{K|Cise`p%fUp$zj?FF|Z(g-LG*X><}} zVtpSpHI+VRSCXQak&9u|VGak94<;ogmD!$zUMyg}khr+Wja}7d6iLmb%oX~`mSk%O_) zhZ(V%U^~LuV@0ORfenkQKCTi21)Ujc)4uSL$KSk3s8|DM744RCt>>z#tqsyn^~bzX zbMK#q`2_`g&frK@p6cZz1`8YtC?6X?am~sv#Xv6LNPH1vLCXT)s_*o)huv?iDcN(w zri3ttBC;^Xy<@SzX>Mkwh*twLyV1JsBdW;<@kLPx%*bfIWm_FdfQ##7YinBuiGu8z z&wHT>c>&o5#_jC3u}RHWp=&c!q}|8Q9+W>jntG5WfB_>+{jjaYXiLhAaE6A4-bR7r z=fSo7)%EqeF`@=4L(EgXfNsyViEyOHM%Re}j-~-?jq5=A* zvC$vWiyo*0C$ZZ_#l#$yFT_|hW@hHK($X53N=7t=*za@(yL|q56YDqB*>F+_<(?Zc zM+bnSAVA<25@KDmW(@}4aA40OlsugcC4p!R?rF!4wi83#w6(RPu3QO#q~HklYszsxp4vJ<=MiJX!P zn0FXAodrG7*Ztf)oZMtu7o~Cj{G4_FGR3o6h{i)NTwt;LhGr_GZ+dz$J;4weA<7EC-sk+1?@;vjA{gioRP z0AYAtoii{^Y(U-ab|3i@$emx?w&Qc)4Hz04Mjjsc=>hGS#QE9pyX`o9iM6$%7>5&& zpx|m5PfyPAoN-G9A5UIBMiAh%jmbMr#V3YzT3Y1S#jLx(1eNz?d%8>XMJaI7rn z>o+)bh`3(S?`|}PrV5QET{AP8@C*YQs;zrok&XwByZmsR{NR0kik{he<}(NBfZ=9- zSx$m@~on0r`L~t>CN^qEMSuElA)m?Jj`xA zzm^xB>0eBobTCjY2|p0GxpwVRYS+xf*cc;q#eNN1jmtWV_h~R)Fv+^0mYiG7*#=#= zyou3Tepz7-it_ZFSuWPz*%>i9>WC;Vt^ILYf=A75@9}cz*B|%y_(4NW@+;!&N63ep 
zz_VnD-Ptd6$=`*)SPQP{5o-6R(M#mX$5c^3&eUC9H$yG?IP0{7gC>mEB1@MxynDAc z94b5p5-YH?Z?scE1O~%Jwsp{Vdv+wCY=#qF*T|^Fq5aM=^r@N-Vb`@)eOXs`!93?8Hhc(! zBX8Ug!M!14YB7jO1N$ivv7SuxOxCEdTzi!6>zGXr^yw9IfI5{e9VqDgosNqnmWKoqJ974TW$2exMZCejP! zZA5Pj-F9?LjKl}G^={Y%75ZF4LUD$jJ zjcX>N9)>WOP_>brTf=#^>kO%ol9D_g_IjlsuU8dbfsPDF4k**bF&pN8zxkzdGLBu+<6+SQ3cvPf?$^jxv-JNTAG-n` literal 0 HcmV?d00001 diff --git a/baselines/fedavgm/conf-colab.sh b/baselines/fedavgm/conf-colab.sh new file mode 100644 index 000000000000..822fe2f273e1 --- /dev/null +++ b/baselines/fedavgm/conf-colab.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# Shellscript to configure the environment on the Google Colab terminal + +# fix issue with ctypes on Colab instance +apt-get update +apt-get install -y libffi-dev + +# Install pyenv +curl https://pyenv.run | bash +export PYENV_ROOT="$HOME/.pyenv" +command -v pyenv >/dev/null || export PATH="$PYENV_ROOT/bin:$PATH" +eval "$(pyenv init -)" + +# this version is specific to the FedAvgM baseline +pyenv install 3.10.6 +pyenv global 3.10.6 + +# install Poetry +curl -sSL https://install.python-poetry.org | python3 - +export PATH="/root/.local/bin:$PATH" + +# install and set environment with Poetry +poetry install +poetry shell diff --git a/baselines/fedavgm/fedavgm/__init__.py b/baselines/fedavgm/fedavgm/__init__.py new file mode 100644 index 000000000000..a5e567b59135 --- /dev/null +++ b/baselines/fedavgm/fedavgm/__init__.py @@ -0,0 +1 @@ +"""Template baseline package.""" diff --git a/baselines/fedavgm/fedavgm/client.py b/baselines/fedavgm/fedavgm/client.py new file mode 100644 index 000000000000..6500bdc9c737 --- /dev/null +++ b/baselines/fedavgm/fedavgm/client.py @@ -0,0 +1,70 @@ +"""Define the Flower Client and function to instantiate it.""" + +import math + +import flwr as fl +from hydra.utils import instantiate +from keras.utils import to_categorical + + +class FlowerClient(fl.client.NumPyClient): + """Standard Flower client.""" + + # pylint: disable=too-many-arguments + def __init__(self, 
x_train, y_train, x_val, y_val, model, num_classes) -> None: + # local model + self.model = instantiate(model) + + # local dataset + self.x_train, self.y_train = x_train, to_categorical( + y_train, num_classes=num_classes + ) + self.x_val, self.y_val = x_val, to_categorical(y_val, num_classes=num_classes) + + def get_parameters(self, config): + """Return the parameters of the current local model.""" + return self.model.get_weights() + + def fit(self, parameters, config): + """Implement distributed fit function for a given client.""" + self.model.set_weights(parameters) + + self.model.fit( + self.x_train, + self.y_train, + epochs=config["local_epochs"], + batch_size=config["batch_size"], + verbose=False, + ) + return self.model.get_weights(), len(self.x_train), {} + + def evaluate(self, parameters, config): + """Implement distributed evaluation for a given client.""" + self.model.set_weights(parameters) + loss, acc = self.model.evaluate(self.x_val, self.y_val, verbose=False) + return loss, len(self.x_val), {"accuracy": acc} + + +def generate_client_fn(partitions, model, num_classes): + """Generate the client function that creates the Flower Clients.""" + + def client_fn(cid: str) -> FlowerClient: + """Create a Flower client representing a single organization.""" + full_x_train_cid, full_y_train_cid = partitions[int(cid)] + + # Use 10% of the client's training data for validation + split_idx = math.floor(len(full_x_train_cid) * 0.9) + x_train_cid, y_train_cid = ( + full_x_train_cid[:split_idx], + full_y_train_cid[:split_idx], + ) + x_val_cid, y_val_cid = ( + full_x_train_cid[split_idx:], + full_y_train_cid[split_idx:], + ) + + return FlowerClient( + x_train_cid, y_train_cid, x_val_cid, y_val_cid, model, num_classes + ) + + return client_fn diff --git a/baselines/fedavgm/fedavgm/common.py b/baselines/fedavgm/fedavgm/common.py new file mode 100644 index 000000000000..0ce9d04dc544 --- /dev/null +++ b/baselines/fedavgm/fedavgm/common.py @@ -0,0 +1,494 @@ +# Copyright 
2020 Adap GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Commonly used functions for generating partitioned datasets.""" + +# pylint: disable=invalid-name + + +from typing import List, Optional, Tuple, Union + +import numpy as np +from numpy.random import BitGenerator, Generator, SeedSequence + +XY = Tuple[np.ndarray, np.ndarray] +XYList = List[XY] +PartitionedDataset = Tuple[XYList, XYList] + + +def float_to_int(i: float) -> int: + """Return float as int but raise if decimal is dropped.""" + if not i.is_integer(): + raise Exception("Cast would drop decimals") + + return int(i) + + +def sort_by_label(x: np.ndarray, y: np.ndarray) -> XY: + """Sort by label. + + Assuming two labels and four examples the resulting label order would be 1,1,2,2 + """ + idx = np.argsort(y, axis=0).reshape((y.shape[0])) + return (x[idx], y[idx]) + + +def sort_by_label_repeating(x: np.ndarray, y: np.ndarray) -> XY: + """Sort by label in repeating groups. + + Assuming two labels and four examples the resulting label order would be 1,2,1,2. + + Create sorting index which is applied to by label sorted x, y + + .. 
code-block:: python + + # given: + y = [ + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9 + ] + + # use: + idx = [ + 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19 + ] + + # so that y[idx] becomes: + y = [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 + ] + """ + x, y = sort_by_label(x, y) + + num_example = x.shape[0] + num_class = np.unique(y).shape[0] + idx = ( + np.array(range(num_example), np.int64) + .reshape((num_class, num_example // num_class)) + .transpose() + .reshape(num_example) + ) + + return (x[idx], y[idx]) + + +def split_at_fraction(x: np.ndarray, y: np.ndarray, fraction: float) -> Tuple[XY, XY]: + """Split x, y at a certain fraction.""" + splitting_index = float_to_int(x.shape[0] * fraction) + # Take everything BEFORE splitting_index + x_0, y_0 = x[:splitting_index], y[:splitting_index] + # Take everything AFTER splitting_index + x_1, y_1 = x[splitting_index:], y[splitting_index:] + return (x_0, y_0), (x_1, y_1) + + +def shuffle(x: np.ndarray, y: np.ndarray) -> XY: + """Shuffle x and y.""" + idx = np.random.permutation(len(x)) + return x[idx], y[idx] + + +def partition(x: np.ndarray, y: np.ndarray, num_partitions: int) -> List[XY]: + """Return x, y as list of partitions.""" + return list(zip(np.split(x, num_partitions), np.split(y, num_partitions))) + + +def combine_partitions(xy_list_0: XYList, xy_list_1: XYList) -> XYList: + """Combine two lists of ndarray Tuples into one list.""" + return [ + (np.concatenate([x_0, x_1], axis=0), np.concatenate([y_0, y_1], axis=0)) + for (x_0, y_0), (x_1, y_1) in zip(xy_list_0, xy_list_1) + ] + + +def shift(x: np.ndarray, y: np.ndarray) -> XY: + """Shift x_1, y_1. + + so that the first half contains only labels 0 to 4 and the second half 5 to 9. 
+ """ + x, y = sort_by_label(x, y) + + (x_0, y_0), (x_1, y_1) = split_at_fraction(x, y, fraction=0.5) + (x_0, y_0), (x_1, y_1) = shuffle(x_0, y_0), shuffle(x_1, y_1) + x, y = np.concatenate([x_0, x_1], axis=0), np.concatenate([y_0, y_1], axis=0) + return x, y + + +def create_partitions( + unpartitioned_dataset: XY, + iid_fraction: float, + num_partitions: int, +) -> XYList: + """Create partitioned version of a training or test set. + + Currently tested and supported are MNIST, FashionMNIST and CIFAR-10/100 + """ + x, y = unpartitioned_dataset + + x, y = shuffle(x, y) + x, y = sort_by_label_repeating(x, y) + + (x_0, y_0), (x_1, y_1) = split_at_fraction(x, y, fraction=iid_fraction) + + # Shift in second split of dataset the classes into two groups + x_1, y_1 = shift(x_1, y_1) + + xy_0_partitions = partition(x_0, y_0, num_partitions) + xy_1_partitions = partition(x_1, y_1, num_partitions) + + xy_partitions = combine_partitions(xy_0_partitions, xy_1_partitions) + + # Adjust x and y shape + return [adjust_xy_shape(xy) for xy in xy_partitions] + + +def create_partitioned_dataset( + keras_dataset: Tuple[XY, XY], + iid_fraction: float, + num_partitions: int, +) -> Tuple[PartitionedDataset, XY]: + """Create partitioned version of keras dataset. 
+ + Currently tested and supported are MNIST, FashionMNIST and CIFAR-10/100 + """ + xy_train, xy_test = keras_dataset + + xy_train_partitions = create_partitions( + unpartitioned_dataset=xy_train, + iid_fraction=iid_fraction, + num_partitions=num_partitions, + ) + + xy_test_partitions = create_partitions( + unpartitioned_dataset=xy_test, + iid_fraction=iid_fraction, + num_partitions=num_partitions, + ) + + return (xy_train_partitions, xy_test_partitions), adjust_xy_shape(xy_test) + + +def log_distribution(xy_partitions: XYList) -> None: + """Print label distribution for list of paritions.""" + distro = [np.unique(y, return_counts=True) for _, y in xy_partitions] + for d in distro: + print(d) + + +def adjust_xy_shape(xy: XY) -> XY: + """Adjust shape of both x and y.""" + x, y = xy + if x.ndim == 3: + x = adjust_x_shape(x) + if y.ndim == 2: + y = adjust_y_shape(y) + return (x, y) + + +def adjust_x_shape(nda: np.ndarray) -> np.ndarray: + """Turn shape (x, y, z) into (x, y, z, 1).""" + nda_adjusted = np.reshape(nda, (nda.shape[0], nda.shape[1], nda.shape[2], 1)) + return nda_adjusted + + +def adjust_y_shape(nda: np.ndarray) -> np.ndarray: + """Turn shape (x, 1) into (x).""" + nda_adjusted = np.reshape(nda, (nda.shape[0])) + return nda_adjusted + + +def split_array_at_indices( + x: np.ndarray, split_idx: np.ndarray +) -> List[List[np.ndarray]]: + """Split the array `x`. + + into list of elements using starting indices from + `split_idx`. + + This function should be used with `unique_indices` from `np.unique()` after + sorting by label. + + Args: + x (np.ndarray): Original array of dimension (N,a,b,c,...) + split_idx (np.ndarray): 1-D array contaning increasing number of + indices to be used as partitions. Initial value must be zero. Last value + must be less than N. + + Returns + ------- + List[List[np.ndarray]]: List of list of samples. 
+ """ + if split_idx.ndim != 1: + raise ValueError("Variable `split_idx` must be a 1-D numpy array.") + if split_idx.dtype != np.int64: + raise ValueError("Variable `split_idx` must be of type np.int64.") + if split_idx[0] != 0: + raise ValueError("First value of `split_idx` must be 0.") + if split_idx[-1] >= x.shape[0]: + raise ValueError( + """Last value in `split_idx` must be less than + the number of samples in `x`.""" + ) + if not np.all(split_idx[:-1] <= split_idx[1:]): + raise ValueError("Items in `split_idx` must be in increasing order.") + + num_splits: int = len(split_idx) + split_idx = np.append(split_idx, x.shape[0]) + + list_samples_split: List[List[np.ndarray]] = [[] for _ in range(num_splits)] + for j in range(num_splits): + tmp_x = x[split_idx[j] : split_idx[j + 1]] # noqa: E203 + for sample in tmp_x: + list_samples_split[j].append(sample) + + return list_samples_split + + +def exclude_classes_and_normalize( + distribution: np.ndarray, exclude_dims: List[bool], eps: float = 1e-5 +) -> np.ndarray: + """Excludes classes from a distribution. + + This function is particularly useful when sampling without replacement. + Classes for which no sample is available have their probabilities are set to 0. + Classes that had probabilities originally set to 0 are incremented with + `eps` to allow sampling from remaining items. + + Args: + distribution (np.array): Distribution being used. + exclude_dims (List[bool]): Dimensions to be excluded. + eps (float, optional): Small value to be addad to non-excluded dimensions. + Defaults to 1e-5. + + Returns + ------- + np.ndarray: Normalized distributions. 
+ """ + if np.any(distribution < 0) or (not np.isclose(np.sum(distribution), 1.0)): + raise ValueError("distribution must sum to 1 and have only positive values.") + + if distribution.size != len(exclude_dims): + raise ValueError( + """Length of distribution must be equal + to the length `exclude_dims`.""" + ) + if eps < 0: + raise ValueError("""The value of `eps` must be positive and small.""") + + distribution[[not x for x in exclude_dims]] += eps + distribution[exclude_dims] = 0.0 + sum_rows = np.sum(distribution) + np.finfo(float).eps + distribution = distribution / sum_rows + + return distribution + + +def sample_without_replacement( + distribution: np.ndarray, + list_samples: List[List[np.ndarray]], + num_samples: int, + empty_classes: List[bool], +) -> Tuple[XY, List[bool]]: + """Sample from a list without replacement. + + using a given distribution. + + Args: + distribution (np.ndarray): Distribution used for sampling. + list_samples(List[List[np.ndarray]]): List of samples. + num_samples (int): Total number of items to be sampled. + empty_classes (List[bool]): List of booleans indicating which classes are empty. + This is useful to differentiate which classes should still be sampled. + + Returns + ------- + XY: Dataset contaning samples + List[bool]: empty_classes. 
+ """ + if np.sum([len(x) for x in list_samples]) < num_samples: + raise ValueError( + """Number of samples in `list_samples` is less than `num_samples`""" + ) + + # Make sure empty classes are not sampled + # and solves for rare cases where + if not empty_classes: + empty_classes = len(distribution) * [False] + + distribution = exclude_classes_and_normalize( + distribution=distribution, exclude_dims=empty_classes + ) + + data: List[np.ndarray] = [] + target: List[np.ndarray] = [] + + for _ in range(num_samples): + sample_class = np.where(np.random.multinomial(1, distribution) == 1)[0][0] + sample: np.ndarray = list_samples[sample_class].pop() + + data.append(sample) + target.append(sample_class) + + # If last sample of the class was drawn, then set the + # probability density function (PDF) to zero for that class. + if len(list_samples[sample_class]) == 0: + empty_classes[sample_class] = True + # Be careful to distinguish between classes that had zero probability + # and classes that are now empty + distribution = exclude_classes_and_normalize( + distribution=distribution, exclude_dims=empty_classes + ) + data_array: np.ndarray = np.concatenate([data], axis=0) + target_array: np.ndarray = np.array(target, dtype=np.int64) + + return (data_array, target_array), empty_classes + + +def get_partitions_distributions(partitions: XYList) -> Tuple[np.ndarray, List[int]]: + """Evaluate the distribution over classes for a set of partitions. 
def get_partitions_distributions(partitions: XYList) -> Tuple[np.ndarray, List[int]]:
    """Evaluate the distribution over classes for a set of partitions.

    Args:
        partitions (XYList): Input partitions.

    Returns
    -------
        np.ndarray: Distributions of size (num_partitions, num_classes).
        List[int]: Sorted list of the labels observed across all partitions.
    """
    # Collect every label present in any partition.
    labels = set()
    for _, y in partitions:
        labels.update(set(y))
    list_labels = sorted(labels)
    # NOTE(review): the bin edges assume labels are 0..num_classes-1;
    # non-contiguous label values would be mis-binned — confirm with callers.
    bin_edges = np.arange(len(list_labels) + 1)

    # Pre-allocate distributions, one row per partition.
    distributions = np.zeros((len(partitions), len(list_labels)), dtype=np.float32)
    for idx, (_, _y) in enumerate(partitions):
        hist, _ = np.histogram(_y, bin_edges)
        distributions[idx] = hist / hist.sum()

    return distributions, list_labels


def create_lda_partitions(
    dataset: XY,
    dirichlet_dist: Optional[np.ndarray] = None,
    num_partitions: int = 100,
    concentration: Union[float, np.ndarray, List[float]] = 0.5,
    accept_imbalanced: bool = False,
    seed: Optional[Union[int, SeedSequence, BitGenerator, Generator]] = None,
) -> Tuple[XYList, np.ndarray]:
    r"""Create imbalanced non-iid partitions using Latent Dirichlet Allocation
    (LDA) without resampling.

    Args:
        dataset (XY): Dataset containing samples X and labels Y.
        dirichlet_dist (numpy.ndarray, optional): previously generated
            distribution to be used. This is useful when applying the same
            distribution for train and validation sets.
        num_partitions (int, optional): Number of partitions to be created.
            Defaults to 100.
        concentration (float, np.ndarray, List[float]): Dirichlet concentration
            (:math:`\alpha`) parameter. An :math:`\alpha \to \infty` generates
            uniform distributions over classes; an :math:`\alpha \to 0.0`
            generates one class per client. Defaults to 0.5.
        accept_imbalanced (bool): Whether or not to accept imbalanced output
            classes. Default False.
        seed (None, int, SeedSequence, BitGenerator, Generator): Seed used to
            initialize the BitGenerator for the Dirichlet distribution; see
            the official NumPy documentation for accepted values.

    Returns
    -------
        Tuple[XYList, numpy.ndarray]: List of partitions and the Dirichlet
        probability density functions used to generate them.
    """
    # pylint: disable=too-many-arguments,too-many-locals
    x, y = dataset
    x, y = shuffle(x, y)
    x, y = sort_by_label(x, y)

    if (x.shape[0] % num_partitions) and (not accept_imbalanced):
        raise ValueError(
            """Total number of samples must be a multiple of `num_partitions`.
               If imbalanced classes are allowed, set
               `accept_imbalanced=True`."""
        )

    # Spread samples as evenly as possible: the first `remainder` partitions
    # receive one extra sample each (replaces an O(n) counting loop).
    quotient, remainder = divmod(x.shape[0], num_partitions)
    num_samples = [
        quotient + (1 if pid < remainder else 0) for pid in range(num_partitions)
    ]

    # Get the classes and the index at which each class starts
    # (y is sorted, so `return_index` yields per-class offsets).
    classes, start_indices = np.unique(y, return_index=True)

    # Make sure concentration is an np.ndarray before validating it.
    concentration = np.asarray(concentration)

    # Infinite concentration means uniform partitions: delegate to the IID
    # partitioner and report the resulting empirical distribution.
    if float("inf") in concentration:
        partitions = create_partitions(
            unpartitioned_dataset=(x, y),
            iid_fraction=1.0,
            num_partitions=num_partitions,
        )
        dirichlet_dist = get_partitions_distributions(partitions)[0]

        return partitions, dirichlet_dist

    if concentration.size == 1:
        concentration = np.repeat(concentration, classes.size)
    elif concentration.size != classes.size:
        # Bug fix: the original passed *two* f-strings to ValueError, which
        # renders a tuple instead of a single readable message.
        raise ValueError(
            f"The size of the provided concentration ({concentration.size}) "
            f"must be either 1 or equal to the number of classes ({classes.size})"
        )

    # Split x into one list of samples per class.
    list_samples_per_class: List[List[np.ndarray]] = split_array_at_indices(
        x, start_indices
    )

    if dirichlet_dist is None:
        dirichlet_dist = np.random.default_rng(seed).dirichlet(
            alpha=concentration, size=num_partitions
        )

    if dirichlet_dist.size != 0:
        if dirichlet_dist.shape != (num_partitions, classes.size):
            raise ValueError(
                f"""The shape of the provided dirichlet distribution
                 ({dirichlet_dist.shape}) must match the provided number
                 of partitions and classes ({num_partitions},{classes.size})"""
            )

    # Draw each partition without replacement; `empty_classes` carries the
    # exhausted-class state from one partition to the next.
    partitions: List[XY] = [None] * num_partitions  # each slot filled below
    empty_classes = classes.size * [False]
    for partition_id in range(num_partitions):
        partitions[partition_id], empty_classes = sample_without_replacement(
            distribution=dirichlet_dist[partition_id].copy(),
            list_samples=list_samples_per_class,
            num_samples=num_samples[partition_id],
            empty_classes=empty_classes,
        )

    return partitions, dirichlet_dist
+++ b/baselines/fedavgm/fedavgm/conf/dataset/cifar10.yaml @@ -0,0 +1,4 @@ +--- +_target_: fedavgm.dataset.cifar10 +num_classes: 10 +input_shape: [32, 32, 3] \ No newline at end of file diff --git a/baselines/fedavgm/fedavgm/conf/dataset/fmnist.yaml b/baselines/fedavgm/fedavgm/conf/dataset/fmnist.yaml new file mode 100644 index 000000000000..2dfa07f1c60a --- /dev/null +++ b/baselines/fedavgm/fedavgm/conf/dataset/fmnist.yaml @@ -0,0 +1,4 @@ +--- +_target_: fedavgm.dataset.fmnist +num_classes: 10 +input_shape: [28, 28, 1] \ No newline at end of file diff --git a/baselines/fedavgm/fedavgm/conf/model/cnn.yaml b/baselines/fedavgm/fedavgm/conf/model/cnn.yaml new file mode 100644 index 000000000000..c25463693c7f --- /dev/null +++ b/baselines/fedavgm/fedavgm/conf/model/cnn.yaml @@ -0,0 +1,5 @@ +--- +_target_: fedavgm.models.cnn +input_shape: ${dataset.input_shape} +num_classes: ${dataset.num_classes} +learning_rate: ${client.lr} \ No newline at end of file diff --git a/baselines/fedavgm/fedavgm/conf/model/tf_example.yaml b/baselines/fedavgm/fedavgm/conf/model/tf_example.yaml new file mode 100644 index 000000000000..8c2a670ee978 --- /dev/null +++ b/baselines/fedavgm/fedavgm/conf/model/tf_example.yaml @@ -0,0 +1,5 @@ +--- +_target_: fedavgm.models.tf_example +input_shape: ${dataset.input_shape} +num_classes: ${dataset.num_classes} +learning_rate: ${client.lr} \ No newline at end of file diff --git a/baselines/fedavgm/fedavgm/conf/strategy/custom-fedavgm.yaml b/baselines/fedavgm/fedavgm/conf/strategy/custom-fedavgm.yaml new file mode 100644 index 000000000000..526c9714ed73 --- /dev/null +++ b/baselines/fedavgm/fedavgm/conf/strategy/custom-fedavgm.yaml @@ -0,0 +1,13 @@ +--- +_target_: fedavgm.strategy.CustomFedAvgM +min_available_clients: ${num_clients} +fraction_fit: ${server.reporting_fraction} +fraction_evaluate: ${fraction_evaluate} +server_learning_rate: ${server.learning_rate} +server_momentum: ${server.momentum} +on_fit_config_fn: + _target_: 
def cifar10(num_classes, input_shape):
    """Prepare the CIFAR-10.

    This method considers CIFAR-10 for creating both train and test sets. The sets are
    already normalized.
    """
    print(f">>> [Dataset] Loading CIFAR-10. {num_classes} | {input_shape}.")
    (x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
    # Scale pixel values into [0, 1].
    x_train, x_test = (split.astype("float32") / 255 for split in (x_train, x_test))
    # Derive shape and class count from the data itself; the arguments are
    # only echoed in the log line above.
    input_shape = x_train.shape[1:]
    num_classes = len(np.unique(y_train))

    return x_train, y_train, x_test, y_test, input_shape, num_classes


def fmnist(num_classes, input_shape):
    """Prepare the FMNIST.

    This method considers FMNIST for creating both train and test sets. The sets are
    already normalized.
    """
    print(f">>> [Dataset] Loading FMNIST. {num_classes} | {input_shape}.")
    (x_train, y_train), (x_test, y_test) = keras.datasets.fashion_mnist.load_data()
    # Scale pixel values into [0, 1].
    x_train, x_test = (split.astype("float32") / 255 for split in (x_train, x_test))
    # Derive shape and class count from the data itself; the arguments are
    # only echoed in the log line above.
    input_shape = x_train.shape[1:]
    num_classes = len(np.unique(y_train))

    return x_train, y_train, x_test, y_test, input_shape, num_classes
# pylint: disable=too-many-locals
@hydra.main(config_path="conf", config_name="base", version_base=None)
def main(cfg: DictConfig) -> None:
    """Run the baseline.

    Parameters
    ----------
    cfg : DictConfig
        An omegaconf object that stores the hydra config.
    """
    np.random.seed(2020)

    # 1. Print parsed config
    print(OmegaConf.to_yaml(cfg))

    # 2. Prepare your dataset
    x_train, y_train, x_test, y_test, input_shape, num_classes = instantiate(
        cfg.dataset
    )

    partitions = partition(x_train, y_train, cfg.num_clients, cfg.noniid.concentration)

    print(f">>> [Model]: Num. Classes {num_classes} | Input shape: {input_shape}")

    # 3. Define your clients
    client_fn = generate_client_fn(partitions, cfg.model, num_classes)

    # 4. Define your strategy (centralized evaluation runs on the last round)
    evaluate_fn = get_evaluate_fn(
        instantiate(cfg.model), x_test, y_test, cfg.num_rounds, num_classes
    )

    strategy = instantiate(cfg.strategy, evaluate_fn=evaluate_fn)

    # 5. Start Simulation
    history = fl.simulation.start_simulation(
        client_fn=client_fn,
        num_clients=cfg.num_clients,
        config=fl.server.ServerConfig(num_rounds=cfg.num_rounds),
        strategy=strategy,
        client_resources={"num_cpus": cfg.num_cpus, "num_gpus": cfg.num_gpus},
    )

    _, final_acc = history.metrics_centralized["accuracy"][-1]

    # 6. Save your results
    save_path = HydraConfig.get().runtime.output_dir

    strategy_name = strategy.__class__.__name__
    # NOTE(review): dataset type is inferred from the configured input shape;
    # assumes only CIFAR-10 (32x32x3) and FMNIST are used — confirm if more
    # datasets are ever added.
    dataset_type = "cifar10" if cfg.dataset.input_shape == [32, 32, 3] else "fmnist"

    def format_variable(x):
        """Render bytes via repr (filename-safe); pass other values through."""
        return f"{x!r}" if isinstance(x, bytes) else x

    file_suffix: str = (
        f"_{format_variable(strategy_name)}"
        f"_{format_variable(dataset_type)}"
        f"_clients={format_variable(cfg.num_clients)}"
        f"_rounds={format_variable(cfg.num_rounds)}"
        f"_C={format_variable(cfg.server.reporting_fraction)}"
        f"_E={format_variable(cfg.client.local_epochs)}"
        f"_alpha={format_variable(cfg.noniid.concentration)}"
        f"_server-momentum={format_variable(cfg.server.momentum)}"
        f"_client-lr={format_variable(cfg.client.lr)}"
        f"_acc={format_variable(final_acc):.4f}"
    )

    filename = "results" + file_suffix + ".pkl"

    # Bug fix: the original printed an f-string with no fields; report the
    # actual file being written.
    print(f">>> Saving {filename}...")
    results_path = Path(save_path) / filename
    results = {"history": history}

    with open(str(results_path), "wb") as hist_file:
        pickle.dump(results, hist_file, protocol=pickle.HIGHEST_PROTOCOL)
def _compile_sgd(model, learning_rate):
    """Compile *model* with plain SGD, categorical cross-entropy and accuracy."""
    model.compile(
        loss="categorical_crossentropy",
        optimizer=SGD(learning_rate=learning_rate),
        metrics=["accuracy"],
    )
    return model


def cnn(input_shape, num_classes, learning_rate):
    """CNN Model from (McMahan et. al., 2017).

    Communication-efficient learning of deep networks from decentralized data
    """
    input_shape = tuple(input_shape)
    weight_decay = 0.004

    layers = [
        keras.layers.Conv2D(
            64,
            (5, 5),
            padding="same",
            activation="relu",
            input_shape=input_shape,
        ),
        # First block pools before batch-norm; the second block reverses the
        # order — both kept exactly as in the reference setup.
        keras.layers.MaxPooling2D((3, 3), strides=(2, 2)),
        keras.layers.BatchNormalization(),
        keras.layers.Conv2D(
            64,
            (5, 5),
            padding="same",
            activation="relu",
        ),
        keras.layers.BatchNormalization(),
        keras.layers.MaxPooling2D((3, 3), strides=(2, 2)),
        keras.layers.Flatten(),
        keras.layers.Dense(384, activation="relu", kernel_regularizer=l2(weight_decay)),
        keras.layers.Dense(192, activation="relu", kernel_regularizer=l2(weight_decay)),
        keras.layers.Dense(num_classes, activation="softmax"),
    ]
    return _compile_sgd(keras.Sequential(layers), learning_rate)


def tf_example(input_shape, num_classes, learning_rate):
    """CNN Model from TensorFlow v1.x example.

    This is the model referenced on the FedAvg paper.

    Reference:
    https://web.archive.org/web/20170807002954/https://github.com/tensorflow/models/blob/master/tutorials/image/cifar10/cifar10.py
    """
    input_shape = tuple(input_shape)
    weight_decay = 0.004

    # Local-response-normalization settings from the original TF tutorial.
    lrn_arguments = {
        "depth_radius": 4,
        "bias": 1.0,
        "alpha": 0.001 / 9.0,
        "beta": 0.75,
    }
    layers = [
        keras.layers.Conv2D(
            64,
            (5, 5),
            padding="same",
            activation="relu",
            input_shape=input_shape,
        ),
        keras.layers.MaxPooling2D((3, 3), strides=(2, 2), padding="same"),
        keras.layers.Lambda(local_response_normalization, arguments=lrn_arguments),
        keras.layers.Conv2D(
            64,
            (5, 5),
            padding="same",
            activation="relu",
        ),
        keras.layers.Lambda(local_response_normalization, arguments=lrn_arguments),
        keras.layers.MaxPooling2D((3, 3), strides=(2, 2), padding="same"),
        keras.layers.Flatten(),
        keras.layers.Dense(384, activation="relu", kernel_regularizer=l2(weight_decay)),
        keras.layers.Dense(192, activation="relu", kernel_regularizer=l2(weight_decay)),
        keras.layers.Dense(num_classes, activation="softmax"),
    ]
    return _compile_sgd(keras.Sequential(layers), learning_rate)


def model_to_parameters(model):
    """Retrieve model weights and convert to Flower Parameters."""
    return ndarrays_to_parameters(model.get_weights())
def get_evaluate_fn(model, x_test, y_test, num_rounds, num_classes):
    """Generate the function for server global model evaluation.

    The returned ``evaluate_fn`` runs after global model aggregation and
    evaluates the aggregated model centrally — but only on the final round.
    """

    def evaluate_fn(
        server_round: int, parameters, config
    ):  # pylint: disable=unused-argument
        # Returning None tells the server to skip centralized evaluation;
        # only the last round is evaluated.
        if server_round != num_rounds:
            return None

        # Load the aggregated weights into the local model instance.
        model.set_weights(parameters)

        y_test_cat = to_categorical(y_test, num_classes=num_classes)
        loss, accuracy = model.evaluate(x_test, y_test_cat, verbose=False)

        return loss, {"accuracy": accuracy}

    return evaluate_fn
+ + This implementation of FedAvgM diverges from original (Flwr v1.5.0) implementation. + Here, the re-implementation introduces the Nesterov Accelerated Gradient (NAG), + same as reported in the original FedAvgM paper: + + https://arxiv.org/pdf/1909.06335.pdf + """ + + def __init__( + self, + *, + fraction_fit: float = 1.0, + fraction_evaluate: float = 1.0, + min_fit_clients: int = 2, + min_evaluate_clients: int = 2, + min_available_clients: int = 2, + evaluate_fn: Optional[ + Callable[ + [int, NDArrays, Dict[str, Scalar]], + Optional[Tuple[float, Dict[str, Scalar]]], + ] + ] = None, + on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + accept_failures: bool = True, + initial_parameters: Parameters, + fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, + evaluate_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, + server_learning_rate: float = 1.0, + server_momentum: float = 0.9, + ) -> None: + """Federated Averaging with Momentum strategy. + + Implementation based on https://arxiv.org/pdf/1909.06335.pdf + + Parameters + ---------- + fraction_fit : float, optional + Fraction of clients used during training. Defaults to 0.1. + fraction_evaluate : float, optional + Fraction of clients used during validation. Defaults to 0.1. + min_fit_clients : int, optional + Minimum number of clients used during training. Defaults to 2. + min_evaluate_clients : int, optional + Minimum number of clients used during validation. Defaults to 2. + min_available_clients : int, optional + Minimum number of total clients in the system. Defaults to 2. + evaluate_fn : Optional[Callable[[int, NDArrays, Dict[str, Scalar]], + Optional[Tuple[float, Dict[str, Scalar]]]]] + Optional function used for validation. Defaults to None. + on_fit_config_fn : Callable[[int], Dict[str, Scalar]], optional + Function used to configure training. Defaults to None. 
+ on_evaluate_config_fn : Callable[[int], Dict[str, Scalar]], optional + Function used to configure validation. Defaults to None. + accept_failures : bool, optional + Whether or not accept rounds containing failures. Defaults to True. + initial_parameters : Parameters + Initial global model parameters. + server_learning_rate: float + Server-side learning rate used in server-side optimization. + Defaults to 1.0. + server_momentum: float + Server-side momentum factor used for FedAvgM. Defaults to 0.9. + """ + super().__init__( + fraction_fit=fraction_fit, + fraction_evaluate=fraction_evaluate, + min_fit_clients=min_fit_clients, + min_evaluate_clients=min_evaluate_clients, + min_available_clients=min_available_clients, + evaluate_fn=evaluate_fn, + on_fit_config_fn=on_fit_config_fn, + on_evaluate_config_fn=on_evaluate_config_fn, + accept_failures=accept_failures, + initial_parameters=initial_parameters, + fit_metrics_aggregation_fn=fit_metrics_aggregation_fn, + evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation_fn, + ) + self.server_learning_rate = server_learning_rate + self.server_momentum = server_momentum + self.momentum_vector: Optional[NDArrays] = None + + def __repr__(self) -> str: + """Compute a string representation of the strategy.""" + rep = f"FedAvgM(accept_failures={self.accept_failures})" + return rep + + def initialize_parameters( + self, client_manager: ClientManager + ) -> Optional[Parameters]: + """Initialize global model parameters.""" + return self.initial_parameters + + def aggregate_fit( + self, + server_round: int, + results: List[Tuple[ClientProxy, FitRes]], + failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], + ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + """Aggregate fit results using weighted average.""" + if not results: + return None, {} + + # Do not aggregate if there are failures and failures are not accepted + if not self.accept_failures and failures: + return None, {} + + # Convert results + 
weights_results = [ + (parameters_to_ndarrays(fit_res.parameters), fit_res.num_examples) + for _, fit_res in results + ] + + fedavg_result = aggregate(weights_results) # parameters_aggregated from FedAvg + + # original implementation follows convention described in + # https://pytorch.org/docs/stable/generated/torch.optim.SGD.html + + # do the check for self.initial_parameters being set + assert ( + self.initial_parameters is not None + ), "Initial parameters must be set for CustomFedAvgM strategy" + + # remember that updates are the opposite of gradients + pseudo_gradient: NDArrays = [ + x - y + for x, y in zip( + parameters_to_ndarrays(self.initial_parameters), fedavg_result + ) + ] + + if server_round > 1: + assert self.momentum_vector, "Momentum should have been created on round 1." + + self.momentum_vector = [ + self.server_momentum * v + w + for w, v in zip(pseudo_gradient, self.momentum_vector) + ] + else: # Round 1 + # Initialize server-side model + assert ( + self.initial_parameters is not None + ), "When using server-side optimization, model needs to be initialized." 
+ # Initialize momentum vector + self.momentum_vector = pseudo_gradient + + # Applying Nesterov + pseudo_gradient = [ + g + self.server_momentum * v + for g, v in zip(pseudo_gradient, self.momentum_vector) + ] + + # Federated Averaging with Server Momentum + fedavgm_result = [ + w - self.server_learning_rate * v + for w, v in zip( + parameters_to_ndarrays(self.initial_parameters), pseudo_gradient + ) + ] + + # Update current weights + self.initial_parameters = ndarrays_to_parameters(fedavgm_result) + + parameters_aggregated = ndarrays_to_parameters(fedavgm_result) + + # Aggregate custom metrics if aggregation fn was provided + metrics_aggregated = {} + if self.fit_metrics_aggregation_fn: + fit_metrics = [(res.num_examples, res.metrics) for _, res in results] + metrics_aggregated = self.fit_metrics_aggregation_fn(fit_metrics) + elif server_round == 1: # Only log this warning once + log(WARNING, "No fit_metrics_aggregation_fn provided") + + return parameters_aggregated, metrics_aggregated diff --git a/baselines/fedavgm/fedavgm/utils.py b/baselines/fedavgm/fedavgm/utils.py new file mode 100644 index 000000000000..42a3f372e6ad --- /dev/null +++ b/baselines/fedavgm/fedavgm/utils.py @@ -0,0 +1,61 @@ +"""Define any utility function. + +They are not directly relevant to the other (more FL specific) python modules. For +example, you may define here things like: loading a model from a checkpoint, saving +results, plotting. 
def plot_concentrations_cifar10():
    """Create a plot with different concentrations for dataset using LDA.

    Loads CIFAR-10 (train + test pooled), partitions it across 30 clients for
    several LDA concentration values, and saves a horizontal-bar figure of the
    per-client class distributions to ``../_static/concentration_cifar10_v2.png``.
    """
    x_train, y_train, x_test, y_test, _, num_classes = cifar10(10, (32, 32, 3))
    x = np.concatenate((x_train, x_test), axis=0)
    y = np.concatenate((y_train, y_test), axis=0)
    num_clients = 30

    # Simulated different concentrations for partitioning
    concentration_values = [np.inf, 100, 1, 0.1, 0.01, 1e-10]
    color = plt.get_cmap("RdYlGn")(np.linspace(0.15, 0.85, num_classes))
    num_plots = len(concentration_values)
    fig, axs = plt.subplots(1, num_plots, figsize=(15, 5), sharey=True)

    # Nudge the first axis right to make room for the shared y-label.
    pos = axs[0].get_position()
    pos.x0 += 0.1
    axs[0].set_position(pos)

    for i, concentration in enumerate(concentration_values):
        partitions = partition(x, y, num_clients, concentration)

        for client in range(num_clients):
            _, y_client = partitions[client]
            lefts = [0]
            axis = axs[i]
            class_counts = np.bincount(y_client, minlength=num_classes)
            # Bug fix: removed a dead `np.sum(class_counts > 0)` statement
            # whose result was computed and discarded.

            class_distribution = class_counts.astype(np.float16) / len(y_client)

            # Stacked-bar offsets: cumulative sum of the preceding fractions.
            for idx, val in enumerate(class_distribution[:-1]):
                lefts.append(lefts[idx] + val)

            axis.barh(client, class_distribution, left=lefts, color=color)
            axis.set_xticks([])
            axis.set_yticks([])
            axis.set_xlabel("Class distribution")
            axis.set_title(f"Concentration = {concentration}")

    fig.text(0, 0.5, "Client", va="center", rotation="vertical")
    plt.tight_layout()
    plt.savefig("../_static/concentration_cifar10_v2.png")
    print(">>> Concentration plot created")
["poetry-core>=1.4.0"] +build-backend = "poetry.masonry.api" + +[tool.poetry] +name = "fedavgm" +version = "1.0.0" +description = "FedAvgM: Measuring the effects of non-identical data distribution for federated visual classification" +license = "Apache-2.0" +authors = ["Gustavo Bertoli"] +readme = "README.md" +homepage = "https://flower.dev" +repository = "https://github.com/adap/flower" +documentation = "https://flower.dev" +classifiers = [ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Operating System :: MacOS :: MacOS X", + "Operating System :: POSIX :: Linux", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: Implementation :: CPython", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Scientific/Engineering :: Mathematics", + "Topic :: Software Development", + "Topic :: Software Development :: Libraries", + "Topic :: Software Development :: Libraries :: Python Modules", + "Typing :: Typed", +] + +[tool.poetry.dependencies] +python = ">=3.9, <3.12.0" # changed! 
original baseline template uses >= 3.8.15 +flwr = "1.5.0" +ray = "2.6.3" +hydra-core = "1.3.2" # don't change this +cython = "^3.0.0" +tensorflow = "2.10" +numpy = "1.25.2" +matplotlib = "^3.7.2" + +[tool.poetry.dev-dependencies] +isort = "==5.11.5" +black = "==23.1.0" +docformatter = "==1.5.1" +mypy = "==1.4.1" +pylint = "==2.8.2" +flake8 = "==3.9.2" +pytest = "==6.2.4" +pytest-watch = "==4.2.0" +ruff = "==0.0.272" +types-requests = "==2.27.7" + +[tool.isort] +line_length = 88 +indent = " " +multi_line_output = 3 +include_trailing_comma = true +force_grid_wrap = 0 +use_parentheses = true + +[tool.black] +line-length = 88 +target-version = ["py38", "py39", "py310", "py311"] + +[tool.pytest.ini_options] +minversion = "6.2" +addopts = "-qq" +testpaths = [ + "flwr_baselines", +] + +[tool.mypy] +ignore_missing_imports = true +strict = false +plugins = "numpy.typing.mypy_plugin" + +[tool.pylint."MESSAGES CONTROL"] +disable = "bad-continuation,duplicate-code,too-few-public-methods,useless-import-alias" +good-names = "i,j,k,_,x,y,X,Y" +signature-mutators="hydra.main.main" + +[[tool.mypy.overrides]] +module = [ + "importlib.metadata.*", + "importlib_metadata.*", +] +follow_imports = "skip" +follow_imports_for_stubs = true +disallow_untyped_calls = false + +[[tool.mypy.overrides]] +module = "torch.*" +follow_imports = "skip" +follow_imports_for_stubs = true + +[tool.docformatter] +wrap-summaries = 88 +wrap-descriptions = 88 + +[tool.ruff] +target-version = "py38" +line-length = 88 +select = ["D", "E", "F", "W", "B", "ISC", "C4"] +fixable = ["D", "E", "F", "W", "B", "ISC", "C4"] +ignore = ["B024", "B027"] +exclude = [ + ".bzr", + ".direnv", + ".eggs", + ".git", + ".hg", + ".mypy_cache", + ".nox", + ".pants.d", + ".pytype", + ".ruff_cache", + ".svn", + ".tox", + ".venv", + "__pypackages__", + "_build", + "buck-out", + "build", + "dist", + "node_modules", + "venv", + "proto", +] + +[tool.ruff.pydocstyle] +convention = "numpy" diff --git a/doc/source/ref-changelog.md 
b/doc/source/ref-changelog.md index c4aad511a4a5..5f323bc80baa 100644 --- a/doc/source/ref-changelog.md +++ b/doc/source/ref-changelog.md @@ -18,6 +18,8 @@ - HeteroFL [#2439](https://github.com/adap/flower/pull/2439) + - FedAvgM [#2246](https://github.com/adap/flower/pull/2246) + ## v1.6.0 (2023-11-28) ### Thanks to our contributors From 96c8ce99b7e82abb4797f859e8ae26056ef8d025 Mon Sep 17 00:00:00 2001 From: Robert Steiner Date: Wed, 3 Jan 2024 09:13:40 +0100 Subject: [PATCH 11/30] Add dockerfile for flower client image (#2746) --- src/docker/client/Dockerfile | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 src/docker/client/Dockerfile diff --git a/src/docker/client/Dockerfile b/src/docker/client/Dockerfile new file mode 100644 index 000000000000..0755a7989281 --- /dev/null +++ b/src/docker/client/Dockerfile @@ -0,0 +1,8 @@ +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. + +ARG BASE_REPOSITORY=flwr/base +ARG BASE_IMAGE_TAG +FROM $BASE_REPOSITORY:$BASE_IMAGE_TAG + +ARG FLWR_VERSION +RUN python -m pip install -U --no-cache-dir flwr[rest]==${FLWR_VERSION} From c9af663bfac5607144c289b7b46b28c601df374d Mon Sep 17 00:00:00 2001 From: Robert Steiner Date: Wed, 3 Jan 2024 12:38:47 +0100 Subject: [PATCH 12/30] Add docker client image ci (#2747) --- .github/workflows/_docker-build.yml | 4 ++-- .github/workflows/docker-base.yml | 2 +- .github/workflows/docker-client.yml | 36 +++++++++++++++++++++++++++++ 3 files changed, 39 insertions(+), 3 deletions(-) create mode 100644 .github/workflows/docker-client.yml diff --git a/.github/workflows/_docker-build.yml b/.github/workflows/_docker-build.yml index 9005f21424fb..36b94b5c7e97 100644 --- a/.github/workflows/_docker-build.yml +++ b/.github/workflows/_docker-build.yml @@ -1,4 +1,4 @@ -name: Reusable docker server image build workflow +name: Reusable docker image build workflow on: workflow_call: @@ -35,7 +35,7 @@ permissions: # based on 
https://docs.docker.com/build/ci/github-actions/multi-platform/#distribute-build-across-multiple-runners jobs: build: - name: Build server image + name: Build image runs-on: ubuntu-22.04 timeout-minutes: 60 outputs: diff --git a/.github/workflows/docker-base.yml b/.github/workflows/docker-base.yml index fe276585a3bd..f2cd2ef99d08 100644 --- a/.github/workflows/docker-base.yml +++ b/.github/workflows/docker-base.yml @@ -39,7 +39,7 @@ jobs: echo "ubuntu-version=${{ env.DEFAULT_UBUNTU }}" >> "$GITHUB_OUTPUT" build-base-images: - name: Build images + name: Build base images uses: ./.github/workflows/_docker-build.yml needs: parameters strategy: diff --git a/.github/workflows/docker-client.yml b/.github/workflows/docker-client.yml new file mode 100644 index 000000000000..47083b258982 --- /dev/null +++ b/.github/workflows/docker-client.yml @@ -0,0 +1,36 @@ +name: Build docker client image + +on: + workflow_dispatch: + inputs: + flwr-version: + description: "Version of Flower e.g. (1.6.0)." + required: true + type: string + +permissions: + contents: read + +jobs: + build-client-images: + name: Build client images + uses: ./.github/workflows/_docker-build.yml + # run only on default branch when using it with workflow_dispatch + if: github.ref_name == github.event.repository.default_branch + strategy: + fail-fast: false + matrix: + python-version: ["3.8", "3.9", "3.10", "3.11"] + with: + namespace-repository: flwr/client + file-dir: src/docker/client + build-args: | + FLWR_VERSION=${{ github.event.inputs.flwr-version }} + BASE_IMAGE_TAG=py${{ matrix.python-version }}-ubuntu22.04 + tags: | + ${{ github.event.inputs.flwr-version }}-py${{ matrix.python-version }}-ubuntu22.04 + ${{ github.event.inputs.flwr-version }} + latest + secrets: + dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} + dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} From 68e6cf89f485131eb737b1f8e4e43fb864c76cb8 Mon Sep 17 00:00:00 2001 From: Heng Pan <134433891+panh99@users.noreply.github.com> Date: Fri, 5 
Jan 2024 01:44:28 +0800 Subject: [PATCH 13/30] Rename `workload_id` to `run_id` (#2769) --- examples/mt-pytorch/driver.py | 8 +- examples/secaggplus-mt/driver.py | 8 +- src/proto/flwr/proto/driver.proto | 4 +- src/proto/flwr/proto/task.proto | 4 +- src/py/flwr/client/app.py | 8 +- src/py/flwr/client/grpc_client/connection.py | 2 +- .../client/message_handler/message_handler.py | 2 +- .../message_handler/message_handler_test.py | 8 +- .../client/message_handler/task_handler.py | 10 +- .../message_handler/task_handler_test.py | 2 +- src/py/flwr/client/node_state.py | 22 ++-- src/py/flwr/client/node_state_tests.py | 14 +-- src/py/flwr/driver/app.py | 8 +- src/py/flwr/driver/app_test.py | 2 +- src/py/flwr/driver/driver.py | 22 ++-- src/py/flwr/driver/driver_client_proxy.py | 8 +- .../flwr/driver/driver_client_proxy_test.py | 16 +-- src/py/flwr/driver/driver_test.py | 16 +-- src/py/flwr/proto/driver_pb2.py | 32 +++--- src/py/flwr/proto/driver_pb2.pyi | 16 +-- src/py/flwr/proto/task_pb2.py | 40 +++---- src/py/flwr/proto/task_pb2.pyi | 16 +-- src/py/flwr/server/driver/driver_servicer.py | 6 +- .../message_handler/message_handler_test.py | 2 +- src/py/flwr/server/state/in_memory_state.py | 30 +++--- src/py/flwr/server/state/sqlite_state.py | 46 ++++---- src/py/flwr/server/state/sqlite_state_test.py | 4 +- src/py/flwr/server/state/state.py | 8 +- src/py/flwr/server/state/state_test.py | 102 ++++++++---------- src/py/flwr/server/utils/validator_test.py | 4 +- .../ray_transport/ray_client_proxy.py | 10 +- .../ray_transport/ray_client_proxy_test.py | 10 +- 32 files changed, 234 insertions(+), 256 deletions(-) diff --git a/examples/mt-pytorch/driver.py b/examples/mt-pytorch/driver.py index fed760f021af..6e1e731301c9 100644 --- a/examples/mt-pytorch/driver.py +++ b/examples/mt-pytorch/driver.py @@ -59,8 +59,8 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: ) # -------------------------------------------------------------------------- Driver SDK 
-workload_id = create_workload_res.workload_id -print(f"Created workload id {workload_id}") +run_id = create_workload_res.run_id +print(f"Created run id {run_id}") history = History() for server_round in range(num_rounds): @@ -93,7 +93,7 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: # loop and wait until enough client nodes are available. while True: # Get a list of node ID's from the server - get_nodes_req = driver_pb2.GetNodesRequest(workload_id=workload_id) + get_nodes_req = driver_pb2.GetNodesRequest(run_id=run_id) # ---------------------------------------------------------------------- Driver SDK get_nodes_res: driver_pb2.GetNodesResponse = driver.get_nodes( @@ -125,7 +125,7 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: new_task_ins = task_pb2.TaskIns( task_id="", # Do not set, will be created and set by the DriverAPI group_id="", - workload_id=workload_id, + run_id=run_id, task=task_pb2.Task( producer=node_pb2.Node( node_id=0, diff --git a/examples/secaggplus-mt/driver.py b/examples/secaggplus-mt/driver.py index d9f795766f6d..cc52febf78e4 100644 --- a/examples/secaggplus-mt/driver.py +++ b/examples/secaggplus-mt/driver.py @@ -23,7 +23,7 @@ def merge(_task: task_pb2.Task, _merge_task: task_pb2.Task) -> task_pb2.Task: task_pb2.TaskIns( task_id="", # Do not set, will be created and set by the DriverAPI group_id="", - workload_id=workload_id, + run_id=run_id, task=merge( task, task_pb2.Task( @@ -89,8 +89,8 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: ) # -------------------------------------------------------------------------- Driver SDK -workload_id = create_workload_res.workload_id -print(f"Created workload id {workload_id}") +run_id = create_workload_res.run_id +print(f"Created run id {run_id}") history = History() for server_round in range(num_rounds): @@ -119,7 +119,7 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: # loop and wait until enough client nodes 
are available. while True: # Get a list of node ID's from the server - get_nodes_req = driver_pb2.GetNodesRequest(workload_id=workload_id) + get_nodes_req = driver_pb2.GetNodesRequest(run_id=run_id) # ---------------------------------------------------------------------- Driver SDK get_nodes_res: driver_pb2.GetNodesResponse = driver.get_nodes( diff --git a/src/proto/flwr/proto/driver.proto b/src/proto/flwr/proto/driver.proto index eb948217a4de..4d2e214e26eb 100644 --- a/src/proto/flwr/proto/driver.proto +++ b/src/proto/flwr/proto/driver.proto @@ -36,10 +36,10 @@ service Driver { // CreateWorkload message CreateWorkloadRequest {} -message CreateWorkloadResponse { sint64 workload_id = 1; } +message CreateWorkloadResponse { sint64 run_id = 1; } // GetNodes messages -message GetNodesRequest { sint64 workload_id = 1; } +message GetNodesRequest { sint64 run_id = 1; } message GetNodesResponse { repeated Node nodes = 1; } // PushTaskIns messages diff --git a/src/proto/flwr/proto/task.proto b/src/proto/flwr/proto/task.proto index 2205ef2815c8..ad71d7ea3811 100644 --- a/src/proto/flwr/proto/task.proto +++ b/src/proto/flwr/proto/task.proto @@ -36,14 +36,14 @@ message Task { message TaskIns { string task_id = 1; string group_id = 2; - sint64 workload_id = 3; + sint64 run_id = 3; Task task = 4; } message TaskRes { string task_id = 1; string group_id = 2; - sint64 workload_id = 3; + sint64 run_id = 3; Task task = 4; } diff --git a/src/py/flwr/client/app.py b/src/py/flwr/client/app.py index 2a2f067c2823..40fcb3178ba0 100644 --- a/src/py/flwr/client/app.py +++ b/src/py/flwr/client/app.py @@ -349,7 +349,7 @@ def _load_app() -> Flower: break # Register state - node_state.register_workloadstate(workload_id=task_ins.workload_id) + node_state.register_workloadstate(run_id=task_ins.run_id) # Load app app: Flower = load_flower_callable_fn() @@ -357,15 +357,13 @@ def _load_app() -> Flower: # Handle task message fwd_msg: Fwd = Fwd( task_ins=task_ins, - 
state=node_state.retrieve_workloadstate( - workload_id=task_ins.workload_id - ), + state=node_state.retrieve_workloadstate(run_id=task_ins.run_id), ) bwd_msg: Bwd = app(fwd=fwd_msg) # Update node state node_state.update_workloadstate( - workload_id=bwd_msg.task_res.workload_id, + run_id=bwd_msg.task_res.run_id, workload_state=bwd_msg.state, ) diff --git a/src/py/flwr/client/grpc_client/connection.py b/src/py/flwr/client/grpc_client/connection.py index 335d28e72828..481f32c77859 100644 --- a/src/py/flwr/client/grpc_client/connection.py +++ b/src/py/flwr/client/grpc_client/connection.py @@ -119,7 +119,7 @@ def receive() -> TaskIns: return TaskIns( task_id=str(uuid.uuid4()), group_id="", - workload_id=0, + run_id=0, task=Task( producer=Node(node_id=0, anonymous=True), consumer=Node(node_id=0, anonymous=True), diff --git a/src/py/flwr/client/message_handler/message_handler.py b/src/py/flwr/client/message_handler/message_handler.py index 0f3070cfb01a..f2b05622abcd 100644 --- a/src/py/flwr/client/message_handler/message_handler.py +++ b/src/py/flwr/client/message_handler/message_handler.py @@ -112,7 +112,7 @@ def handle( task_res = TaskRes( task_id="", group_id="", - workload_id=0, + run_id=0, task=Task( ancestry=[], sa=SecureAggregation(named_values=serde.named_values_to_proto(res)), diff --git a/src/py/flwr/client/message_handler/message_handler_test.py b/src/py/flwr/client/message_handler/message_handler_test.py index d7f410d81fc0..0ffa0c2c5de4 100644 --- a/src/py/flwr/client/message_handler/message_handler_test.py +++ b/src/py/flwr/client/message_handler/message_handler_test.py @@ -121,7 +121,7 @@ def test_client_without_get_properties() -> None: task_ins: TaskIns = TaskIns( task_id=str(uuid.uuid4()), group_id="", - workload_id=0, + run_id=0, task=Task( producer=Node(node_id=0, anonymous=True), consumer=Node(node_id=0, anonymous=True), @@ -152,7 +152,7 @@ def test_client_without_get_properties() -> None: TaskRes( task_id=str(uuid.uuid4()), group_id="", - 
workload_id=0, + run_id=0, ) ) # pylint: disable=no-member @@ -189,7 +189,7 @@ def test_client_with_get_properties() -> None: task_ins = TaskIns( task_id=str(uuid.uuid4()), group_id="", - workload_id=0, + run_id=0, task=Task( producer=Node(node_id=0, anonymous=True), consumer=Node(node_id=0, anonymous=True), @@ -220,7 +220,7 @@ def test_client_with_get_properties() -> None: TaskRes( task_id=str(uuid.uuid4()), group_id="", - workload_id=0, + run_id=0, ) ) # pylint: disable=no-member diff --git a/src/py/flwr/client/message_handler/task_handler.py b/src/py/flwr/client/message_handler/task_handler.py index fc24539998c0..13b1948eec07 100644 --- a/src/py/flwr/client/message_handler/task_handler.py +++ b/src/py/flwr/client/message_handler/task_handler.py @@ -70,7 +70,7 @@ def validate_task_res(task_res: TaskRes) -> bool: Returns ------- is_valid: bool - True if the `task_id`, `group_id`, and `workload_id` fields in TaskRes + True if the `task_id`, `group_id`, and `run_id` fields in TaskRes and the `producer`, `consumer`, and `ancestry` fields in its sub-message Task are not initialized accidentally elsewhere, False otherwise. @@ -84,7 +84,7 @@ def validate_task_res(task_res: TaskRes) -> bool: if ( "task_id" in initialized_fields_in_task_res or "group_id" in initialized_fields_in_task_res - or "workload_id" in initialized_fields_in_task_res + or "run_id" in initialized_fields_in_task_res or "producer" in initialized_fields_in_task or "consumer" in initialized_fields_in_task or "ancestry" in initialized_fields_in_task @@ -129,7 +129,7 @@ def wrap_client_message_in_task_res(client_message: ClientMessage) -> TaskRes: return TaskRes( task_id="", group_id="", - workload_id=0, + run_id=0, task=Task(ancestry=[], legacy_client_message=client_message), ) @@ -139,7 +139,7 @@ def configure_task_res( ) -> TaskRes: """Set the metadata of a TaskRes. 
- Fill `group_id` and `workload_id` in TaskRes + Fill `group_id` and `run_id` in TaskRes and `producer`, `consumer`, and `ancestry` in Task in TaskRes. `producer` in Task in TaskRes will remain unchanged/unset. @@ -152,7 +152,7 @@ def configure_task_res( task_res = TaskRes( task_id="", # This will be generated by the server group_id=ref_task_ins.group_id, - workload_id=ref_task_ins.workload_id, + run_id=ref_task_ins.run_id, task=task_res.task, ) # pylint: disable-next=no-member diff --git a/src/py/flwr/client/message_handler/task_handler_test.py b/src/py/flwr/client/message_handler/task_handler_test.py index 21f3a2ead98a..748ef63e72ef 100644 --- a/src/py/flwr/client/message_handler/task_handler_test.py +++ b/src/py/flwr/client/message_handler/task_handler_test.py @@ -92,7 +92,7 @@ def test_validate_task_res() -> None: assert not validate_task_res(task_res) task_res.Clear() - task_res.workload_id = 61016 + task_res.run_id = 61016 assert not validate_task_res(task_res) task_res.Clear() diff --git a/src/py/flwr/client/node_state.py b/src/py/flwr/client/node_state.py index ee4f70dc4dca..58a1f7111250 100644 --- a/src/py/flwr/client/node_state.py +++ b/src/py/flwr/client/node_state.py @@ -27,24 +27,22 @@ def __init__(self) -> None: self._meta: Dict[str, Any] = {} # holds metadata about the node self.workload_states: Dict[int, WorkloadState] = {} - def register_workloadstate(self, workload_id: int) -> None: + def register_workloadstate(self, run_id: int) -> None: """Register new workload state for this node.""" - if workload_id not in self.workload_states: - self.workload_states[workload_id] = WorkloadState({}) + if run_id not in self.workload_states: + self.workload_states[run_id] = WorkloadState({}) - def retrieve_workloadstate(self, workload_id: int) -> WorkloadState: - """Get workload state given a workload_id.""" - if workload_id in self.workload_states: - return self.workload_states[workload_id] + def retrieve_workloadstate(self, run_id: int) -> WorkloadState: + 
"""Get workload state given a run_id.""" + if run_id in self.workload_states: + return self.workload_states[run_id] raise RuntimeError( - f"WorkloadState for workload_id={workload_id} doesn't exist." + f"WorkloadState for run_id={run_id} doesn't exist." " A workload must be registered before it can be retrieved or updated " " by a client." ) - def update_workloadstate( - self, workload_id: int, workload_state: WorkloadState - ) -> None: + def update_workloadstate(self, run_id: int, workload_state: WorkloadState) -> None: """Update workload state.""" - self.workload_states[workload_id] = workload_state + self.workload_states[run_id] = workload_state diff --git a/src/py/flwr/client/node_state_tests.py b/src/py/flwr/client/node_state_tests.py index d9f9ae7db3b0..29f3c80a391c 100644 --- a/src/py/flwr/client/node_state_tests.py +++ b/src/py/flwr/client/node_state_tests.py @@ -32,7 +32,7 @@ def _run_dummy_task(state: WorkloadState) -> WorkloadState: def test_multiworkload_in_node_state() -> None: """Test basic NodeState logic.""" # Tasks to perform - tasks = [TaskIns(workload_id=w_id) for w_id in [0, 1, 1, 2, 3, 2, 1, 5]] + tasks = [TaskIns(run_id=r_id) for r_id in [0, 1, 1, 2, 3, 2, 1, 5]] # the "tasks" is to count how many times each workload is executed expected_values = {0: "1", 1: "1" * 3, 2: "1" * 2, 3: "1", 5: "1"} @@ -40,20 +40,20 @@ def test_multiworkload_in_node_state() -> None: node_state = NodeState() for task in tasks: - w_id = task.workload_id + r_id = task.run_id # Register - node_state.register_workloadstate(workload_id=w_id) + node_state.register_workloadstate(run_id=r_id) # Get workload state - state = node_state.retrieve_workloadstate(workload_id=w_id) + state = node_state.retrieve_workloadstate(run_id=r_id) # Run "task" updated_state = _run_dummy_task(state) # Update workload state - node_state.update_workloadstate(workload_id=w_id, workload_state=updated_state) + node_state.update_workloadstate(run_id=r_id, workload_state=updated_state) # Verify 
values - for w_id, state in node_state.workload_states.items(): - assert state.state["counter"] == expected_values[w_id] + for r_id, state in node_state.workload_states.items(): + assert state.state["counter"] == expected_values[r_id] diff --git a/src/py/flwr/driver/app.py b/src/py/flwr/driver/app.py index 3cb8652365d8..2d4f2cf81390 100644 --- a/src/py/flwr/driver/app.py +++ b/src/py/flwr/driver/app.py @@ -170,8 +170,8 @@ def update_client_manager( and dead nodes will be removed from the ClientManager via `client_manager.unregister()`. """ - # Request for workload_id - workload_id = driver.create_workload(driver_pb2.CreateWorkloadRequest()).workload_id + # Request for run_id + run_id = driver.create_workload(driver_pb2.CreateWorkloadRequest()).run_id # Loop until the driver is disconnected registered_nodes: Dict[int, DriverClientProxy] = {} @@ -181,7 +181,7 @@ def update_client_manager( if driver.stub is None: break get_nodes_res = driver.get_nodes( - req=driver_pb2.GetNodesRequest(workload_id=workload_id) + req=driver_pb2.GetNodesRequest(run_id=run_id) ) all_node_ids = {node.node_id for node in get_nodes_res.nodes} dead_nodes = set(registered_nodes).difference(all_node_ids) @@ -199,7 +199,7 @@ def update_client_manager( node_id=node_id, driver=driver, anonymous=False, - workload_id=workload_id, + run_id=run_id, ) if client_manager.register(client_proxy): registered_nodes[node_id] = client_proxy diff --git a/src/py/flwr/driver/app_test.py b/src/py/flwr/driver/app_test.py index 91b4fd30bc4b..4b376cb94ef5 100644 --- a/src/py/flwr/driver/app_test.py +++ b/src/py/flwr/driver/app_test.py @@ -43,7 +43,7 @@ def test_simple_client_manager_update(self) -> None: ] driver = MagicMock() driver.stub = "driver stub" - driver.create_workload.return_value = CreateWorkloadResponse(workload_id=1) + driver.create_workload.return_value = CreateWorkloadResponse(run_id=1) driver.get_nodes.return_value = GetNodesResponse(nodes=expected_nodes) client_manager = SimpleClientManager() lock = 
threading.Lock() diff --git a/src/py/flwr/driver/driver.py b/src/py/flwr/driver/driver.py index f1a7c6663c11..7d1af40f4ee9 100644 --- a/src/py/flwr/driver/driver.py +++ b/src/py/flwr/driver/driver.py @@ -54,37 +54,37 @@ def __init__( self.addr = driver_service_address self.certificates = certificates self.grpc_driver: Optional[GrpcDriver] = None - self.workload_id: Optional[int] = None + self.run_id: Optional[int] = None self.node = Node(node_id=0, anonymous=True) - def _get_grpc_driver_and_workload_id(self) -> Tuple[GrpcDriver, int]: + def _get_grpc_driver_and_run_id(self) -> Tuple[GrpcDriver, int]: # Check if the GrpcDriver is initialized - if self.grpc_driver is None or self.workload_id is None: + if self.grpc_driver is None or self.run_id is None: # Connect and create workload self.grpc_driver = GrpcDriver( driver_service_address=self.addr, certificates=self.certificates ) self.grpc_driver.connect() res = self.grpc_driver.create_workload(CreateWorkloadRequest()) - self.workload_id = res.workload_id + self.run_id = res.run_id - return self.grpc_driver, self.workload_id + return self.grpc_driver, self.run_id def get_nodes(self) -> List[Node]: """Get node IDs.""" - grpc_driver, workload_id = self._get_grpc_driver_and_workload_id() + grpc_driver, run_id = self._get_grpc_driver_and_run_id() # Call GrpcDriver method - res = grpc_driver.get_nodes(GetNodesRequest(workload_id=workload_id)) + res = grpc_driver.get_nodes(GetNodesRequest(run_id=run_id)) return list(res.nodes) def push_task_ins(self, task_ins_list: List[TaskIns]) -> List[str]: """Schedule tasks.""" - grpc_driver, workload_id = self._get_grpc_driver_and_workload_id() + grpc_driver, run_id = self._get_grpc_driver_and_run_id() - # Set workload_id + # Set run_id for task_ins in task_ins_list: - task_ins.workload_id = workload_id + task_ins.run_id = run_id # Call GrpcDriver method res = grpc_driver.push_task_ins(PushTaskInsRequest(task_ins_list=task_ins_list)) @@ -92,7 +92,7 @@ def push_task_ins(self, 
task_ins_list: List[TaskIns]) -> List[str]: def pull_task_res(self, task_ids: Iterable[str]) -> List[TaskRes]: """Get task results.""" - grpc_driver, _ = self._get_grpc_driver_and_workload_id() + grpc_driver, _ = self._get_grpc_driver_and_run_id() # Call GrpcDriver method res = grpc_driver.pull_task_res( diff --git a/src/py/flwr/driver/driver_client_proxy.py b/src/py/flwr/driver/driver_client_proxy.py index 6d60fc49159b..6c15acb9ebde 100644 --- a/src/py/flwr/driver/driver_client_proxy.py +++ b/src/py/flwr/driver/driver_client_proxy.py @@ -31,13 +31,11 @@ class DriverClientProxy(ClientProxy): """Flower client proxy which delegates work using the Driver API.""" - def __init__( - self, node_id: int, driver: GrpcDriver, anonymous: bool, workload_id: int - ): + def __init__(self, node_id: int, driver: GrpcDriver, anonymous: bool, run_id: int): super().__init__(str(node_id)) self.node_id = node_id self.driver = driver - self.workload_id = workload_id + self.run_id = run_id self.anonymous = anonymous def get_properties( @@ -106,7 +104,7 @@ def _send_receive_msg( task_ins = task_pb2.TaskIns( task_id="", group_id="", - workload_id=self.workload_id, + run_id=self.run_id, task=task_pb2.Task( producer=node_pb2.Node( node_id=0, diff --git a/src/py/flwr/driver/driver_client_proxy_test.py b/src/py/flwr/driver/driver_client_proxy_test.py index 82b5b46d7810..e7fb088dbf57 100644 --- a/src/py/flwr/driver/driver_client_proxy_test.py +++ b/src/py/flwr/driver/driver_client_proxy_test.py @@ -52,7 +52,7 @@ def test_get_properties(self) -> None: task_pb2.TaskRes( task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", group_id="", - workload_id=0, + run_id=0, task=task_pb2.Task( legacy_client_message=ClientMessage( get_properties_res=ClientMessage.GetPropertiesRes( @@ -64,7 +64,7 @@ def test_get_properties(self) -> None: ] ) client = DriverClientProxy( - node_id=1, driver=self.driver, anonymous=True, workload_id=0 + node_id=1, driver=self.driver, anonymous=True, run_id=0 ) request_properties: 
Config = {"tensor_type": "str"} ins: flwr.common.GetPropertiesIns = flwr.common.GetPropertiesIns( @@ -88,7 +88,7 @@ def test_get_parameters(self) -> None: task_pb2.TaskRes( task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", group_id="", - workload_id=0, + run_id=0, task=task_pb2.Task( legacy_client_message=ClientMessage( get_parameters_res=ClientMessage.GetParametersRes( @@ -100,7 +100,7 @@ def test_get_parameters(self) -> None: ] ) client = DriverClientProxy( - node_id=1, driver=self.driver, anonymous=True, workload_id=0 + node_id=1, driver=self.driver, anonymous=True, run_id=0 ) get_parameters_ins = GetParametersIns(config={}) @@ -123,7 +123,7 @@ def test_fit(self) -> None: task_pb2.TaskRes( task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", group_id="", - workload_id=0, + run_id=0, task=task_pb2.Task( legacy_client_message=ClientMessage( fit_res=ClientMessage.FitRes( @@ -136,7 +136,7 @@ def test_fit(self) -> None: ] ) client = DriverClientProxy( - node_id=1, driver=self.driver, anonymous=True, workload_id=0 + node_id=1, driver=self.driver, anonymous=True, run_id=0 ) parameters = flwr.common.ndarrays_to_parameters([np.ones((2, 2))]) ins: flwr.common.FitIns = flwr.common.FitIns(parameters, {}) @@ -160,7 +160,7 @@ def test_evaluate(self) -> None: task_pb2.TaskRes( task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", group_id="", - workload_id=0, + run_id=0, task=task_pb2.Task( legacy_client_message=ClientMessage( evaluate_res=ClientMessage.EvaluateRes( @@ -172,7 +172,7 @@ def test_evaluate(self) -> None: ] ) client = DriverClientProxy( - node_id=1, driver=self.driver, anonymous=True, workload_id=0 + node_id=1, driver=self.driver, anonymous=True, run_id=0 ) parameters = flwr.common.Parameters(tensors=[], tensor_type="np") evaluate_ins: flwr.common.EvaluateIns = flwr.common.EvaluateIns(parameters, {}) diff --git a/src/py/flwr/driver/driver_test.py b/src/py/flwr/driver/driver_test.py index 820018788a8f..8e3b7994986f 100644 --- a/src/py/flwr/driver/driver_test.py +++ 
b/src/py/flwr/driver/driver_test.py @@ -33,7 +33,7 @@ class TestDriver(unittest.TestCase): def setUp(self) -> None: """Initialize mock GrpcDriver and Driver instance before each test.""" mock_response = Mock() - mock_response.workload_id = 61016 + mock_response.run_id = 61016 self.mock_grpc_driver = Mock() self.mock_grpc_driver.create_workload.return_value = mock_response self.patcher = patch( @@ -50,11 +50,11 @@ def test_check_and_init_grpc_driver_already_initialized(self) -> None: """Test that GrpcDriver doesn't initialize if workload is created.""" # Prepare self.driver.grpc_driver = self.mock_grpc_driver - self.driver.workload_id = 61016 + self.driver.run_id = 61016 # Execute # pylint: disable-next=protected-access - self.driver._get_grpc_driver_and_workload_id() + self.driver._get_grpc_driver_and_run_id() # Assert self.mock_grpc_driver.connect.assert_not_called() @@ -63,11 +63,11 @@ def test_check_and_init_grpc_driver_needs_initialization(self) -> None: """Test GrpcDriver initialization when workload is not created.""" # Execute # pylint: disable-next=protected-access - self.driver._get_grpc_driver_and_workload_id() + self.driver._get_grpc_driver_and_run_id() # Assert self.mock_grpc_driver.connect.assert_called_once() - self.assertEqual(self.driver.workload_id, 61016) + self.assertEqual(self.driver.run_id, 61016) def test_get_nodes(self) -> None: """Test retrieval of nodes.""" @@ -85,7 +85,7 @@ def test_get_nodes(self) -> None: self.assertEqual(len(args), 1) self.assertEqual(len(kwargs), 0) self.assertIsInstance(args[0], GetNodesRequest) - self.assertEqual(args[0].workload_id, 61016) + self.assertEqual(args[0].run_id, 61016) self.assertEqual(nodes, mock_response.nodes) def test_push_task_ins(self) -> None: @@ -107,7 +107,7 @@ def test_push_task_ins(self) -> None: self.assertIsInstance(args[0], PushTaskInsRequest) self.assertEqual(task_ids, mock_response.task_ids) for task_ins in args[0].task_ins_list: - self.assertEqual(task_ins.workload_id, 61016) + 
self.assertEqual(task_ins.run_id, 61016) def test_pull_task_res_with_given_task_ids(self) -> None: """Test pulling task results with specific task IDs.""" @@ -136,7 +136,7 @@ def test_del_with_initialized_driver(self) -> None: """Test cleanup behavior when Driver is initialized.""" # Prepare # pylint: disable-next=protected-access - self.driver._get_grpc_driver_and_workload_id() + self.driver._get_grpc_driver_and_run_id() # Execute self.driver.__del__() diff --git a/src/py/flwr/proto/driver_pb2.py b/src/py/flwr/proto/driver_pb2.py index c138507e03e9..b57152f83fb1 100644 --- a/src/py/flwr/proto/driver_pb2.py +++ b/src/py/flwr/proto/driver_pb2.py @@ -16,7 +16,7 @@ from flwr.proto import task_pb2 as flwr_dot_proto_dot_task__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17\x66lwr/proto/driver.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\"\x17\n\x15\x43reateWorkloadRequest\"-\n\x16\x43reateWorkloadResponse\x12\x13\n\x0bworkload_id\x18\x01 \x01(\x12\"&\n\x0fGetNodesRequest\x12\x13\n\x0bworkload_id\x18\x01 \x01(\x12\"3\n\x10GetNodesResponse\x12\x1f\n\x05nodes\x18\x01 \x03(\x0b\x32\x10.flwr.proto.Node\"@\n\x12PushTaskInsRequest\x12*\n\rtask_ins_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"\'\n\x13PushTaskInsResponse\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"F\n\x12PullTaskResRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"A\n\x13PullTaskResResponse\x12*\n\rtask_res_list\x18\x01 
\x03(\x0b\x32\x13.flwr.proto.TaskRes2\xd0\x02\n\x06\x44river\x12Y\n\x0e\x43reateWorkload\x12!.flwr.proto.CreateWorkloadRequest\x1a\".flwr.proto.CreateWorkloadResponse\"\x00\x12G\n\x08GetNodes\x12\x1b.flwr.proto.GetNodesRequest\x1a\x1c.flwr.proto.GetNodesResponse\"\x00\x12P\n\x0bPushTaskIns\x12\x1e.flwr.proto.PushTaskInsRequest\x1a\x1f.flwr.proto.PushTaskInsResponse\"\x00\x12P\n\x0bPullTaskRes\x12\x1e.flwr.proto.PullTaskResRequest\x1a\x1f.flwr.proto.PullTaskResResponse\"\x00\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17\x66lwr/proto/driver.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\"\x17\n\x15\x43reateWorkloadRequest\"(\n\x16\x43reateWorkloadResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"!\n\x0fGetNodesRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"3\n\x10GetNodesResponse\x12\x1f\n\x05nodes\x18\x01 \x03(\x0b\x32\x10.flwr.proto.Node\"@\n\x12PushTaskInsRequest\x12*\n\rtask_ins_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"\'\n\x13PushTaskInsResponse\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"F\n\x12PullTaskResRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"A\n\x13PullTaskResResponse\x12*\n\rtask_res_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskRes2\xd0\x02\n\x06\x44river\x12Y\n\x0e\x43reateWorkload\x12!.flwr.proto.CreateWorkloadRequest\x1a\".flwr.proto.CreateWorkloadResponse\"\x00\x12G\n\x08GetNodes\x12\x1b.flwr.proto.GetNodesRequest\x1a\x1c.flwr.proto.GetNodesResponse\"\x00\x12P\n\x0bPushTaskIns\x12\x1e.flwr.proto.PushTaskInsRequest\x1a\x1f.flwr.proto.PushTaskInsResponse\"\x00\x12P\n\x0bPullTaskRes\x12\x1e.flwr.proto.PullTaskResRequest\x1a\x1f.flwr.proto.PullTaskResResponse\"\x00\x62\x06proto3') @@ -91,19 +91,19 @@ _CREATEWORKLOADREQUEST._serialized_start=85 _CREATEWORKLOADREQUEST._serialized_end=108 _CREATEWORKLOADRESPONSE._serialized_start=110 - _CREATEWORKLOADRESPONSE._serialized_end=155 - 
_GETNODESREQUEST._serialized_start=157 - _GETNODESREQUEST._serialized_end=195 - _GETNODESRESPONSE._serialized_start=197 - _GETNODESRESPONSE._serialized_end=248 - _PUSHTASKINSREQUEST._serialized_start=250 - _PUSHTASKINSREQUEST._serialized_end=314 - _PUSHTASKINSRESPONSE._serialized_start=316 - _PUSHTASKINSRESPONSE._serialized_end=355 - _PULLTASKRESREQUEST._serialized_start=357 - _PULLTASKRESREQUEST._serialized_end=427 - _PULLTASKRESRESPONSE._serialized_start=429 - _PULLTASKRESRESPONSE._serialized_end=494 - _DRIVER._serialized_start=497 - _DRIVER._serialized_end=833 + _CREATEWORKLOADRESPONSE._serialized_end=150 + _GETNODESREQUEST._serialized_start=152 + _GETNODESREQUEST._serialized_end=185 + _GETNODESRESPONSE._serialized_start=187 + _GETNODESRESPONSE._serialized_end=238 + _PUSHTASKINSREQUEST._serialized_start=240 + _PUSHTASKINSREQUEST._serialized_end=304 + _PUSHTASKINSRESPONSE._serialized_start=306 + _PUSHTASKINSRESPONSE._serialized_end=345 + _PULLTASKRESREQUEST._serialized_start=347 + _PULLTASKRESREQUEST._serialized_end=417 + _PULLTASKRESRESPONSE._serialized_start=419 + _PULLTASKRESRESPONSE._serialized_end=484 + _DRIVER._serialized_start=487 + _DRIVER._serialized_end=823 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/driver_pb2.pyi b/src/py/flwr/proto/driver_pb2.pyi index 8b940972cb6d..cebbd41590f8 100644 --- a/src/py/flwr/proto/driver_pb2.pyi +++ b/src/py/flwr/proto/driver_pb2.pyi @@ -22,25 +22,25 @@ global___CreateWorkloadRequest = CreateWorkloadRequest class CreateWorkloadResponse(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor - WORKLOAD_ID_FIELD_NUMBER: builtins.int - workload_id: builtins.int + RUN_ID_FIELD_NUMBER: builtins.int + run_id: builtins.int def __init__(self, *, - workload_id: builtins.int = ..., + run_id: builtins.int = ..., ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["workload_id",b"workload_id"]) -> None: ... 
+ def ClearField(self, field_name: typing_extensions.Literal["run_id",b"run_id"]) -> None: ... global___CreateWorkloadResponse = CreateWorkloadResponse class GetNodesRequest(google.protobuf.message.Message): """GetNodes messages""" DESCRIPTOR: google.protobuf.descriptor.Descriptor - WORKLOAD_ID_FIELD_NUMBER: builtins.int - workload_id: builtins.int + RUN_ID_FIELD_NUMBER: builtins.int + run_id: builtins.int def __init__(self, *, - workload_id: builtins.int = ..., + run_id: builtins.int = ..., ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["workload_id",b"workload_id"]) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["run_id",b"run_id"]) -> None: ... global___GetNodesRequest = GetNodesRequest class GetNodesResponse(google.protobuf.message.Message): diff --git a/src/py/flwr/proto/task_pb2.py b/src/py/flwr/proto/task_pb2.py index 6d8cf8fd3656..ba0e2e3f5218 100644 --- a/src/py/flwr/proto/task_pb2.py +++ b/src/py/flwr/proto/task_pb2.py @@ -16,7 +16,7 @@ from flwr.proto import transport_pb2 as flwr_dot_proto_dot_transport__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/task.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/transport.proto\"\xbe\x02\n\x04Task\x12\"\n\x08producer\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\"\n\x08\x63onsumer\x18\x02 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x12\n\ncreated_at\x18\x03 \x01(\t\x12\x14\n\x0c\x64\x65livered_at\x18\x04 \x01(\t\x12\x0b\n\x03ttl\x18\x05 \x01(\t\x12\x10\n\x08\x61ncestry\x18\x06 \x03(\t\x12)\n\x02sa\x18\x07 \x01(\x0b\x32\x1d.flwr.proto.SecureAggregation\x12<\n\x15legacy_server_message\x18\x65 \x01(\x0b\x32\x19.flwr.proto.ServerMessageB\x02\x18\x01\x12<\n\x15legacy_client_message\x18\x66 \x01(\x0b\x32\x19.flwr.proto.ClientMessageB\x02\x18\x01\"a\n\x07TaskIns\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x13\n\x0bworkload_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 
\x01(\x0b\x32\x10.flwr.proto.Task\"a\n\x07TaskRes\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x13\n\x0bworkload_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Task\"\xf3\x03\n\x05Value\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06sint64\x18\x02 \x01(\x12H\x00\x12\x0e\n\x04\x62ool\x18\x03 \x01(\x08H\x00\x12\x10\n\x06string\x18\x04 \x01(\tH\x00\x12\x0f\n\x05\x62ytes\x18\x05 \x01(\x0cH\x00\x12\x33\n\x0b\x64ouble_list\x18\x15 \x01(\x0b\x32\x1c.flwr.proto.Value.DoubleListH\x00\x12\x33\n\x0bsint64_list\x18\x16 \x01(\x0b\x32\x1c.flwr.proto.Value.Sint64ListH\x00\x12/\n\tbool_list\x18\x17 \x01(\x0b\x32\x1a.flwr.proto.Value.BoolListH\x00\x12\x33\n\x0bstring_list\x18\x18 \x01(\x0b\x32\x1c.flwr.proto.Value.StringListH\x00\x12\x31\n\nbytes_list\x18\x19 \x01(\x0b\x32\x1b.flwr.proto.Value.BytesListH\x00\x1a\x1a\n\nDoubleList\x12\x0c\n\x04vals\x18\x01 \x03(\x01\x1a\x1a\n\nSint64List\x12\x0c\n\x04vals\x18\x01 \x03(\x12\x1a\x18\n\x08\x42oolList\x12\x0c\n\x04vals\x18\x01 \x03(\x08\x1a\x1a\n\nStringList\x12\x0c\n\x04vals\x18\x01 \x03(\t\x1a\x19\n\tBytesList\x12\x0c\n\x04vals\x18\x01 \x03(\x0c\x42\x07\n\x05value\"\xa0\x01\n\x11SecureAggregation\x12\x44\n\x0cnamed_values\x18\x01 \x03(\x0b\x32..flwr.proto.SecureAggregation.NamedValuesEntry\x1a\x45\n\x10NamedValuesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.flwr.proto.Value:\x02\x38\x01\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/task.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/transport.proto\"\xbe\x02\n\x04Task\x12\"\n\x08producer\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\"\n\x08\x63onsumer\x18\x02 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x12\n\ncreated_at\x18\x03 \x01(\t\x12\x14\n\x0c\x64\x65livered_at\x18\x04 \x01(\t\x12\x0b\n\x03ttl\x18\x05 \x01(\t\x12\x10\n\x08\x61ncestry\x18\x06 \x03(\t\x12)\n\x02sa\x18\x07 
\x01(\x0b\x32\x1d.flwr.proto.SecureAggregation\x12<\n\x15legacy_server_message\x18\x65 \x01(\x0b\x32\x19.flwr.proto.ServerMessageB\x02\x18\x01\x12<\n\x15legacy_client_message\x18\x66 \x01(\x0b\x32\x19.flwr.proto.ClientMessageB\x02\x18\x01\"\\\n\x07TaskIns\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Task\"\\\n\x07TaskRes\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Task\"\xf3\x03\n\x05Value\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06sint64\x18\x02 \x01(\x12H\x00\x12\x0e\n\x04\x62ool\x18\x03 \x01(\x08H\x00\x12\x10\n\x06string\x18\x04 \x01(\tH\x00\x12\x0f\n\x05\x62ytes\x18\x05 \x01(\x0cH\x00\x12\x33\n\x0b\x64ouble_list\x18\x15 \x01(\x0b\x32\x1c.flwr.proto.Value.DoubleListH\x00\x12\x33\n\x0bsint64_list\x18\x16 \x01(\x0b\x32\x1c.flwr.proto.Value.Sint64ListH\x00\x12/\n\tbool_list\x18\x17 \x01(\x0b\x32\x1a.flwr.proto.Value.BoolListH\x00\x12\x33\n\x0bstring_list\x18\x18 \x01(\x0b\x32\x1c.flwr.proto.Value.StringListH\x00\x12\x31\n\nbytes_list\x18\x19 \x01(\x0b\x32\x1b.flwr.proto.Value.BytesListH\x00\x1a\x1a\n\nDoubleList\x12\x0c\n\x04vals\x18\x01 \x03(\x01\x1a\x1a\n\nSint64List\x12\x0c\n\x04vals\x18\x01 \x03(\x12\x1a\x18\n\x08\x42oolList\x12\x0c\n\x04vals\x18\x01 \x03(\x08\x1a\x1a\n\nStringList\x12\x0c\n\x04vals\x18\x01 \x03(\t\x1a\x19\n\tBytesList\x12\x0c\n\x04vals\x18\x01 \x03(\x0c\x42\x07\n\x05value\"\xa0\x01\n\x11SecureAggregation\x12\x44\n\x0cnamed_values\x18\x01 \x03(\x0b\x32..flwr.proto.SecureAggregation.NamedValuesEntry\x1a\x45\n\x10NamedValuesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.flwr.proto.Value:\x02\x38\x01\x62\x06proto3') @@ -126,23 +126,23 @@ _TASK._serialized_start=89 _TASK._serialized_end=407 _TASKINS._serialized_start=409 - _TASKINS._serialized_end=506 - 
_TASKRES._serialized_start=508 - _TASKRES._serialized_end=605 - _VALUE._serialized_start=608 - _VALUE._serialized_end=1107 - _VALUE_DOUBLELIST._serialized_start=963 - _VALUE_DOUBLELIST._serialized_end=989 - _VALUE_SINT64LIST._serialized_start=991 - _VALUE_SINT64LIST._serialized_end=1017 - _VALUE_BOOLLIST._serialized_start=1019 - _VALUE_BOOLLIST._serialized_end=1043 - _VALUE_STRINGLIST._serialized_start=1045 - _VALUE_STRINGLIST._serialized_end=1071 - _VALUE_BYTESLIST._serialized_start=1073 - _VALUE_BYTESLIST._serialized_end=1098 - _SECUREAGGREGATION._serialized_start=1110 - _SECUREAGGREGATION._serialized_end=1270 - _SECUREAGGREGATION_NAMEDVALUESENTRY._serialized_start=1201 - _SECUREAGGREGATION_NAMEDVALUESENTRY._serialized_end=1270 + _TASKINS._serialized_end=501 + _TASKRES._serialized_start=503 + _TASKRES._serialized_end=595 + _VALUE._serialized_start=598 + _VALUE._serialized_end=1097 + _VALUE_DOUBLELIST._serialized_start=953 + _VALUE_DOUBLELIST._serialized_end=979 + _VALUE_SINT64LIST._serialized_start=981 + _VALUE_SINT64LIST._serialized_end=1007 + _VALUE_BOOLLIST._serialized_start=1009 + _VALUE_BOOLLIST._serialized_end=1033 + _VALUE_STRINGLIST._serialized_start=1035 + _VALUE_STRINGLIST._serialized_end=1061 + _VALUE_BYTESLIST._serialized_start=1063 + _VALUE_BYTESLIST._serialized_end=1088 + _SECUREAGGREGATION._serialized_start=1100 + _SECUREAGGREGATION._serialized_end=1260 + _SECUREAGGREGATION_NAMEDVALUESENTRY._serialized_start=1191 + _SECUREAGGREGATION_NAMEDVALUESENTRY._serialized_end=1260 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/task_pb2.pyi b/src/py/flwr/proto/task_pb2.pyi index 7cf96cb61edf..f40a66ef98d1 100644 --- a/src/py/flwr/proto/task_pb2.pyi +++ b/src/py/flwr/proto/task_pb2.pyi @@ -59,44 +59,44 @@ class TaskIns(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor TASK_ID_FIELD_NUMBER: builtins.int GROUP_ID_FIELD_NUMBER: builtins.int - WORKLOAD_ID_FIELD_NUMBER: builtins.int + RUN_ID_FIELD_NUMBER: 
builtins.int TASK_FIELD_NUMBER: builtins.int task_id: typing.Text group_id: typing.Text - workload_id: builtins.int + run_id: builtins.int @property def task(self) -> global___Task: ... def __init__(self, *, task_id: typing.Text = ..., group_id: typing.Text = ..., - workload_id: builtins.int = ..., + run_id: builtins.int = ..., task: typing.Optional[global___Task] = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["task",b"task"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["group_id",b"group_id","task",b"task","task_id",b"task_id","workload_id",b"workload_id"]) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["group_id",b"group_id","run_id",b"run_id","task",b"task","task_id",b"task_id"]) -> None: ... global___TaskIns = TaskIns class TaskRes(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor TASK_ID_FIELD_NUMBER: builtins.int GROUP_ID_FIELD_NUMBER: builtins.int - WORKLOAD_ID_FIELD_NUMBER: builtins.int + RUN_ID_FIELD_NUMBER: builtins.int TASK_FIELD_NUMBER: builtins.int task_id: typing.Text group_id: typing.Text - workload_id: builtins.int + run_id: builtins.int @property def task(self) -> global___Task: ... def __init__(self, *, task_id: typing.Text = ..., group_id: typing.Text = ..., - workload_id: builtins.int = ..., + run_id: builtins.int = ..., task: typing.Optional[global___Task] = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["task",b"task"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["group_id",b"group_id","task",b"task","task_id",b"task_id","workload_id",b"workload_id"]) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["group_id",b"group_id","run_id",b"run_id","task",b"task","task_id",b"task_id"]) -> None: ... 
global___TaskRes = TaskRes class Value(google.protobuf.message.Message): diff --git a/src/py/flwr/server/driver/driver_servicer.py b/src/py/flwr/server/driver/driver_servicer.py index f96b3b1262ac..d2784834380e 100644 --- a/src/py/flwr/server/driver/driver_servicer.py +++ b/src/py/flwr/server/driver/driver_servicer.py @@ -51,7 +51,7 @@ def GetNodes( """Get available nodes.""" log(INFO, "DriverServicer.GetNodes") state: State = self.state_factory.state() - all_ids: Set[int] = state.get_nodes(request.workload_id) + all_ids: Set[int] = state.get_nodes(request.run_id) nodes: List[Node] = [ Node(node_id=node_id, anonymous=False) for node_id in all_ids ] @@ -63,8 +63,8 @@ def CreateWorkload( """Create workload ID.""" log(INFO, "DriverServicer.CreateWorkload") state: State = self.state_factory.state() - workload_id = state.create_workload() - return CreateWorkloadResponse(workload_id=workload_id) + run_id = state.create_workload() + return CreateWorkloadResponse(run_id=run_id) def PushTaskIns( self, request: PushTaskInsRequest, context: grpc.ServicerContext diff --git a/src/py/flwr/server/fleet/message_handler/message_handler_test.py b/src/py/flwr/server/fleet/message_handler/message_handler_test.py index 25fd822492f2..bb2205e26b18 100644 --- a/src/py/flwr/server/fleet/message_handler/message_handler_test.py +++ b/src/py/flwr/server/fleet/message_handler/message_handler_test.py @@ -109,7 +109,7 @@ def test_push_task_res() -> None: TaskRes( task_id="", group_id="", - workload_id=0, + run_id=0, task=Task(), ), ], diff --git a/src/py/flwr/server/state/in_memory_state.py b/src/py/flwr/server/state/in_memory_state.py index 384839b7461f..1ae7f65b7046 100644 --- a/src/py/flwr/server/state/in_memory_state.py +++ b/src/py/flwr/server/state/in_memory_state.py @@ -32,7 +32,7 @@ class InMemoryState(State): def __init__(self) -> None: self.node_ids: Set[int] = set() - self.workload_ids: Set[int] = set() + self.run_ids: Set[int] = set() self.task_ins_store: Dict[UUID, TaskIns] = {} 
self.task_res_store: Dict[UUID, TaskRes] = {} @@ -43,9 +43,9 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: if any(errors): log(ERROR, errors) return None - # Validate workload_id - if task_ins.workload_id not in self.workload_ids: - log(ERROR, "`workload_id` is invalid") + # Validate run_id + if task_ins.run_id not in self.run_ids: + log(ERROR, "`run_id` is invalid") return None # Create task_id, created_at and ttl @@ -104,9 +104,9 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: log(ERROR, errors) return None - # Validate workload_id - if task_res.workload_id not in self.workload_ids: - log(ERROR, "`workload_id` is invalid") + # Validate run_id + if task_res.run_id not in self.run_ids: + log(ERROR, "`run_id` is invalid") return None # Create task_id, created_at and ttl @@ -199,25 +199,25 @@ def delete_node(self, node_id: int) -> None: raise ValueError(f"Node {node_id} not found") self.node_ids.remove(node_id) - def get_nodes(self, workload_id: int) -> Set[int]: + def get_nodes(self, run_id: int) -> Set[int]: """Return all available client nodes. Constraints ----------- - If the provided `workload_id` does not exist or has no matching nodes, + If the provided `run_id` does not exist or has no matching nodes, an empty `Set` MUST be returned. 
""" - if workload_id not in self.workload_ids: + if run_id not in self.run_ids: return set() return self.node_ids def create_workload(self) -> int: """Create one workload.""" - # Sample a random int64 as workload_id - workload_id: int = int.from_bytes(os.urandom(8), "little", signed=True) + # Sample a random int64 as run_id + run_id: int = int.from_bytes(os.urandom(8), "little", signed=True) - if workload_id not in self.workload_ids: - self.workload_ids.add(workload_id) - return workload_id + if run_id not in self.run_ids: + self.run_ids.add(run_id) + return run_id log(ERROR, "Unexpected workload creation failure.") return 0 diff --git a/src/py/flwr/server/state/sqlite_state.py b/src/py/flwr/server/state/sqlite_state.py index f3ff60f370e9..7dc456ad650a 100644 --- a/src/py/flwr/server/state/sqlite_state.py +++ b/src/py/flwr/server/state/sqlite_state.py @@ -39,7 +39,7 @@ SQL_CREATE_TABLE_WORKLOAD = """ CREATE TABLE IF NOT EXISTS workload( - workload_id INTEGER UNIQUE + run_id INTEGER UNIQUE ); """ @@ -47,7 +47,7 @@ CREATE TABLE IF NOT EXISTS task_ins( task_id TEXT UNIQUE, group_id TEXT, - workload_id INTEGER, + run_id INTEGER, producer_anonymous BOOLEAN, producer_node_id INTEGER, consumer_anonymous BOOLEAN, @@ -58,7 +58,7 @@ ancestry TEXT, legacy_server_message BLOB, legacy_client_message BLOB, - FOREIGN KEY(workload_id) REFERENCES workload(workload_id) + FOREIGN KEY(run_id) REFERENCES workload(run_id) ); """ @@ -67,7 +67,7 @@ CREATE TABLE IF NOT EXISTS task_res( task_id TEXT UNIQUE, group_id TEXT, - workload_id INTEGER, + run_id INTEGER, producer_anonymous BOOLEAN, producer_node_id INTEGER, consumer_anonymous BOOLEAN, @@ -78,7 +78,7 @@ ancestry TEXT, legacy_server_message BLOB, legacy_client_message BLOB, - FOREIGN KEY(workload_id) REFERENCES workload(workload_id) + FOREIGN KEY(run_id) REFERENCES workload(run_id) ); """ @@ -198,7 +198,7 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: columns = ", ".join([f":{key}" for key in data[0]]) query = 
f"INSERT INTO task_ins VALUES({columns});" - # Only invalid workload_id can trigger IntegrityError. + # Only invalid run_id can trigger IntegrityError. # This may need to be changed in the future version with more integrity checks. try: self.query(query, data) @@ -333,7 +333,7 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: columns = ", ".join([f":{key}" for key in data[0]]) query = f"INSERT INTO task_res VALUES({columns});" - # Only invalid workload_id can trigger IntegrityError. + # Only invalid run_id can trigger IntegrityError. # This may need to be changed in the future version with more integrity checks. try: self.query(query, data) @@ -485,17 +485,17 @@ def delete_node(self, node_id: int) -> None: query = "DELETE FROM node WHERE node_id = :node_id;" self.query(query, {"node_id": node_id}) - def get_nodes(self, workload_id: int) -> Set[int]: + def get_nodes(self, run_id: int) -> Set[int]: """Retrieve all currently stored node IDs as a set. Constraints ----------- - If the provided `workload_id` does not exist or has no matching nodes, + If the provided `run_id` does not exist or has no matching nodes, an empty `Set` MUST be returned. 
""" # Validate workload ID - query = "SELECT COUNT(*) FROM workload WHERE workload_id = ?;" - if self.query(query, (workload_id,))[0]["COUNT(*)"] == 0: + query = "SELECT COUNT(*) FROM workload WHERE run_id = ?;" + if self.query(query, (run_id,))[0]["COUNT(*)"] == 0: return set() # Get nodes @@ -506,16 +506,16 @@ def get_nodes(self, workload_id: int) -> Set[int]: def create_workload(self) -> int: """Create one workload and store it in state.""" - # Sample a random int64 as workload_id - workload_id: int = int.from_bytes(os.urandom(8), "little", signed=True) + # Sample a random int64 as run_id + run_id: int = int.from_bytes(os.urandom(8), "little", signed=True) # Check conflicts - query = "SELECT COUNT(*) FROM workload WHERE workload_id = ?;" - # If workload_id does not exist - if self.query(query, (workload_id,))[0]["COUNT(*)"] == 0: - query = "INSERT INTO workload VALUES(:workload_id);" - self.query(query, {"workload_id": workload_id}) - return workload_id + query = "SELECT COUNT(*) FROM workload WHERE run_id = ?;" + # If run_id does not exist + if self.query(query, (run_id,))[0]["COUNT(*)"] == 0: + query = "INSERT INTO workload VALUES(:run_id);" + self.query(query, {"run_id": run_id}) + return run_id log(ERROR, "Unexpected workload creation failure.") return 0 @@ -537,7 +537,7 @@ def task_ins_to_dict(task_msg: TaskIns) -> Dict[str, Any]: result = { "task_id": task_msg.task_id, "group_id": task_msg.group_id, - "workload_id": task_msg.workload_id, + "run_id": task_msg.run_id, "producer_anonymous": task_msg.task.producer.anonymous, "producer_node_id": task_msg.task.producer.node_id, "consumer_anonymous": task_msg.task.consumer.anonymous, @@ -559,7 +559,7 @@ def task_res_to_dict(task_msg: TaskRes) -> Dict[str, Any]: result = { "task_id": task_msg.task_id, "group_id": task_msg.group_id, - "workload_id": task_msg.workload_id, + "run_id": task_msg.run_id, "producer_anonymous": task_msg.task.producer.anonymous, "producer_node_id": task_msg.task.producer.node_id, 
"consumer_anonymous": task_msg.task.consumer.anonymous, @@ -584,7 +584,7 @@ def dict_to_task_ins(task_dict: Dict[str, Any]) -> TaskIns: result = TaskIns( task_id=task_dict["task_id"], group_id=task_dict["group_id"], - workload_id=task_dict["workload_id"], + run_id=task_dict["run_id"], task=Task( producer=Node( node_id=task_dict["producer_node_id"], @@ -612,7 +612,7 @@ def dict_to_task_res(task_dict: Dict[str, Any]) -> TaskRes: result = TaskRes( task_id=task_dict["task_id"], group_id=task_dict["group_id"], - workload_id=task_dict["workload_id"], + run_id=task_dict["run_id"], task=Task( producer=Node( node_id=task_dict["producer_node_id"], diff --git a/src/py/flwr/server/state/sqlite_state_test.py b/src/py/flwr/server/state/sqlite_state_test.py index da8fead1438e..efdd288fc308 100644 --- a/src/py/flwr/server/state/sqlite_state_test.py +++ b/src/py/flwr/server/state/sqlite_state_test.py @@ -27,11 +27,11 @@ class SqliteStateTest(unittest.TestCase): def test_ins_res_to_dict(self) -> None: """Check if all required keys are included in return value.""" # Prepare - ins_res = create_task_ins(consumer_node_id=1, anonymous=True, workload_id=0) + ins_res = create_task_ins(consumer_node_id=1, anonymous=True, run_id=0) expected_keys = [ "task_id", "group_id", - "workload_id", + "run_id", "producer_anonymous", "producer_node_id", "consumer_anonymous", diff --git a/src/py/flwr/server/state/state.py b/src/py/flwr/server/state/state.py index fd8bbc8e8e25..2996bbc82e55 100644 --- a/src/py/flwr/server/state/state.py +++ b/src/py/flwr/server/state/state.py @@ -43,7 +43,7 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: If `task_ins.task.consumer.anonymous` is `False`, then `task_ins.task.consumer.node_id` MUST be set (not 0) - If `task_ins.workload_id` is invalid, then + If `task_ins.run_id` is invalid, then storing the `task_ins` MUST fail. 
""" @@ -92,7 +92,7 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: If `task_res.task.consumer.anonymous` is `False`, then `task_res.task.consumer.node_id` MUST be set (not 0) - If `task_res.workload_id` is invalid, then + If `task_res.run_id` is invalid, then storing the `task_res` MUST fail. """ @@ -140,12 +140,12 @@ def delete_node(self, node_id: int) -> None: """Remove `node_id` from state.""" @abc.abstractmethod - def get_nodes(self, workload_id: int) -> Set[int]: + def get_nodes(self, run_id: int) -> Set[int]: """Retrieve all currently stored node IDs as a set. Constraints ----------- - If the provided `workload_id` does not exist or has no matching nodes, + If the provided `run_id` does not exist or has no matching nodes, an empty `Set` MUST be returned. """ diff --git a/src/py/flwr/server/state/state_test.py b/src/py/flwr/server/state/state_test.py index 59299451c3d8..0421c0edb224 100644 --- a/src/py/flwr/server/state/state_test.py +++ b/src/py/flwr/server/state/state_test.py @@ -66,9 +66,9 @@ def test_store_task_ins_one(self) -> None: # Prepare consumer_node_id = 1 state = self.state_factory() - workload_id = state.create_workload() + run_id = state.create_workload() task_ins = create_task_ins( - consumer_node_id=consumer_node_id, anonymous=False, workload_id=workload_id + consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id ) assert task_ins.task.created_at == "" # pylint: disable=no-member @@ -108,15 +108,15 @@ def test_store_and_delete_tasks(self) -> None: # Prepare consumer_node_id = 1 state = self.state_factory() - workload_id = state.create_workload() + run_id = state.create_workload() task_ins_0 = create_task_ins( - consumer_node_id=consumer_node_id, anonymous=False, workload_id=workload_id + consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id ) task_ins_1 = create_task_ins( - consumer_node_id=consumer_node_id, anonymous=False, workload_id=workload_id + consumer_node_id=consumer_node_id, anonymous=False, 
run_id=run_id ) task_ins_2 = create_task_ins( - consumer_node_id=consumer_node_id, anonymous=False, workload_id=workload_id + consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id ) # Insert three TaskIns @@ -136,7 +136,7 @@ def test_store_and_delete_tasks(self) -> None: producer_node_id=100, anonymous=False, ancestry=[str(task_id_0)], - workload_id=workload_id, + run_id=run_id, ) _ = state.store_task_res(task_res=task_res_0) @@ -147,7 +147,7 @@ def test_store_and_delete_tasks(self) -> None: producer_node_id=100, anonymous=False, ancestry=[str(task_id_1)], - workload_id=workload_id, + run_id=run_id, ) _ = state.store_task_res(task_res=task_res_1) @@ -182,10 +182,8 @@ def test_task_ins_store_anonymous_and_retrieve_anonymous(self) -> None: """ # Prepare state: State = self.state_factory() - workload_id = state.create_workload() - task_ins = create_task_ins( - consumer_node_id=0, anonymous=True, workload_id=workload_id - ) + run_id = state.create_workload() + task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) # Execute task_ins_uuid = state.store_task_ins(task_ins) @@ -199,10 +197,8 @@ def test_task_ins_store_anonymous_and_fail_retrieving_identitiy(self) -> None: """Store anonymous TaskIns and fail to retrieve it.""" # Prepare state: State = self.state_factory() - workload_id = state.create_workload() - task_ins = create_task_ins( - consumer_node_id=0, anonymous=True, workload_id=workload_id - ) + run_id = state.create_workload() + task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) # Execute _ = state.store_task_ins(task_ins) @@ -215,10 +211,8 @@ def test_task_ins_store_identity_and_fail_retrieving_anonymous(self) -> None: """Store identity TaskIns and fail retrieving it as anonymous.""" # Prepare state: State = self.state_factory() - workload_id = state.create_workload() - task_ins = create_task_ins( - consumer_node_id=1, anonymous=False, workload_id=workload_id - ) + run_id = state.create_workload() + 
task_ins = create_task_ins(consumer_node_id=1, anonymous=False, run_id=run_id) # Execute _ = state.store_task_ins(task_ins) @@ -231,10 +225,8 @@ def test_task_ins_store_identity_and_retrieve_identity(self) -> None: """Store identity TaskIns and retrieve it.""" # Prepare state: State = self.state_factory() - workload_id = state.create_workload() - task_ins = create_task_ins( - consumer_node_id=1, anonymous=False, workload_id=workload_id - ) + run_id = state.create_workload() + task_ins = create_task_ins(consumer_node_id=1, anonymous=False, run_id=run_id) # Execute task_ins_uuid = state.store_task_ins(task_ins) @@ -250,10 +242,8 @@ def test_task_ins_store_delivered_and_fail_retrieving(self) -> None: """Fail retrieving delivered task.""" # Prepare state: State = self.state_factory() - workload_id = state.create_workload() - task_ins = create_task_ins( - consumer_node_id=1, anonymous=False, workload_id=workload_id - ) + run_id = state.create_workload() + task_ins = create_task_ins(consumer_node_id=1, anonymous=False, run_id=run_id) # Execute _ = state.store_task_ins(task_ins) @@ -278,13 +268,11 @@ def test_get_task_ins_limit_throws_for_limit_zero(self) -> None: with self.assertRaises(AssertionError): state.get_task_ins(node_id=1, limit=0) - def test_task_ins_store_invalid_workload_id_and_fail(self) -> None: - """Store TaskIns with invalid workload_id and fail.""" + def test_task_ins_store_invalid_run_id_and_fail(self) -> None: + """Store TaskIns with invalid run_id and fail.""" # Prepare state: State = self.state_factory() - task_ins = create_task_ins( - consumer_node_id=0, anonymous=True, workload_id=61016 - ) + task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=61016) # Execute task_id = state.store_task_ins(task_ins) @@ -297,13 +285,13 @@ def test_task_res_store_and_retrieve_by_task_ins_id(self) -> None: """Store TaskRes retrieve it by task_ins_id.""" # Prepare state: State = self.state_factory() - workload_id = state.create_workload() + run_id = 
state.create_workload() task_ins_id = uuid4() task_res = create_task_res( producer_node_id=0, anonymous=True, ancestry=[str(task_ins_id)], - workload_id=workload_id, + run_id=run_id, ) # Execute @@ -318,10 +306,10 @@ def test_node_ids_initial_state(self) -> None: """Test retrieving all node_ids and empty initial state.""" # Prepare state: State = self.state_factory() - workload_id = state.create_workload() + run_id = state.create_workload() # Execute - retrieved_node_ids = state.get_nodes(workload_id) + retrieved_node_ids = state.get_nodes(run_id) # Assert assert len(retrieved_node_ids) == 0 @@ -330,13 +318,13 @@ def test_create_node_and_get_nodes(self) -> None: """Test creating a client node.""" # Prepare state: State = self.state_factory() - workload_id = state.create_workload() + run_id = state.create_workload() node_ids = [] # Execute for _ in range(10): node_ids.append(state.create_node()) - retrieved_node_ids = state.get_nodes(workload_id) + retrieved_node_ids = state.get_nodes(run_id) # Assert for i in retrieved_node_ids: @@ -346,26 +334,26 @@ def test_delete_node(self) -> None: """Test deleting a client node.""" # Prepare state: State = self.state_factory() - workload_id = state.create_workload() + run_id = state.create_workload() node_id = state.create_node() # Execute state.delete_node(node_id) - retrieved_node_ids = state.get_nodes(workload_id) + retrieved_node_ids = state.get_nodes(run_id) # Assert assert len(retrieved_node_ids) == 0 - def test_get_nodes_invalid_workload_id(self) -> None: - """Test retrieving all node_ids with invalid workload_id.""" + def test_get_nodes_invalid_run_id(self) -> None: + """Test retrieving all node_ids with invalid run_id.""" # Prepare state: State = self.state_factory() state.create_workload() - invalid_workload_id = 61016 + invalid_run_id = 61016 state.create_node() # Execute - retrieved_node_ids = state.get_nodes(invalid_workload_id) + retrieved_node_ids = state.get_nodes(invalid_run_id) # Assert assert 
len(retrieved_node_ids) == 0 @@ -374,13 +362,9 @@ def test_num_task_ins(self) -> None: """Test if num_tasks returns correct number of not delivered task_ins.""" # Prepare state: State = self.state_factory() - workload_id = state.create_workload() - task_0 = create_task_ins( - consumer_node_id=0, anonymous=True, workload_id=workload_id - ) - task_1 = create_task_ins( - consumer_node_id=0, anonymous=True, workload_id=workload_id - ) + run_id = state.create_workload() + task_0 = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) + task_1 = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) # Store two tasks state.store_task_ins(task_0) @@ -396,12 +380,12 @@ def test_num_task_res(self) -> None: """Test if num_tasks returns correct number of not delivered task_res.""" # Prepare state: State = self.state_factory() - workload_id = state.create_workload() + run_id = state.create_workload() task_0 = create_task_res( - producer_node_id=0, anonymous=True, ancestry=["1"], workload_id=workload_id + producer_node_id=0, anonymous=True, ancestry=["1"], run_id=run_id ) task_1 = create_task_res( - producer_node_id=0, anonymous=True, ancestry=["1"], workload_id=workload_id + producer_node_id=0, anonymous=True, ancestry=["1"], run_id=run_id ) # Store two tasks @@ -418,7 +402,7 @@ def test_num_task_res(self) -> None: def create_task_ins( consumer_node_id: int, anonymous: bool, - workload_id: int, + run_id: int, delivered_at: str = "", ) -> TaskIns: """Create a TaskIns for testing.""" @@ -429,7 +413,7 @@ def create_task_ins( task = TaskIns( task_id="", group_id="", - workload_id=workload_id, + run_id=run_id, task=Task( delivered_at=delivered_at, producer=Node(node_id=0, anonymous=True), @@ -446,13 +430,13 @@ def create_task_res( producer_node_id: int, anonymous: bool, ancestry: List[str], - workload_id: int, + run_id: int, ) -> TaskRes: """Create a TaskRes for testing.""" task_res = TaskRes( task_id="", group_id="", - workload_id=workload_id, + 
run_id=run_id, task=Task( producer=Node(node_id=producer_node_id, anonymous=anonymous), consumer=Node(node_id=0, anonymous=True), diff --git a/src/py/flwr/server/utils/validator_test.py b/src/py/flwr/server/utils/validator_test.py index cab51fbf46de..6627cc9a7887 100644 --- a/src/py/flwr/server/utils/validator_test.py +++ b/src/py/flwr/server/utils/validator_test.py @@ -135,7 +135,7 @@ def create_task_ins( task = TaskIns( task_id="", group_id="", - workload_id=0, + run_id=0, task=Task( delivered_at=delivered_at, producer=Node(node_id=0, anonymous=True), @@ -162,7 +162,7 @@ def create_task_res( task_res = TaskRes( task_id="", group_id="", - workload_id=0, + run_id=0, task=Task( producer=Node(node_id=producer_node_id, anonymous=anonymous), consumer=Node(node_id=0, anonymous=True), diff --git a/src/py/flwr/simulation/ray_transport/ray_client_proxy.py b/src/py/flwr/simulation/ray_transport/ray_client_proxy.py index c6a63298dae6..3f3f65ee6997 100644 --- a/src/py/flwr/simulation/ray_transport/ray_client_proxy.py +++ b/src/py/flwr/simulation/ray_transport/ray_client_proxy.py @@ -133,15 +133,15 @@ def __init__( def _submit_job(self, job_fn: JobFn, timeout: Optional[float]) -> ClientRes: # The VCE is not exposed to TaskIns, it won't handle multilple workloads - # For the time being, fixing workload_id is a small compromise + # For the time being, fixing run_id is a small compromise # This will be one of the first points to address integrating VCE + DriverAPI - workload_id = 0 + run_id = 0 # Register state - self.proxy_state.register_workloadstate(workload_id=workload_id) + self.proxy_state.register_workloadstate(run_id=run_id) # Retrieve state - state = self.proxy_state.retrieve_workloadstate(workload_id=workload_id) + state = self.proxy_state.retrieve_workloadstate(run_id=run_id) try: self.actor_pool.submit_client_job( @@ -152,7 +152,7 @@ def _submit_job(self, job_fn: JobFn, timeout: Optional[float]) -> ClientRes: # Update state self.proxy_state.update_workloadstate( - 
workload_id=workload_id, workload_state=updated_state + run_id=run_id, workload_state=updated_state ) except Exception as ex: diff --git a/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py b/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py index b87418b671d3..13f5a6fec2da 100644 --- a/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py +++ b/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py @@ -119,15 +119,15 @@ def test_cid_consistency_all_submit_first_workload_consistency() -> None: NodeState (at each Proxy) and WorkloadState basic functionality. """ proxies, _ = prep() - workload_id = 0 + run_id = 0 # submit all jobs (collect later) shuffle(proxies) for prox in proxies: # Register state - prox.proxy_state.register_workloadstate(workload_id=workload_id) + prox.proxy_state.register_workloadstate(run_id=run_id) # Retrieve state - state = prox.proxy_state.retrieve_workloadstate(workload_id=workload_id) + state = prox.proxy_state.retrieve_workloadstate(run_id=run_id) job = job_fn(prox.cid) prox.actor_pool.submit_client_job( @@ -139,12 +139,12 @@ def test_cid_consistency_all_submit_first_workload_consistency() -> None: shuffle(proxies) for prox in proxies: res, updated_state = prox.actor_pool.get_client_result(prox.cid, timeout=None) - prox.proxy_state.update_workloadstate(workload_id, workload_state=updated_state) + prox.proxy_state.update_workloadstate(run_id, workload_state=updated_state) res = cast(GetPropertiesRes, res) assert int(prox.cid) * pi == res.properties["result"] assert ( str(int(prox.cid) * pi) - == prox.proxy_state.retrieve_workloadstate(workload_id).state["result"] + == prox.proxy_state.retrieve_workloadstate(run_id).state["result"] ) ray.shutdown() From 3e8e60e75f299314bb175ca405148523d4ba0eed Mon Sep 17 00:00:00 2001 From: Javier Date: Thu, 4 Jan 2024 22:43:44 +0000 Subject: [PATCH 14/30] Update README.md (#2771) --- baselines/hfedxgboost/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/baselines/hfedxgboost/README.md b/baselines/hfedxgboost/README.md index 29702496370b..2f31e2c4c584 100644 --- a/baselines/hfedxgboost/README.md +++ b/baselines/hfedxgboost/README.md @@ -11,7 +11,7 @@ dataset: [a9a, cod-rna, ijcnn1, space_ga, cpusmall, YearPredictionMSD] **Paper:** [arxiv.org/abs/2304.07537](https://arxiv.org/abs/2304.07537) -**Authors:** Chenyang Ma, Xinchi Qiu, Daniel J. Beutel, Nicholas D. Laneearly_stop_patience_rounds: 100 +**Authors:** Chenyang Ma, Xinchi Qiu, Daniel J. Beutel, Nicholas D. Lane **Abstract:** The privacy-sensitive nature of decentralized datasets and the robustness of eXtreme Gradient Boosting (XGBoost) on tabular data raise the need to train XGBoost in the context of federated learning (FL). Existing works on federated XGBoost in the horizontal setting rely on the sharing of gradients, which induce per-node level communication frequency and serious privacy concerns. To alleviate these problems, we develop an innovative framework for horizontal federated XGBoost which does not depend on the sharing of gradients and simultaneously boosts privacy and communication efficiency by making the learning rates of the aggregated tree ensembles are learnable. We conduct extensive evaluations on various classification and regression datasets, showing our approach achieve performance comparable to the state-of-the-art method and effectively improves communication efficiency by lowering both communication rounds and communication overhead by factors ranging from 25x to 700x. 
From 0f5ce998933bd132bf8213ced063f99e2706dd54 Mon Sep 17 00:00:00 2001 From: Heng Pan <134433891+panh99@users.noreply.github.com> Date: Fri, 5 Jan 2024 06:56:04 +0800 Subject: [PATCH 15/30] Format code examples (#2767) --- ...-series-get-started-with-flower-pytorch.ipynb | 2 +- .../cifar.py | 14 +++++++------- .../client.py | 12 ++++++------ examples/quickstart-pytorch-lightning/client.py | 2 +- examples/quickstart-pytorch-lightning/mnist.py | 16 ++++++++++------ examples/quickstart-sklearn-tabular/client.py | 4 +++- 6 files changed, 28 insertions(+), 22 deletions(-) diff --git a/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb b/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb index ce4c2bb63606..bbd916b32375 100644 --- a/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb +++ b/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb @@ -484,7 +484,7 @@ " min_available_clients=10, # Wait until all 10 clients are available\n", ")\n", "\n", - "# Specify the resources each of your clients need. By default, each \n", + "# Specify the resources each of your clients need. 
By default, each\n", "# client will be allocated 1x CPU and 0x CPUs\n", "client_resources = {\"num_cpus\": 1, \"num_gpus\": 0.0}\n", "if DEVICE.type == \"cuda\":\n", diff --git a/examples/pytorch-from-centralized-to-federated/cifar.py b/examples/pytorch-from-centralized-to-federated/cifar.py index a374909c33b2..e8f3ec3fd724 100644 --- a/examples/pytorch-from-centralized-to-federated/cifar.py +++ b/examples/pytorch-from-centralized-to-federated/cifar.py @@ -73,10 +73,10 @@ def apply_transforms(batch): def train( - net: Net, - trainloader: torch.utils.data.DataLoader, - epochs: int, - device: torch.device, # pylint: disable=no-member + net: Net, + trainloader: torch.utils.data.DataLoader, + epochs: int, + device: torch.device, # pylint: disable=no-member ) -> None: """Train the network.""" # Define loss and optimizer @@ -110,9 +110,9 @@ def train( def test( - net: Net, - testloader: torch.utils.data.DataLoader, - device: torch.device, # pylint: disable=no-member + net: Net, + testloader: torch.utils.data.DataLoader, + device: torch.device, # pylint: disable=no-member ) -> Tuple[float, float]: """Validate the network on the entire test set.""" # Define loss and metrics diff --git a/examples/pytorch-from-centralized-to-federated/client.py b/examples/pytorch-from-centralized-to-federated/client.py index df4da7c11cff..61c7e7f762b3 100644 --- a/examples/pytorch-from-centralized-to-federated/client.py +++ b/examples/pytorch-from-centralized-to-federated/client.py @@ -24,10 +24,10 @@ class CifarClient(fl.client.NumPyClient): """Flower client implementing CIFAR-10 image classification using PyTorch.""" def __init__( - self, - model: cifar.Net, - trainloader: DataLoader, - testloader: DataLoader, + self, + model: cifar.Net, + trainloader: DataLoader, + testloader: DataLoader, ) -> None: self.model = model self.trainloader = trainloader @@ -61,7 +61,7 @@ def set_parameters(self, parameters: List[np.ndarray]) -> None: self.model.load_state_dict(state_dict, strict=True) def fit( 
- self, parameters: List[np.ndarray], config: Dict[str, str] + self, parameters: List[np.ndarray], config: Dict[str, str] ) -> Tuple[List[np.ndarray], int, Dict]: # Set model parameters, train model, return updated model parameters self.set_parameters(parameters) @@ -69,7 +69,7 @@ def fit( return self.get_parameters(config={}), len(self.trainloader.dataset), {} def evaluate( - self, parameters: List[np.ndarray], config: Dict[str, str] + self, parameters: List[np.ndarray], config: Dict[str, str] ) -> Tuple[float, int, Dict]: # Set model parameters, evaluate model on local test dataset, return result self.set_parameters(parameters) diff --git a/examples/quickstart-pytorch-lightning/client.py b/examples/quickstart-pytorch-lightning/client.py index 8e07494b6492..1dabd5732b9b 100644 --- a/examples/quickstart-pytorch-lightning/client.py +++ b/examples/quickstart-pytorch-lightning/client.py @@ -10,6 +10,7 @@ disable_progress_bar() + class FlowerClient(fl.client.NumPyClient): def __init__(self, model, train_loader, val_loader, test_loader): self.model = model @@ -55,7 +56,6 @@ def _set_parameters(model, parameters): def main() -> None: - parser = argparse.ArgumentParser(description="Flower") parser.add_argument( "--node-id", diff --git a/examples/quickstart-pytorch-lightning/mnist.py b/examples/quickstart-pytorch-lightning/mnist.py index d32a0afe2d1e..95342f4fb9b3 100644 --- a/examples/quickstart-pytorch-lightning/mnist.py +++ b/examples/quickstart-pytorch-lightning/mnist.py @@ -86,16 +86,20 @@ def load_data(partition): # 60 % for the federated train and 20 % for the federated validation (both in fit) partition_train_valid = partition_full["train"].train_test_split(train_size=0.75) trainloader = DataLoader( - partition_train_valid["train"], batch_size=32, - shuffle=True, collate_fn=collate_fn, num_workers=1 + partition_train_valid["train"], + batch_size=32, + shuffle=True, + collate_fn=collate_fn, + num_workers=1, ) valloader = DataLoader( - partition_train_valid["test"], 
batch_size=32, - collate_fn=collate_fn, num_workers=1 + partition_train_valid["test"], + batch_size=32, + collate_fn=collate_fn, + num_workers=1, ) testloader = DataLoader( - partition_full["test"], batch_size=32, - collate_fn=collate_fn, num_workers=1 + partition_full["test"], batch_size=32, collate_fn=collate_fn, num_workers=1 ) return trainloader, valloader, testloader diff --git a/examples/quickstart-sklearn-tabular/client.py b/examples/quickstart-sklearn-tabular/client.py index 88f654d4398e..5dc0e88b3c75 100644 --- a/examples/quickstart-sklearn-tabular/client.py +++ b/examples/quickstart-sklearn-tabular/client.py @@ -68,4 +68,6 @@ def evaluate(self, parameters, config): # type: ignore return loss, len(X_test), {"test_accuracy": accuracy} # Start Flower client - fl.client.start_client(server_address="0.0.0.0:8080", client=IrisClient().to_client()) + fl.client.start_client( + server_address="0.0.0.0:8080", client=IrisClient().to_client() + ) From 2b4297db02be51a2cf53ad3c7e85bfa5799d4881 Mon Sep 17 00:00:00 2001 From: Heng Pan <134433891+panh99@users.noreply.github.com> Date: Fri, 5 Jan 2024 19:35:34 +0800 Subject: [PATCH 16/30] Rename WorkloadState to RunState (#2770) --- .../Part-I/main.py | 4 +- examples/mt-pytorch/driver.py | 6 +- examples/secaggplus-mt/driver.py | 7 ++- src/proto/flwr/proto/driver.proto | 10 ++-- src/py/flwr/client/app.py | 8 +-- src/py/flwr/client/client.py | 12 ++-- .../client/message_handler/message_handler.py | 18 +++--- .../message_handler/message_handler_test.py | 6 +- src/py/flwr/client/middleware/utils_test.py | 10 ++-- src/py/flwr/client/node_state.py | 32 +++++----- src/py/flwr/client/node_state_tests.py | 26 ++++----- src/py/flwr/client/numpy_client.py | 16 ++--- .../{workload_state.py => run_state.py} | 6 +- src/py/flwr/client/typing.py | 6 +- src/py/flwr/driver/app.py | 2 +- src/py/flwr/driver/app_test.py | 6 +- src/py/flwr/driver/driver.py | 6 +- src/py/flwr/driver/driver_test.py | 6 +- src/py/flwr/driver/grpc_driver.py | 10 
++-- src/py/flwr/proto/driver_pb2.py | 58 +++++++++---------- src/py/flwr/proto/driver_pb2.pyi | 10 ++-- src/py/flwr/proto/driver_pb2_grpc.py | 28 ++++----- src/py/flwr/proto/driver_pb2_grpc.pyi | 16 ++--- src/py/flwr/server/driver/driver_servicer.py | 18 +++--- src/py/flwr/server/state/in_memory_state.py | 6 +- src/py/flwr/server/state/sqlite_state.py | 28 ++++----- src/py/flwr/server/state/state.py | 4 +- src/py/flwr/server/state/state_test.py | 28 ++++----- src/py/flwr/simulation/app.py | 2 +- .../simulation/ray_transport/ray_actor.py | 30 +++++----- .../ray_transport/ray_client_proxy.py | 12 ++-- .../ray_transport/ray_client_proxy_test.py | 18 +++--- src/py/flwr/simulation/ray_transport/utils.py | 2 +- src/py/flwr_experimental/ops/__init__.py | 2 +- 34 files changed, 228 insertions(+), 231 deletions(-) rename src/py/flwr/client/{workload_state.py => run_state.py} (88%) diff --git a/examples/flower-simulation-step-by-step-pytorch/Part-I/main.py b/examples/flower-simulation-step-by-step-pytorch/Part-I/main.py index f5c76ab6dc99..f8124b9353f7 100644 --- a/examples/flower-simulation-step-by-step-pytorch/Part-I/main.py +++ b/examples/flower-simulation-step-by-step-pytorch/Part-I/main.py @@ -24,7 +24,7 @@ def main(cfg: DictConfig): save_path = HydraConfig.get().runtime.output_dir ## 2. Prepare your dataset - # When simulating FL workloads we have a lot of freedom on how the FL clients behave, + # When simulating FL runs we have a lot of freedom on how the FL clients behave, # what data they have, how much data, etc. This is not possible in real FL settings. # In simulation you'd often encounter two types of dataset: # * naturally partitioned, that come pre-partitioned by user id (e.g. FEMNIST, @@ -91,7 +91,7 @@ def main(cfg: DictConfig): "num_gpus": 0.0, }, # (optional) controls the degree of parallelism of your simulation. 
# Lower resources per client allow for more clients to run concurrently - # (but need to be set taking into account the compute/memory footprint of your workload) + # (but need to be set taking into account the compute/memory footprint of your run) # `num_cpus` is an absolute number (integer) indicating the number of threads a client should be allocated # `num_gpus` is a ratio indicating the portion of gpu memory that a client needs. ) diff --git a/examples/mt-pytorch/driver.py b/examples/mt-pytorch/driver.py index 6e1e731301c9..ad4d5e1caabe 100644 --- a/examples/mt-pytorch/driver.py +++ b/examples/mt-pytorch/driver.py @@ -54,12 +54,12 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: # -------------------------------------------------------------------------- Driver SDK driver.connect() -create_workload_res: driver_pb2.CreateWorkloadResponse = driver.create_workload( - req=driver_pb2.CreateWorkloadRequest() +create_run_res: driver_pb2.CreateRunResponse = driver.create_run( + req=driver_pb2.CreateRunRequest() ) # -------------------------------------------------------------------------- Driver SDK -run_id = create_workload_res.run_id +run_id = create_run_res.run_id print(f"Created run id {run_id}") history = History() diff --git a/examples/secaggplus-mt/driver.py b/examples/secaggplus-mt/driver.py index cc52febf78e4..f5871f1b44e4 100644 --- a/examples/secaggplus-mt/driver.py +++ b/examples/secaggplus-mt/driver.py @@ -84,12 +84,12 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: # -------------------------------------------------------------------------- Driver SDK driver.connect() -create_workload_res: driver_pb2.CreateWorkloadResponse = driver.create_workload( - 
req=driver_pb2.CreateWorkloadRequest() +create_run_res: driver_pb2.CreateRunResponse = driver.create_run( + req=driver_pb2.CreateRunRequest() ) # -------------------------------------------------------------------------- Driver SDK -run_id = create_workload_res.run_id +run_id = create_run_res.run_id print(f"Created run id {run_id}") history = History() diff --git a/src/proto/flwr/proto/driver.proto b/src/proto/flwr/proto/driver.proto index 4d2e214e26eb..bc0062c4a51f 100644 --- a/src/proto/flwr/proto/driver.proto +++ b/src/proto/flwr/proto/driver.proto @@ -21,8 +21,8 @@ import "flwr/proto/node.proto"; import "flwr/proto/task.proto"; service Driver { - // Request workload_id - rpc CreateWorkload(CreateWorkloadRequest) returns (CreateWorkloadResponse) {} + // Request run_id + rpc CreateRun(CreateRunRequest) returns (CreateRunResponse) {} // Return a set of nodes rpc GetNodes(GetNodesRequest) returns (GetNodesResponse) {} @@ -34,9 +34,9 @@ service Driver { rpc PullTaskRes(PullTaskResRequest) returns (PullTaskResResponse) {} } -// CreateWorkload -message CreateWorkloadRequest {} -message CreateWorkloadResponse { sint64 run_id = 1; } +// CreateRun +message CreateRunRequest {} +message CreateRunResponse { sint64 run_id = 1; } // GetNodes messages message GetNodesRequest { sint64 run_id = 1; } diff --git a/src/py/flwr/client/app.py b/src/py/flwr/client/app.py index 40fcb3178ba0..a5b285fbb7fb 100644 --- a/src/py/flwr/client/app.py +++ b/src/py/flwr/client/app.py @@ -349,7 +349,7 @@ def _load_app() -> Flower: break # Register state - node_state.register_workloadstate(run_id=task_ins.run_id) + node_state.register_runstate(run_id=task_ins.run_id) # Load app app: Flower = load_flower_callable_fn() @@ -357,14 +357,14 @@ def _load_app() -> Flower: # Handle task message fwd_msg: Fwd = Fwd( task_ins=task_ins, - state=node_state.retrieve_workloadstate(run_id=task_ins.run_id), + state=node_state.retrieve_runstate(run_id=task_ins.run_id), ) bwd_msg: Bwd = app(fwd=fwd_msg) # Update 
node state - node_state.update_workloadstate( + node_state.update_runstate( run_id=bwd_msg.task_res.run_id, - workload_state=bwd_msg.state, + run_state=bwd_msg.state, ) # Send diff --git a/src/py/flwr/client/client.py b/src/py/flwr/client/client.py index 280e0a8ca989..54b53296fd2f 100644 --- a/src/py/flwr/client/client.py +++ b/src/py/flwr/client/client.py @@ -19,7 +19,7 @@ from abc import ABC -from flwr.client.workload_state import WorkloadState +from flwr.client.run_state import RunState from flwr.common import ( Code, EvaluateIns, @@ -38,7 +38,7 @@ class Client(ABC): """Abstract base class for Flower clients.""" - state: WorkloadState + state: RunState def get_properties(self, ins: GetPropertiesIns) -> GetPropertiesRes: """Return set of client's properties. @@ -141,12 +141,12 @@ def evaluate(self, ins: EvaluateIns) -> EvaluateRes: metrics={}, ) - def get_state(self) -> WorkloadState: - """Get the workload state from this client.""" + def get_state(self) -> RunState: + """Get the run state from this client.""" return self.state - def set_state(self, state: WorkloadState) -> None: - """Apply a workload state to this client.""" + def set_state(self, state: RunState) -> None: + """Apply a run state to this client.""" self.state = state def to_client(self) -> Client: diff --git a/src/py/flwr/client/message_handler/message_handler.py b/src/py/flwr/client/message_handler/message_handler.py index f2b05622abcd..3f30db2a4ea2 100644 --- a/src/py/flwr/client/message_handler/message_handler.py +++ b/src/py/flwr/client/message_handler/message_handler.py @@ -28,9 +28,9 @@ get_server_message_from_task_ins, wrap_client_message_in_task_res, ) +from flwr.client.run_state import RunState from flwr.client.secure_aggregation import SecureAggregationHandler from flwr.client.typing import ClientFn -from flwr.client.workload_state import WorkloadState from flwr.common import serde from flwr.proto.task_pb2 import SecureAggregation, Task, TaskIns, TaskRes from flwr.proto.transport_pb2 
import ClientMessage, Reason, ServerMessage @@ -79,16 +79,16 @@ def handle_control_message(task_ins: TaskIns) -> Tuple[Optional[TaskRes], int]: def handle( - client_fn: ClientFn, state: WorkloadState, task_ins: TaskIns -) -> Tuple[TaskRes, WorkloadState]: + client_fn: ClientFn, state: RunState, task_ins: TaskIns +) -> Tuple[TaskRes, RunState]: """Handle incoming TaskIns from the server. Parameters ---------- client_fn : ClientFn A callable that instantiates a Client. - state : WorkloadState - A dataclass storing the state for the workload being executed by the client. + state : RunState + A dataclass storing the state for the run being executed by the client. task_ins: TaskIns The task instruction coming from the server, to be processed by the client. @@ -126,16 +126,16 @@ def handle( def handle_legacy_message( - client_fn: ClientFn, state: WorkloadState, server_msg: ServerMessage -) -> Tuple[ClientMessage, WorkloadState]: + client_fn: ClientFn, state: RunState, server_msg: ServerMessage +) -> Tuple[ClientMessage, RunState]: """Handle incoming messages from the server. Parameters ---------- client_fn : ClientFn A callable that instantiates a Client. - state : WorkloadState - A dataclass storing the state for the workload being executed by the client. + state : RunState + A dataclass storing the state for the run being executed by the client. server_msg: ServerMessage The message coming from the server, to be processed by the client. 
diff --git a/src/py/flwr/client/message_handler/message_handler_test.py b/src/py/flwr/client/message_handler/message_handler_test.py index 0ffa0c2c5de4..cd810ae220e9 100644 --- a/src/py/flwr/client/message_handler/message_handler_test.py +++ b/src/py/flwr/client/message_handler/message_handler_test.py @@ -18,8 +18,8 @@ import uuid from flwr.client import Client +from flwr.client.run_state import RunState from flwr.client.typing import ClientFn -from flwr.client.workload_state import WorkloadState from flwr.common import ( EvaluateIns, EvaluateRes, @@ -136,7 +136,7 @@ def test_client_without_get_properties() -> None: ) task_res, _ = handle( client_fn=_get_client_fn(client), - state=WorkloadState(state={}), + state=RunState(state={}), task_ins=task_ins, ) @@ -204,7 +204,7 @@ def test_client_with_get_properties() -> None: ) task_res, _ = handle( client_fn=_get_client_fn(client), - state=WorkloadState(state={}), + state=RunState(state={}), task_ins=task_ins, ) diff --git a/src/py/flwr/client/middleware/utils_test.py b/src/py/flwr/client/middleware/utils_test.py index 9a2d888a5ecd..aa4358be5a51 100644 --- a/src/py/flwr/client/middleware/utils_test.py +++ b/src/py/flwr/client/middleware/utils_test.py @@ -18,8 +18,8 @@ import unittest from typing import List +from flwr.client.run_state import RunState from flwr.client.typing import Bwd, FlowerCallable, Fwd, Layer -from flwr.client.workload_state import WorkloadState from flwr.proto.task_pb2 import TaskIns, TaskRes from .utils import make_ffn @@ -45,7 +45,7 @@ def make_mock_app(name: str, footprint: List[str]) -> FlowerCallable: def app(fwd: Fwd) -> Bwd: footprint.append(name) fwd.task_ins.task_id += f"{name}" - return Bwd(task_res=TaskRes(task_id=name), state=WorkloadState({})) + return Bwd(task_res=TaskRes(task_id=name), state=RunState({})) return app @@ -66,7 +66,7 @@ def test_multiple_middlewares(self) -> None: # Execute wrapped_app = make_ffn(mock_app, mock_middleware_layers) - task_res = 
wrapped_app(Fwd(task_ins=task_ins, state=WorkloadState({}))).task_res + task_res = wrapped_app(Fwd(task_ins=task_ins, state=RunState({}))).task_res # Assert trace = mock_middleware_names + ["app"] @@ -86,11 +86,11 @@ def filter_layer(fwd: Fwd, _: FlowerCallable) -> Bwd: footprint.append("filter") fwd.task_ins.task_id += "filter" # Skip calling app - return Bwd(task_res=TaskRes(task_id="filter"), state=WorkloadState({})) + return Bwd(task_res=TaskRes(task_id="filter"), state=RunState({})) # Execute wrapped_app = make_ffn(mock_app, [filter_layer]) - task_res = wrapped_app(Fwd(task_ins=task_ins, state=WorkloadState({}))).task_res + task_res = wrapped_app(Fwd(task_ins=task_ins, state=RunState({}))).task_res # Assert self.assertEqual(footprint, ["filter"]) diff --git a/src/py/flwr/client/node_state.py b/src/py/flwr/client/node_state.py index 58a1f7111250..0a29be511806 100644 --- a/src/py/flwr/client/node_state.py +++ b/src/py/flwr/client/node_state.py @@ -17,32 +17,32 @@ from typing import Any, Dict -from flwr.client.workload_state import WorkloadState +from flwr.client.run_state import RunState class NodeState: - """State of a node where client nodes execute workloads.""" + """State of a node where client nodes execute runs.""" def __init__(self) -> None: self._meta: Dict[str, Any] = {} # holds metadata about the node - self.workload_states: Dict[int, WorkloadState] = {} + self.run_states: Dict[int, RunState] = {} - def register_workloadstate(self, run_id: int) -> None: - """Register new workload state for this node.""" - if run_id not in self.workload_states: - self.workload_states[run_id] = WorkloadState({}) + def register_runstate(self, run_id: int) -> None: + """Register new run state for this node.""" + if run_id not in self.run_states: + self.run_states[run_id] = RunState({}) - def retrieve_workloadstate(self, run_id: int) -> WorkloadState: - """Get workload state given a run_id.""" - if run_id in self.workload_states: - return self.workload_states[run_id] + def 
retrieve_runstate(self, run_id: int) -> RunState: + """Get run state given a run_id.""" + if run_id in self.run_states: + return self.run_states[run_id] raise RuntimeError( - f"WorkloadState for run_id={run_id} doesn't exist." - " A workload must be registered before it can be retrieved or updated " + f"RunState for run_id={run_id} doesn't exist." + " A run must be registered before it can be retrieved or updated " " by a client." ) - def update_workloadstate(self, run_id: int, workload_state: WorkloadState) -> None: - """Update workload state.""" - self.workload_states[run_id] = workload_state + def update_runstate(self, run_id: int, run_state: RunState) -> None: + """Update run state.""" + self.run_states[run_id] = run_state diff --git a/src/py/flwr/client/node_state_tests.py b/src/py/flwr/client/node_state_tests.py index 29f3c80a391c..7a6bfcd31f08 100644 --- a/src/py/flwr/client/node_state_tests.py +++ b/src/py/flwr/client/node_state_tests.py @@ -16,11 +16,11 @@ from flwr.client.node_state import NodeState -from flwr.client.workload_state import WorkloadState +from flwr.client.run_state import RunState from flwr.proto.task_pb2 import TaskIns -def _run_dummy_task(state: WorkloadState) -> WorkloadState: +def _run_dummy_task(state: RunState) -> RunState: if "counter" in state.state: state.state["counter"] += "1" else: @@ -29,31 +29,31 @@ def _run_dummy_task(state: WorkloadState) -> WorkloadState: return state -def test_multiworkload_in_node_state() -> None: +def test_multirun_in_node_state() -> None: """Test basic NodeState logic.""" # Tasks to perform - tasks = [TaskIns(run_id=r_id) for r_id in [0, 1, 1, 2, 3, 2, 1, 5]] - # the "tasks" is to count how many times each workload is executed + tasks = [TaskIns(run_id=run_id) for run_id in [0, 1, 1, 2, 3, 2, 1, 5]] + # the "tasks" is to count how many times each run is executed expected_values = {0: "1", 1: "1" * 3, 2: "1" * 2, 3: "1", 5: "1"} # NodeState node_state = NodeState() for task in tasks: - r_id = task.run_id 
+ run_id = task.run_id # Register - node_state.register_workloadstate(run_id=r_id) + node_state.register_runstate(run_id=run_id) - # Get workload state - state = node_state.retrieve_workloadstate(run_id=r_id) + # Get run state + state = node_state.retrieve_runstate(run_id=run_id) # Run "task" updated_state = _run_dummy_task(state) - # Update workload state - node_state.update_workloadstate(run_id=r_id, workload_state=updated_state) + # Update run state + node_state.update_runstate(run_id=run_id, run_state=updated_state) # Verify values - for r_id, state in node_state.workload_states.items(): - assert state.state["counter"] == expected_values[r_id] + for run_id, state in node_state.run_states.items(): + assert state.state["counter"] == expected_values[run_id] diff --git a/src/py/flwr/client/numpy_client.py b/src/py/flwr/client/numpy_client.py index 8b0893ea30aa..2312741f5af6 100644 --- a/src/py/flwr/client/numpy_client.py +++ b/src/py/flwr/client/numpy_client.py @@ -19,7 +19,7 @@ from typing import Callable, Dict, Tuple from flwr.client.client import Client -from flwr.client.workload_state import WorkloadState +from flwr.client.run_state import RunState from flwr.common import ( Config, NDArrays, @@ -70,7 +70,7 @@ class NumPyClient(ABC): """Abstract base class for Flower clients using NumPy.""" - state: WorkloadState + state: RunState def get_properties(self, config: Config) -> Dict[str, Scalar]: """Return a client's set of properties. 
@@ -174,12 +174,12 @@ def evaluate( _ = (self, parameters, config) return 0.0, 0, {} - def get_state(self) -> WorkloadState: - """Get the workload state from this client.""" + def get_state(self) -> RunState: + """Get the run state from this client.""" return self.state - def set_state(self, state: WorkloadState) -> None: - """Apply a workload state to this client.""" + def set_state(self, state: RunState) -> None: + """Apply a run state to this client.""" self.state = state def to_client(self) -> Client: @@ -278,12 +278,12 @@ def _evaluate(self: Client, ins: EvaluateIns) -> EvaluateRes: ) -def _get_state(self: Client) -> WorkloadState: +def _get_state(self: Client) -> RunState: """Return state of underlying NumPyClient.""" return self.numpy_client.get_state() # type: ignore -def _set_state(self: Client, state: WorkloadState) -> None: +def _set_state(self: Client, state: RunState) -> None: """Apply state to underlying NumPyClient.""" self.numpy_client.set_state(state) # type: ignore diff --git a/src/py/flwr/client/workload_state.py b/src/py/flwr/client/run_state.py similarity index 88% rename from src/py/flwr/client/workload_state.py rename to src/py/flwr/client/run_state.py index 42ae2a925f47..c2755eb995eb 100644 --- a/src/py/flwr/client/workload_state.py +++ b/src/py/flwr/client/run_state.py @@ -12,14 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""Workload state.""" +"""Run state.""" from dataclasses import dataclass from typing import Dict @dataclass -class WorkloadState: - """State of a workload executed by a client node.""" +class RunState: + """State of a run executed by a client node.""" state: Dict[str, str] diff --git a/src/py/flwr/client/typing.py b/src/py/flwr/client/typing.py index 2dd368bf6d08..1652ee57674a 100644 --- a/src/py/flwr/client/typing.py +++ b/src/py/flwr/client/typing.py @@ -17,7 +17,7 @@ from dataclasses import dataclass from typing import Callable -from flwr.client.workload_state import WorkloadState +from flwr.client.run_state import RunState from flwr.proto.task_pb2 import TaskIns, TaskRes from .client import Client as Client @@ -28,7 +28,7 @@ class Fwd: """.""" task_ins: TaskIns - state: WorkloadState + state: RunState @dataclass @@ -36,7 +36,7 @@ class Bwd: """.""" task_res: TaskRes - state: WorkloadState + state: RunState FlowerCallable = Callable[[Fwd], Bwd] diff --git a/src/py/flwr/driver/app.py b/src/py/flwr/driver/app.py index 2d4f2cf81390..987b4a31981b 100644 --- a/src/py/flwr/driver/app.py +++ b/src/py/flwr/driver/app.py @@ -171,7 +171,7 @@ def update_client_manager( `client_manager.unregister()`. 
""" # Request for run_id - run_id = driver.create_workload(driver_pb2.CreateWorkloadRequest()).run_id + run_id = driver.create_run(driver_pb2.CreateRunRequest()).run_id # Loop until the driver is disconnected registered_nodes: Dict[int, DriverClientProxy] = {} diff --git a/src/py/flwr/driver/app_test.py b/src/py/flwr/driver/app_test.py index 4b376cb94ef5..2c3a6d2ccddf 100644 --- a/src/py/flwr/driver/app_test.py +++ b/src/py/flwr/driver/app_test.py @@ -22,7 +22,7 @@ from unittest.mock import MagicMock from flwr.driver.app import update_client_manager -from flwr.proto.driver_pb2 import CreateWorkloadResponse, GetNodesResponse +from flwr.proto.driver_pb2 import CreateRunResponse, GetNodesResponse from flwr.proto.node_pb2 import Node from flwr.server.client_manager import SimpleClientManager @@ -43,7 +43,7 @@ def test_simple_client_manager_update(self) -> None: ] driver = MagicMock() driver.stub = "driver stub" - driver.create_workload.return_value = CreateWorkloadResponse(run_id=1) + driver.create_run.return_value = CreateRunResponse(run_id=1) driver.get_nodes.return_value = GetNodesResponse(nodes=expected_nodes) client_manager = SimpleClientManager() lock = threading.Lock() @@ -76,7 +76,7 @@ def test_simple_client_manager_update(self) -> None: driver.stub = None # Assert - driver.create_workload.assert_called_once() + driver.create_run.assert_called_once() assert node_ids == {node.node_id for node in expected_nodes} assert updated_node_ids == {node.node_id for node in expected_updated_nodes} diff --git a/src/py/flwr/driver/driver.py b/src/py/flwr/driver/driver.py index 7d1af40f4ee9..9f96cc46ce1e 100644 --- a/src/py/flwr/driver/driver.py +++ b/src/py/flwr/driver/driver.py @@ -19,7 +19,7 @@ from flwr.driver.grpc_driver import DEFAULT_SERVER_ADDRESS_DRIVER, GrpcDriver from flwr.proto.driver_pb2 import ( - CreateWorkloadRequest, + CreateRunRequest, GetNodesRequest, PullTaskResRequest, PushTaskInsRequest, @@ -60,12 +60,12 @@ def __init__( def 
_get_grpc_driver_and_run_id(self) -> Tuple[GrpcDriver, int]: # Check if the GrpcDriver is initialized if self.grpc_driver is None or self.run_id is None: - # Connect and create workload + # Connect and create run self.grpc_driver = GrpcDriver( driver_service_address=self.addr, certificates=self.certificates ) self.grpc_driver.connect() - res = self.grpc_driver.create_workload(CreateWorkloadRequest()) + res = self.grpc_driver.create_run(CreateRunRequest()) self.run_id = res.run_id return self.grpc_driver, self.run_id diff --git a/src/py/flwr/driver/driver_test.py b/src/py/flwr/driver/driver_test.py index 8e3b7994986f..92b4230a3932 100644 --- a/src/py/flwr/driver/driver_test.py +++ b/src/py/flwr/driver/driver_test.py @@ -35,7 +35,7 @@ def setUp(self) -> None: mock_response = Mock() mock_response.run_id = 61016 self.mock_grpc_driver = Mock() - self.mock_grpc_driver.create_workload.return_value = mock_response + self.mock_grpc_driver.create_run.return_value = mock_response self.patcher = patch( "flwr.driver.driver.GrpcDriver", return_value=self.mock_grpc_driver ) @@ -47,7 +47,7 @@ def tearDown(self) -> None: self.patcher.stop() def test_check_and_init_grpc_driver_already_initialized(self) -> None: - """Test that GrpcDriver doesn't initialize if workload is created.""" + """Test that GrpcDriver doesn't initialize if run is created.""" # Prepare self.driver.grpc_driver = self.mock_grpc_driver self.driver.run_id = 61016 @@ -60,7 +60,7 @@ def test_check_and_init_grpc_driver_already_initialized(self) -> None: self.mock_grpc_driver.connect.assert_not_called() def test_check_and_init_grpc_driver_needs_initialization(self) -> None: - """Test GrpcDriver initialization when workload is not created.""" + """Test GrpcDriver initialization when run is not created.""" # Execute # pylint: disable-next=protected-access self.driver._get_grpc_driver_and_run_id() diff --git a/src/py/flwr/driver/grpc_driver.py b/src/py/flwr/driver/grpc_driver.py index 7dd0a0f501c5..b6d42fe799d5 100644 --- 
a/src/py/flwr/driver/grpc_driver.py +++ b/src/py/flwr/driver/grpc_driver.py @@ -24,8 +24,8 @@ from flwr.common.grpc import create_channel from flwr.common.logger import log from flwr.proto.driver_pb2 import ( - CreateWorkloadRequest, - CreateWorkloadResponse, + CreateRunRequest, + CreateRunResponse, GetNodesRequest, GetNodesResponse, PullTaskResRequest, @@ -84,15 +84,15 @@ def disconnect(self) -> None: channel.close() log(INFO, "[Driver] Disconnected") - def create_workload(self, req: CreateWorkloadRequest) -> CreateWorkloadResponse: - """Request for workload ID.""" + def create_run(self, req: CreateRunRequest) -> CreateRunResponse: + """Request for run ID.""" # Check if channel is open if self.stub is None: log(ERROR, ERROR_MESSAGE_DRIVER_NOT_CONNECTED) raise Exception("`GrpcDriver` instance not connected") # Call Driver API - res: CreateWorkloadResponse = self.stub.CreateWorkload(request=req) + res: CreateRunResponse = self.stub.CreateRun(request=req) return res def get_nodes(self, req: GetNodesRequest) -> GetNodesResponse: diff --git a/src/py/flwr/proto/driver_pb2.py b/src/py/flwr/proto/driver_pb2.py index b57152f83fb1..615bf4672afa 100644 --- a/src/py/flwr/proto/driver_pb2.py +++ b/src/py/flwr/proto/driver_pb2.py @@ -16,31 +16,31 @@ from flwr.proto import task_pb2 as flwr_dot_proto_dot_task__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17\x66lwr/proto/driver.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\"\x17\n\x15\x43reateWorkloadRequest\"(\n\x16\x43reateWorkloadResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"!\n\x0fGetNodesRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"3\n\x10GetNodesResponse\x12\x1f\n\x05nodes\x18\x01 \x03(\x0b\x32\x10.flwr.proto.Node\"@\n\x12PushTaskInsRequest\x12*\n\rtask_ins_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"\'\n\x13PushTaskInsResponse\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"F\n\x12PullTaskResRequest\x12\x1e\n\x04node\x18\x01 
\x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"A\n\x13PullTaskResResponse\x12*\n\rtask_res_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskRes2\xd0\x02\n\x06\x44river\x12Y\n\x0e\x43reateWorkload\x12!.flwr.proto.CreateWorkloadRequest\x1a\".flwr.proto.CreateWorkloadResponse\"\x00\x12G\n\x08GetNodes\x12\x1b.flwr.proto.GetNodesRequest\x1a\x1c.flwr.proto.GetNodesResponse\"\x00\x12P\n\x0bPushTaskIns\x12\x1e.flwr.proto.PushTaskInsRequest\x1a\x1f.flwr.proto.PushTaskInsResponse\"\x00\x12P\n\x0bPullTaskRes\x12\x1e.flwr.proto.PullTaskResRequest\x1a\x1f.flwr.proto.PullTaskResResponse\"\x00\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17\x66lwr/proto/driver.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\"\x12\n\x10\x43reateRunRequest\"#\n\x11\x43reateRunResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"!\n\x0fGetNodesRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"3\n\x10GetNodesResponse\x12\x1f\n\x05nodes\x18\x01 \x03(\x0b\x32\x10.flwr.proto.Node\"@\n\x12PushTaskInsRequest\x12*\n\rtask_ins_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"\'\n\x13PushTaskInsResponse\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"F\n\x12PullTaskResRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"A\n\x13PullTaskResResponse\x12*\n\rtask_res_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskRes2\xc1\x02\n\x06\x44river\x12J\n\tCreateRun\x12\x1c.flwr.proto.CreateRunRequest\x1a\x1d.flwr.proto.CreateRunResponse\"\x00\x12G\n\x08GetNodes\x12\x1b.flwr.proto.GetNodesRequest\x1a\x1c.flwr.proto.GetNodesResponse\"\x00\x12P\n\x0bPushTaskIns\x12\x1e.flwr.proto.PushTaskInsRequest\x1a\x1f.flwr.proto.PushTaskInsResponse\"\x00\x12P\n\x0bPullTaskRes\x12\x1e.flwr.proto.PullTaskResRequest\x1a\x1f.flwr.proto.PullTaskResResponse\"\x00\x62\x06proto3') -_CREATEWORKLOADREQUEST = DESCRIPTOR.message_types_by_name['CreateWorkloadRequest'] -_CREATEWORKLOADRESPONSE = 
DESCRIPTOR.message_types_by_name['CreateWorkloadResponse'] +_CREATERUNREQUEST = DESCRIPTOR.message_types_by_name['CreateRunRequest'] +_CREATERUNRESPONSE = DESCRIPTOR.message_types_by_name['CreateRunResponse'] _GETNODESREQUEST = DESCRIPTOR.message_types_by_name['GetNodesRequest'] _GETNODESRESPONSE = DESCRIPTOR.message_types_by_name['GetNodesResponse'] _PUSHTASKINSREQUEST = DESCRIPTOR.message_types_by_name['PushTaskInsRequest'] _PUSHTASKINSRESPONSE = DESCRIPTOR.message_types_by_name['PushTaskInsResponse'] _PULLTASKRESREQUEST = DESCRIPTOR.message_types_by_name['PullTaskResRequest'] _PULLTASKRESRESPONSE = DESCRIPTOR.message_types_by_name['PullTaskResResponse'] -CreateWorkloadRequest = _reflection.GeneratedProtocolMessageType('CreateWorkloadRequest', (_message.Message,), { - 'DESCRIPTOR' : _CREATEWORKLOADREQUEST, +CreateRunRequest = _reflection.GeneratedProtocolMessageType('CreateRunRequest', (_message.Message,), { + 'DESCRIPTOR' : _CREATERUNREQUEST, '__module__' : 'flwr.proto.driver_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.CreateWorkloadRequest) + # @@protoc_insertion_point(class_scope:flwr.proto.CreateRunRequest) }) -_sym_db.RegisterMessage(CreateWorkloadRequest) +_sym_db.RegisterMessage(CreateRunRequest) -CreateWorkloadResponse = _reflection.GeneratedProtocolMessageType('CreateWorkloadResponse', (_message.Message,), { - 'DESCRIPTOR' : _CREATEWORKLOADRESPONSE, +CreateRunResponse = _reflection.GeneratedProtocolMessageType('CreateRunResponse', (_message.Message,), { + 'DESCRIPTOR' : _CREATERUNRESPONSE, '__module__' : 'flwr.proto.driver_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.CreateWorkloadResponse) + # @@protoc_insertion_point(class_scope:flwr.proto.CreateRunResponse) }) -_sym_db.RegisterMessage(CreateWorkloadResponse) +_sym_db.RegisterMessage(CreateRunResponse) GetNodesRequest = _reflection.GeneratedProtocolMessageType('GetNodesRequest', (_message.Message,), { 'DESCRIPTOR' : _GETNODESREQUEST, @@ -88,22 +88,22 @@ if 
_descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None - _CREATEWORKLOADREQUEST._serialized_start=85 - _CREATEWORKLOADREQUEST._serialized_end=108 - _CREATEWORKLOADRESPONSE._serialized_start=110 - _CREATEWORKLOADRESPONSE._serialized_end=150 - _GETNODESREQUEST._serialized_start=152 - _GETNODESREQUEST._serialized_end=185 - _GETNODESRESPONSE._serialized_start=187 - _GETNODESRESPONSE._serialized_end=238 - _PUSHTASKINSREQUEST._serialized_start=240 - _PUSHTASKINSREQUEST._serialized_end=304 - _PUSHTASKINSRESPONSE._serialized_start=306 - _PUSHTASKINSRESPONSE._serialized_end=345 - _PULLTASKRESREQUEST._serialized_start=347 - _PULLTASKRESREQUEST._serialized_end=417 - _PULLTASKRESRESPONSE._serialized_start=419 - _PULLTASKRESRESPONSE._serialized_end=484 - _DRIVER._serialized_start=487 - _DRIVER._serialized_end=823 + _CREATERUNREQUEST._serialized_start=85 + _CREATERUNREQUEST._serialized_end=103 + _CREATERUNRESPONSE._serialized_start=105 + _CREATERUNRESPONSE._serialized_end=140 + _GETNODESREQUEST._serialized_start=142 + _GETNODESREQUEST._serialized_end=175 + _GETNODESRESPONSE._serialized_start=177 + _GETNODESRESPONSE._serialized_end=228 + _PUSHTASKINSREQUEST._serialized_start=230 + _PUSHTASKINSREQUEST._serialized_end=294 + _PUSHTASKINSRESPONSE._serialized_start=296 + _PUSHTASKINSRESPONSE._serialized_end=335 + _PULLTASKRESREQUEST._serialized_start=337 + _PULLTASKRESREQUEST._serialized_end=407 + _PULLTASKRESRESPONSE._serialized_start=409 + _PULLTASKRESRESPONSE._serialized_end=474 + _DRIVER._serialized_start=477 + _DRIVER._serialized_end=798 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/driver_pb2.pyi b/src/py/flwr/proto/driver_pb2.pyi index cebbd41590f8..8dc254a55e8c 100644 --- a/src/py/flwr/proto/driver_pb2.pyi +++ b/src/py/flwr/proto/driver_pb2.pyi @@ -13,14 +13,14 @@ import typing_extensions DESCRIPTOR: google.protobuf.descriptor.FileDescriptor -class CreateWorkloadRequest(google.protobuf.message.Message): - """CreateWorkload""" +class 
CreateRunRequest(google.protobuf.message.Message): + """CreateRun""" DESCRIPTOR: google.protobuf.descriptor.Descriptor def __init__(self, ) -> None: ... -global___CreateWorkloadRequest = CreateWorkloadRequest +global___CreateRunRequest = CreateRunRequest -class CreateWorkloadResponse(google.protobuf.message.Message): +class CreateRunResponse(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor RUN_ID_FIELD_NUMBER: builtins.int run_id: builtins.int @@ -29,7 +29,7 @@ class CreateWorkloadResponse(google.protobuf.message.Message): run_id: builtins.int = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["run_id",b"run_id"]) -> None: ... -global___CreateWorkloadResponse = CreateWorkloadResponse +global___CreateRunResponse = CreateRunResponse class GetNodesRequest(google.protobuf.message.Message): """GetNodes messages""" diff --git a/src/py/flwr/proto/driver_pb2_grpc.py b/src/py/flwr/proto/driver_pb2_grpc.py index ea33b843d945..ac6815023ebd 100644 --- a/src/py/flwr/proto/driver_pb2_grpc.py +++ b/src/py/flwr/proto/driver_pb2_grpc.py @@ -14,10 +14,10 @@ def __init__(self, channel): Args: channel: A grpc.Channel. 
""" - self.CreateWorkload = channel.unary_unary( - '/flwr.proto.Driver/CreateWorkload', - request_serializer=flwr_dot_proto_dot_driver__pb2.CreateWorkloadRequest.SerializeToString, - response_deserializer=flwr_dot_proto_dot_driver__pb2.CreateWorkloadResponse.FromString, + self.CreateRun = channel.unary_unary( + '/flwr.proto.Driver/CreateRun', + request_serializer=flwr_dot_proto_dot_driver__pb2.CreateRunRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_driver__pb2.CreateRunResponse.FromString, ) self.GetNodes = channel.unary_unary( '/flwr.proto.Driver/GetNodes', @@ -39,8 +39,8 @@ def __init__(self, channel): class DriverServicer(object): """Missing associated documentation comment in .proto file.""" - def CreateWorkload(self, request, context): - """Request workload_id + def CreateRun(self, request, context): + """Request run_id """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') @@ -70,10 +70,10 @@ def PullTaskRes(self, request, context): def add_DriverServicer_to_server(servicer, server): rpc_method_handlers = { - 'CreateWorkload': grpc.unary_unary_rpc_method_handler( - servicer.CreateWorkload, - request_deserializer=flwr_dot_proto_dot_driver__pb2.CreateWorkloadRequest.FromString, - response_serializer=flwr_dot_proto_dot_driver__pb2.CreateWorkloadResponse.SerializeToString, + 'CreateRun': grpc.unary_unary_rpc_method_handler( + servicer.CreateRun, + request_deserializer=flwr_dot_proto_dot_driver__pb2.CreateRunRequest.FromString, + response_serializer=flwr_dot_proto_dot_driver__pb2.CreateRunResponse.SerializeToString, ), 'GetNodes': grpc.unary_unary_rpc_method_handler( servicer.GetNodes, @@ -101,7 +101,7 @@ class Driver(object): """Missing associated documentation comment in .proto file.""" @staticmethod - def CreateWorkload(request, + def CreateRun(request, target, options=(), channel_credentials=None, @@ -111,9 +111,9 @@ def CreateWorkload(request, wait_for_ready=None, timeout=None, metadata=None): 
- return grpc.experimental.unary_unary(request, target, '/flwr.proto.Driver/CreateWorkload', - flwr_dot_proto_dot_driver__pb2.CreateWorkloadRequest.SerializeToString, - flwr_dot_proto_dot_driver__pb2.CreateWorkloadResponse.FromString, + return grpc.experimental.unary_unary(request, target, '/flwr.proto.Driver/CreateRun', + flwr_dot_proto_dot_driver__pb2.CreateRunRequest.SerializeToString, + flwr_dot_proto_dot_driver__pb2.CreateRunResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/src/py/flwr/proto/driver_pb2_grpc.pyi b/src/py/flwr/proto/driver_pb2_grpc.pyi index 1b10d71e943d..43cf45f39b25 100644 --- a/src/py/flwr/proto/driver_pb2_grpc.pyi +++ b/src/py/flwr/proto/driver_pb2_grpc.pyi @@ -8,10 +8,10 @@ import grpc class DriverStub: def __init__(self, channel: grpc.Channel) -> None: ... - CreateWorkload: grpc.UnaryUnaryMultiCallable[ - flwr.proto.driver_pb2.CreateWorkloadRequest, - flwr.proto.driver_pb2.CreateWorkloadResponse] - """Request workload_id""" + CreateRun: grpc.UnaryUnaryMultiCallable[ + flwr.proto.driver_pb2.CreateRunRequest, + flwr.proto.driver_pb2.CreateRunResponse] + """Request run_id""" GetNodes: grpc.UnaryUnaryMultiCallable[ flwr.proto.driver_pb2.GetNodesRequest, @@ -31,11 +31,11 @@ class DriverStub: class DriverServicer(metaclass=abc.ABCMeta): @abc.abstractmethod - def CreateWorkload(self, - request: flwr.proto.driver_pb2.CreateWorkloadRequest, + def CreateRun(self, + request: flwr.proto.driver_pb2.CreateRunRequest, context: grpc.ServicerContext, - ) -> flwr.proto.driver_pb2.CreateWorkloadResponse: - """Request workload_id""" + ) -> flwr.proto.driver_pb2.CreateRunResponse: + """Request run_id""" pass @abc.abstractmethod diff --git a/src/py/flwr/server/driver/driver_servicer.py b/src/py/flwr/server/driver/driver_servicer.py index d2784834380e..546ebd884ca9 100644 --- a/src/py/flwr/server/driver/driver_servicer.py +++ b/src/py/flwr/server/driver/driver_servicer.py 
@@ -24,8 +24,8 @@ from flwr.common.logger import log from flwr.proto import driver_pb2_grpc from flwr.proto.driver_pb2 import ( - CreateWorkloadRequest, - CreateWorkloadResponse, + CreateRunRequest, + CreateRunResponse, GetNodesRequest, GetNodesResponse, PullTaskResRequest, @@ -57,14 +57,14 @@ def GetNodes( ] return GetNodesResponse(nodes=nodes) - def CreateWorkload( - self, request: CreateWorkloadRequest, context: grpc.ServicerContext - ) -> CreateWorkloadResponse: - """Create workload ID.""" - log(INFO, "DriverServicer.CreateWorkload") + def CreateRun( + self, request: CreateRunRequest, context: grpc.ServicerContext + ) -> CreateRunResponse: + """Create run ID.""" + log(INFO, "DriverServicer.CreateRun") state: State = self.state_factory.state() - run_id = state.create_workload() - return CreateWorkloadResponse(run_id=run_id) + run_id = state.create_run() + return CreateRunResponse(run_id=run_id) def PushTaskIns( self, request: PushTaskInsRequest, context: grpc.ServicerContext diff --git a/src/py/flwr/server/state/in_memory_state.py b/src/py/flwr/server/state/in_memory_state.py index 1ae7f65b7046..f8352fcfb091 100644 --- a/src/py/flwr/server/state/in_memory_state.py +++ b/src/py/flwr/server/state/in_memory_state.py @@ -211,13 +211,13 @@ def get_nodes(self, run_id: int) -> Set[int]: return set() return self.node_ids - def create_workload(self) -> int: - """Create one workload.""" + def create_run(self) -> int: + """Create one run.""" # Sample a random int64 as run_id run_id: int = int.from_bytes(os.urandom(8), "little", signed=True) if run_id not in self.run_ids: self.run_ids.add(run_id) return run_id - log(ERROR, "Unexpected workload creation failure.") + log(ERROR, "Unexpected run creation failure.") return 0 diff --git a/src/py/flwr/server/state/sqlite_state.py b/src/py/flwr/server/state/sqlite_state.py index 7dc456ad650a..4f66be3ff262 100644 --- a/src/py/flwr/server/state/sqlite_state.py +++ b/src/py/flwr/server/state/sqlite_state.py @@ -37,8 +37,8 @@ ); """ 
-SQL_CREATE_TABLE_WORKLOAD = """ -CREATE TABLE IF NOT EXISTS workload( +SQL_CREATE_TABLE_RUN = """ +CREATE TABLE IF NOT EXISTS run( run_id INTEGER UNIQUE ); """ @@ -58,7 +58,7 @@ ancestry TEXT, legacy_server_message BLOB, legacy_client_message BLOB, - FOREIGN KEY(run_id) REFERENCES workload(run_id) + FOREIGN KEY(run_id) REFERENCES run(run_id) ); """ @@ -78,7 +78,7 @@ ancestry TEXT, legacy_server_message BLOB, legacy_client_message BLOB, - FOREIGN KEY(run_id) REFERENCES workload(run_id) + FOREIGN KEY(run_id) REFERENCES run(run_id) ); """ @@ -119,7 +119,7 @@ def initialize(self, log_queries: bool = False) -> List[Tuple[str]]: cur = self.conn.cursor() # Create each table if not exists queries - cur.execute(SQL_CREATE_TABLE_WORKLOAD) + cur.execute(SQL_CREATE_TABLE_RUN) cur.execute(SQL_CREATE_TABLE_TASK_INS) cur.execute(SQL_CREATE_TABLE_TASK_RES) cur.execute(SQL_CREATE_TABLE_NODE) @@ -203,7 +203,7 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: try: self.query(query, data) except sqlite3.IntegrityError: - log(ERROR, "`workload` is invalid") + log(ERROR, "`run` is invalid") return None return task_id @@ -338,7 +338,7 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: try: self.query(query, data) except sqlite3.IntegrityError: - log(ERROR, "`workload` is invalid") + log(ERROR, "`run` is invalid") return None return task_id @@ -493,8 +493,8 @@ def get_nodes(self, run_id: int) -> Set[int]: If the provided `run_id` does not exist or has no matching nodes, an empty `Set` MUST be returned. 
""" - # Validate workload ID - query = "SELECT COUNT(*) FROM workload WHERE run_id = ?;" + # Validate run ID + query = "SELECT COUNT(*) FROM run WHERE run_id = ?;" if self.query(query, (run_id,))[0]["COUNT(*)"] == 0: return set() @@ -504,19 +504,19 @@ def get_nodes(self, run_id: int) -> Set[int]: result: Set[int] = {row["node_id"] for row in rows} return result - def create_workload(self) -> int: - """Create one workload and store it in state.""" + def create_run(self) -> int: + """Create one run and store it in state.""" # Sample a random int64 as run_id run_id: int = int.from_bytes(os.urandom(8), "little", signed=True) # Check conflicts - query = "SELECT COUNT(*) FROM workload WHERE run_id = ?;" + query = "SELECT COUNT(*) FROM run WHERE run_id = ?;" # If run_id does not exist if self.query(query, (run_id,))[0]["COUNT(*)"] == 0: - query = "INSERT INTO workload VALUES(:run_id);" + query = "INSERT INTO run VALUES(:run_id);" self.query(query, {"run_id": run_id}) return run_id - log(ERROR, "Unexpected workload creation failure.") + log(ERROR, "Unexpected run creation failure.") return 0 diff --git a/src/py/flwr/server/state/state.py b/src/py/flwr/server/state/state.py index 2996bbc82e55..7ab3b6bc0848 100644 --- a/src/py/flwr/server/state/state.py +++ b/src/py/flwr/server/state/state.py @@ -150,5 +150,5 @@ def get_nodes(self, run_id: int) -> Set[int]: """ @abc.abstractmethod - def create_workload(self) -> int: - """Create one workload.""" + def create_run(self) -> int: + """Create one run.""" diff --git a/src/py/flwr/server/state/state_test.py b/src/py/flwr/server/state/state_test.py index 0421c0edb224..88b4b53aed4c 100644 --- a/src/py/flwr/server/state/state_test.py +++ b/src/py/flwr/server/state/state_test.py @@ -66,7 +66,7 @@ def test_store_task_ins_one(self) -> None: # Prepare consumer_node_id = 1 state = self.state_factory() - run_id = state.create_workload() + run_id = state.create_run() task_ins = create_task_ins( consumer_node_id=consumer_node_id, 
anonymous=False, run_id=run_id ) @@ -108,7 +108,7 @@ def test_store_and_delete_tasks(self) -> None: # Prepare consumer_node_id = 1 state = self.state_factory() - run_id = state.create_workload() + run_id = state.create_run() task_ins_0 = create_task_ins( consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id ) @@ -182,7 +182,7 @@ def test_task_ins_store_anonymous_and_retrieve_anonymous(self) -> None: """ # Prepare state: State = self.state_factory() - run_id = state.create_workload() + run_id = state.create_run() task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) # Execute @@ -197,7 +197,7 @@ def test_task_ins_store_anonymous_and_fail_retrieving_identitiy(self) -> None: """Store anonymous TaskIns and fail to retrieve it.""" # Prepare state: State = self.state_factory() - run_id = state.create_workload() + run_id = state.create_run() task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) # Execute @@ -211,7 +211,7 @@ def test_task_ins_store_identity_and_fail_retrieving_anonymous(self) -> None: """Store identity TaskIns and fail retrieving it as anonymous.""" # Prepare state: State = self.state_factory() - run_id = state.create_workload() + run_id = state.create_run() task_ins = create_task_ins(consumer_node_id=1, anonymous=False, run_id=run_id) # Execute @@ -225,7 +225,7 @@ def test_task_ins_store_identity_and_retrieve_identity(self) -> None: """Store identity TaskIns and retrieve it.""" # Prepare state: State = self.state_factory() - run_id = state.create_workload() + run_id = state.create_run() task_ins = create_task_ins(consumer_node_id=1, anonymous=False, run_id=run_id) # Execute @@ -242,7 +242,7 @@ def test_task_ins_store_delivered_and_fail_retrieving(self) -> None: """Fail retrieving delivered task.""" # Prepare state: State = self.state_factory() - run_id = state.create_workload() + run_id = state.create_run() task_ins = create_task_ins(consumer_node_id=1, anonymous=False, run_id=run_id) # Execute 
@@ -285,7 +285,7 @@ def test_task_res_store_and_retrieve_by_task_ins_id(self) -> None: """Store TaskRes retrieve it by task_ins_id.""" # Prepare state: State = self.state_factory() - run_id = state.create_workload() + run_id = state.create_run() task_ins_id = uuid4() task_res = create_task_res( producer_node_id=0, @@ -306,7 +306,7 @@ def test_node_ids_initial_state(self) -> None: """Test retrieving all node_ids and empty initial state.""" # Prepare state: State = self.state_factory() - run_id = state.create_workload() + run_id = state.create_run() # Execute retrieved_node_ids = state.get_nodes(run_id) @@ -318,7 +318,7 @@ def test_create_node_and_get_nodes(self) -> None: """Test creating a client node.""" # Prepare state: State = self.state_factory() - run_id = state.create_workload() + run_id = state.create_run() node_ids = [] # Execute @@ -334,7 +334,7 @@ def test_delete_node(self) -> None: """Test deleting a client node.""" # Prepare state: State = self.state_factory() - run_id = state.create_workload() + run_id = state.create_run() node_id = state.create_node() # Execute @@ -348,7 +348,7 @@ def test_get_nodes_invalid_run_id(self) -> None: """Test retrieving all node_ids with invalid run_id.""" # Prepare state: State = self.state_factory() - state.create_workload() + state.create_run() invalid_run_id = 61016 state.create_node() @@ -362,7 +362,7 @@ def test_num_task_ins(self) -> None: """Test if num_tasks returns correct number of not delivered task_ins.""" # Prepare state: State = self.state_factory() - run_id = state.create_workload() + run_id = state.create_run() task_0 = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) task_1 = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) @@ -380,7 +380,7 @@ def test_num_task_res(self) -> None: """Test if num_tasks returns correct number of not delivered task_res.""" # Prepare state: State = self.state_factory() - run_id = state.create_workload() + run_id = state.create_run() task_0 = 
create_task_res( producer_node_id=0, anonymous=True, ancestry=["1"], run_id=run_id ) diff --git a/src/py/flwr/simulation/app.py b/src/py/flwr/simulation/app.py index c519f5a551f0..26bb04f17f4a 100644 --- a/src/py/flwr/simulation/app.py +++ b/src/py/flwr/simulation/app.py @@ -322,7 +322,7 @@ def update_resources(f_stop: threading.Event) -> None: "\n\t\t - You clients hit an out-of-memory (OOM) error and actors couldn't " "recover from it. Try launching your simulation with more generous " "`client_resources` setting (i.e. it seems %s is " - "not enough for your workload). Use fewer concurrent actors. " + "not enough for your run). Use fewer concurrent actors. " "\n\t\t - You were running a multi-node simulation and all worker nodes " "disconnected. The head node might still be alive but cannot accommodate " "any actor with resources: %s.", diff --git a/src/py/flwr/simulation/ray_transport/ray_actor.py b/src/py/flwr/simulation/ray_transport/ray_actor.py index 640817910396..38af3f08daa2 100644 --- a/src/py/flwr/simulation/ray_transport/ray_actor.py +++ b/src/py/flwr/simulation/ray_transport/ray_actor.py @@ -27,7 +27,7 @@ from flwr import common from flwr.client import Client, ClientFn -from flwr.client.workload_state import WorkloadState +from flwr.client.run_state import RunState from flwr.common.logger import log from flwr.simulation.ray_transport.utils import check_clientfn_returns_client @@ -61,9 +61,9 @@ def run( client_fn: ClientFn, job_fn: JobFn, cid: str, - state: WorkloadState, - ) -> Tuple[str, ClientRes, WorkloadState]: - """Run a client workload.""" + state: RunState, + ) -> Tuple[str, ClientRes, RunState]: + """Run a client run.""" # Execute tasks and return result # return also cid which is needed to ensure results # from the pool are correctly assigned to each ClientProxy @@ -79,12 +79,12 @@ def run( except Exception as ex: client_trace = traceback.format_exc() message = ( - "\n\tSomething went wrong when running your client workload." 
+ "\n\tSomething went wrong when running your client run." "\n\tClient " + cid + " crashed when the " + self.__class__.__name__ - + " was running its workload." + + " was running its run." "\n\tException triggered on the client side: " + client_trace, ) raise ClientException(str(message)) from ex @@ -94,7 +94,7 @@ def run( @ray.remote class DefaultActor(VirtualClientEngineActor): - """A Ray Actor class that runs client workloads. + """A Ray Actor class that runs client runs. Parameters ---------- @@ -237,10 +237,8 @@ def add_actors_to_pool(self, num_actors: int) -> None: self._idle_actors.extend(new_actors) self.num_actors += num_actors - def submit( - self, fn: Any, value: Tuple[ClientFn, JobFn, str, WorkloadState] - ) -> None: - """Take idle actor and assign it a client workload. + def submit(self, fn: Any, value: Tuple[ClientFn, JobFn, str, RunState]) -> None: + """Take idle actor and assign it a client run. Submit a job to an actor by first removing it from the list of idle actors, then check if this actor was flagged to be removed from the pool @@ -257,7 +255,7 @@ def submit( self._cid_to_future[cid]["future"] = future_key def submit_client_job( - self, actor_fn: Any, job: Tuple[ClientFn, JobFn, str, WorkloadState] + self, actor_fn: Any, job: Tuple[ClientFn, JobFn, str, RunState] ) -> None: """Submit a job while tracking client ids.""" _, _, cid, _ = job @@ -297,7 +295,7 @@ def _is_future_ready(self, cid: str) -> bool: return self._cid_to_future[cid]["ready"] # type: ignore - def _fetch_future_result(self, cid: str) -> Tuple[ClientRes, WorkloadState]: + def _fetch_future_result(self, cid: str) -> Tuple[ClientRes, RunState]: """Fetch result and updated state for a VirtualClient from Object Store. 
The job submitted by the ClientProxy interfacing with client with cid=cid is @@ -307,7 +305,7 @@ def _fetch_future_result(self, cid: str) -> Tuple[ClientRes, WorkloadState]: future: ObjectRef[Any] = self._cid_to_future[cid]["future"] # type: ignore res_cid, res, updated_state = ray.get( future - ) # type: (str, ClientRes, WorkloadState) + ) # type: (str, ClientRes, RunState) except ray.exceptions.RayActorError as ex: log(ERROR, ex) if hasattr(ex, "actor_id"): @@ -411,7 +409,7 @@ def process_unordered_future(self, timeout: Optional[float] = None) -> None: def get_client_result( self, cid: str, timeout: Optional[float] - ) -> Tuple[ClientRes, WorkloadState]: + ) -> Tuple[ClientRes, RunState]: """Get result from VirtualClient with specific cid.""" # Loop until all jobs submitted to the pool are completed. Break early # if the result for the ClientProxy calling this method is ready @@ -423,5 +421,5 @@ def get_client_result( break # Fetch result belonging to the VirtualClient calling this method - # Return both result from tasks and (potentially) updated workload state + # Return both result from tasks and (potentially) updated run state return self._fetch_future_result(cid) diff --git a/src/py/flwr/simulation/ray_transport/ray_client_proxy.py b/src/py/flwr/simulation/ray_transport/ray_client_proxy.py index 3f3f65ee6997..5c05850dfd2f 100644 --- a/src/py/flwr/simulation/ray_transport/ray_client_proxy.py +++ b/src/py/flwr/simulation/ray_transport/ray_client_proxy.py @@ -132,16 +132,16 @@ def __init__( self.proxy_state = NodeState() def _submit_job(self, job_fn: JobFn, timeout: Optional[float]) -> ClientRes: - # The VCE is not exposed to TaskIns, it won't handle multilple workloads + # The VCE is not exposed to TaskIns, it won't handle multiple runs # For the time being, fixing run_id is a small compromise # This will be one of the first points to address integrating VCE + DriverAPI run_id = 0 # Register state - self.proxy_state.register_workloadstate(run_id=run_id) + 
self.proxy_state.register_runstate(run_id=run_id) # Retrieve state - state = self.proxy_state.retrieve_workloadstate(run_id=run_id) + state = self.proxy_state.retrieve_runstate(run_id=run_id) try: self.actor_pool.submit_client_job( @@ -151,14 +151,12 @@ def _submit_job(self, job_fn: JobFn, timeout: Optional[float]) -> ClientRes: res, updated_state = self.actor_pool.get_client_result(self.cid, timeout) # Update state - self.proxy_state.update_workloadstate( - run_id=run_id, workload_state=updated_state - ) + self.proxy_state.update_runstate(run_id=run_id, run_state=updated_state) except Exception as ex: if self.actor_pool.num_actors == 0: # At this point we want to stop the simulation. - # since no more client workloads will be executed + # since no more client runs will be executed log(ERROR, "ActorPool is empty!!!") log(ERROR, traceback.format_exc()) log(ERROR, ex) diff --git a/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py b/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py index 13f5a6fec2da..9df71635b949 100644 --- a/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py +++ b/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py @@ -22,7 +22,7 @@ import ray from flwr.client import Client, NumPyClient -from flwr.client.workload_state import WorkloadState +from flwr.client.run_state import RunState from flwr.common import Code, GetPropertiesRes, Status from flwr.simulation.ray_transport.ray_actor import ( ClientRes, @@ -46,7 +46,7 @@ def get_dummy_client(cid: str) -> Client: return DummyClient(cid).to_client() -# A dummy workload +# A dummy run def job_fn(cid: str) -> JobFn: # pragma: no cover """Construct a simple job with cid dependency.""" @@ -112,11 +112,11 @@ def test_cid_consistency_one_at_a_time() -> None: ray.shutdown() -def test_cid_consistency_all_submit_first_workload_consistency() -> None: +def test_cid_consistency_all_submit_first_run_consistency() -> None: """Test that ClientProxies get the result of client 
job they submit. All jobs are submitted at the same time. Then fetched one at a time. This also tests - NodeState (at each Proxy) and WorkloadState basic functionality. + NodeState (at each Proxy) and RunState basic functionality. """ proxies, _ = prep() run_id = 0 @@ -125,9 +125,9 @@ def test_cid_consistency_all_submit_first_workload_consistency() -> None: shuffle(proxies) for prox in proxies: # Register state - prox.proxy_state.register_workloadstate(run_id=run_id) + prox.proxy_state.register_runstate(run_id=run_id) # Retrieve state - state = prox.proxy_state.retrieve_workloadstate(run_id=run_id) + state = prox.proxy_state.retrieve_runstate(run_id=run_id) job = job_fn(prox.cid) prox.actor_pool.submit_client_job( @@ -139,12 +139,12 @@ def test_cid_consistency_all_submit_first_workload_consistency() -> None: shuffle(proxies) for prox in proxies: res, updated_state = prox.actor_pool.get_client_result(prox.cid, timeout=None) - prox.proxy_state.update_workloadstate(run_id, workload_state=updated_state) + prox.proxy_state.update_runstate(run_id, run_state=updated_state) res = cast(GetPropertiesRes, res) assert int(prox.cid) * pi == res.properties["result"] assert ( str(int(prox.cid) * pi) - == prox.proxy_state.retrieve_workloadstate(run_id).state["result"] + == prox.proxy_state.retrieve_runstate(run_id).state["result"] ) ray.shutdown() @@ -162,7 +162,7 @@ def test_cid_consistency_without_proxies() -> None: job = job_fn(cid) pool.submit_client_job( lambda a, c_fn, j_fn, cid_, state: a.run.remote(c_fn, j_fn, cid_, state), - (get_dummy_client, job, cid, WorkloadState(state={})), + (get_dummy_client, job, cid, RunState(state={})), ) # fetch results one at a time diff --git a/src/py/flwr/simulation/ray_transport/utils.py b/src/py/flwr/simulation/ray_transport/utils.py index c8e6aa6cbe21..41aa8049eaf0 100644 --- a/src/py/flwr/simulation/ray_transport/utils.py +++ b/src/py/flwr/simulation/ray_transport/utils.py @@ -37,7 +37,7 @@ def enable_tf_gpu_growth() -> None: # the same 
GPU. # Luckily we can disable this behavior by enabling memory growth # on the GPU. In this way, VRAM allocated to the processes grows based - # on the needs for the workload. (this is for instance the default + # on the needs for the run. (this is for instance the default # behavior in PyTorch) # While this behavior is critical for Actors, you'll likely need it # as well in your main process (where the server runs and might evaluate diff --git a/src/py/flwr_experimental/ops/__init__.py b/src/py/flwr_experimental/ops/__init__.py index b56c757e0207..bad31028e68c 100644 --- a/src/py/flwr_experimental/ops/__init__.py +++ b/src/py/flwr_experimental/ops/__init__.py @@ -13,4 +13,4 @@ # limitations under the License. # ============================================================================== """Flower ops provides an opinionated way to provision necessary compute -infrastructure for running Flower workloads.""" +infrastructure for running Flower runs.""" From 625ae8317f0b923731a3018ee1389de86cd3c4c1 Mon Sep 17 00:00:00 2001 From: Javier Date: Fri, 5 Jan 2024 11:49:55 +0000 Subject: [PATCH 17/30] Add in-place FedAvg (#2293) --- src/py/flwr/server/strategy/aggregate.py | 28 ++++++++++- src/py/flwr/server/strategy/fedavg.py | 22 +++++--- src/py/flwr/server/strategy/fedavg_test.py | 58 ++++++++++++++++++++++ 3 files changed, 100 insertions(+), 8 deletions(-) diff --git a/src/py/flwr/server/strategy/aggregate.py b/src/py/flwr/server/strategy/aggregate.py index 63926f2eaa51..4eb76111b266 100644 --- a/src/py/flwr/server/strategy/aggregate.py +++ b/src/py/flwr/server/strategy/aggregate.py @@ -20,7 +20,8 @@ import numpy as np -from flwr.common import NDArray, NDArrays +from flwr.common import FitRes, NDArray, NDArrays, parameters_to_ndarrays +from flwr.server.client_proxy import ClientProxy def aggregate(results: List[Tuple[NDArrays, int]]) -> NDArrays: @@ -41,6 +42,31 @@ def aggregate(results: List[Tuple[NDArrays, int]]) -> NDArrays: return weights_prime +def 
aggregate_inplace(results: List[Tuple[ClientProxy, FitRes]]) -> NDArrays: + """Compute in-place weighted average.""" + # Count total examples + num_examples_total = sum([fit_res.num_examples for _, fit_res in results]) + + # Compute scaling factors for each result + scaling_factors = [ + fit_res.num_examples / num_examples_total for _, fit_res in results + ] + + # Let's do in-place aggregation + # Get first result, then add up each other + params = [ + scaling_factors[0] * x for x in parameters_to_ndarrays(results[0][1].parameters) + ] + for i, (_, fit_res) in enumerate(results[1:]): + res = ( + scaling_factors[i + 1] * x + for x in parameters_to_ndarrays(fit_res.parameters) + ) + params = [reduce(np.add, layer_updates) for layer_updates in zip(params, res)] + + return params + + def aggregate_median(results: List[Tuple[NDArrays, int]]) -> NDArrays: """Compute median.""" # Create a list of weights and ignore the number of examples diff --git a/src/py/flwr/server/strategy/fedavg.py b/src/py/flwr/server/strategy/fedavg.py index c93c8cb8b83e..e4b126823fb6 100644 --- a/src/py/flwr/server/strategy/fedavg.py +++ b/src/py/flwr/server/strategy/fedavg.py @@ -37,7 +37,7 @@ from flwr.server.client_manager import ClientManager from flwr.server.client_proxy import ClientProxy -from .aggregate import aggregate, weighted_loss_avg +from .aggregate import aggregate, aggregate_inplace, weighted_loss_avg from .strategy import Strategy WARNING_MIN_AVAILABLE_CLIENTS_TOO_LOW = """ @@ -107,6 +107,7 @@ def __init__( initial_parameters: Optional[Parameters] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, evaluate_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, + inplace: bool = True, ) -> None: super().__init__() @@ -128,6 +129,7 @@ def __init__( self.initial_parameters = initial_parameters self.fit_metrics_aggregation_fn = fit_metrics_aggregation_fn self.evaluate_metrics_aggregation_fn = evaluate_metrics_aggregation_fn + self.inplace = inplace def 
__repr__(self) -> str: """Compute a string representation of the strategy.""" @@ -226,12 +228,18 @@ def aggregate_fit( if not self.accept_failures and failures: return None, {} - # Convert results - weights_results = [ - (parameters_to_ndarrays(fit_res.parameters), fit_res.num_examples) - for _, fit_res in results - ] - parameters_aggregated = ndarrays_to_parameters(aggregate(weights_results)) + if self.inplace: + # Does in-place weighted average of results + aggregated_ndarrays = aggregate_inplace(results) + else: + # Convert results + weights_results = [ + (parameters_to_ndarrays(fit_res.parameters), fit_res.num_examples) + for _, fit_res in results + ] + aggregated_ndarrays = aggregate(weights_results) + + parameters_aggregated = ndarrays_to_parameters(aggregated_ndarrays) # Aggregate custom metrics if aggregation fn was provided metrics_aggregated = {} diff --git a/src/py/flwr/server/strategy/fedavg_test.py b/src/py/flwr/server/strategy/fedavg_test.py index 947736f4a571..e62eaa5c5832 100644 --- a/src/py/flwr/server/strategy/fedavg_test.py +++ b/src/py/flwr/server/strategy/fedavg_test.py @@ -15,6 +15,16 @@ """FedAvg tests.""" +from typing import List, Tuple, Union +from unittest.mock import MagicMock + +import numpy as np +from numpy.testing import assert_allclose + +from flwr.common import Code, FitRes, Status, parameters_to_ndarrays +from flwr.common.parameter import ndarrays_to_parameters +from flwr.server.client_proxy import ClientProxy + from .fedavg import FedAvg @@ -120,3 +130,51 @@ def test_fedavg_num_evaluation_clients_minimum() -> None: # Assert assert expected == actual + + +def test_inplace_aggregate_fit_equivalence() -> None: + """Test aggregate_fit equivalence between FedAvg and its inplace version.""" + # Prepare + weights0_0 = np.random.randn(100, 64) + weights0_1 = np.random.randn(314, 628, 3) + weights1_0 = np.random.randn(100, 64) + weights1_1 = np.random.randn(314, 628, 3) + + results: List[Tuple[ClientProxy, FitRes]] = [ + ( + MagicMock(), + 
FitRes( + status=Status(code=Code.OK, message="Success"), + parameters=ndarrays_to_parameters([weights0_0, weights0_1]), + num_examples=1, + metrics={}, + ), + ), + ( + MagicMock(), + FitRes( + status=Status(code=Code.OK, message="Success"), + parameters=ndarrays_to_parameters([weights1_0, weights1_1]), + num_examples=5, + metrics={}, + ), + ), + ] + failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + + fedavg_reference = FedAvg(inplace=False) + fedavg_inplace = FedAvg() + + # Execute + reference, _ = fedavg_reference.aggregate_fit(1, results, failures) + assert reference + inplace, _ = fedavg_inplace.aggregate_fit(1, results, failures) + assert inplace + + # Convert to NumPy to check similarity + reference_np = parameters_to_ndarrays(reference) + inplace_np = parameters_to_ndarrays(inplace) + + # Assert + for ref, inp in zip(reference_np, inplace_np): + assert_allclose(ref, inp) From b43cf12d9e1438ca4bff465346a36b66c76f250b Mon Sep 17 00:00:00 2001 From: Heng Pan <134433891+panh99@users.noreply.github.com> Date: Mon, 8 Jan 2024 19:07:10 +0800 Subject: [PATCH 18/30] Update the error message when simulation crashes (#2759) --- src/py/flwr/simulation/app.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/src/py/flwr/simulation/app.py b/src/py/flwr/simulation/app.py index 26bb04f17f4a..6a18a258ac60 100644 --- a/src/py/flwr/simulation/app.py +++ b/src/py/flwr/simulation/app.py @@ -314,8 +314,18 @@ def update_resources(f_stop: threading.Event) -> None: log(ERROR, traceback.format_exc()) log( ERROR, - "Your simulation crashed :(. This could be because of several reasons." + "Your simulation crashed :(. This could be because of several reasons. " "The most common are: " + "\n\t > Sometimes, issues in the simulation code itself can cause crashes. " + "It's always a good idea to double-check your code for any potential bugs " + "or inconsistencies that might be contributing to the problem. 
" + "For example: " + "\n\t\t - You might be using a class attribute in your clients that " + "hasn't been defined." + "\n\t\t - There could be an incorrect method call to a 3rd party library " + "(e.g., PyTorch)." + "\n\t\t - The return types of methods in your clients/strategies might be " + "incorrect." "\n\t > Your system couldn't fit a single VirtualClient: try lowering " "`client_resources`." "\n\t > All the actors in your pool crashed. This could be because: " @@ -325,7 +335,9 @@ def update_resources(f_stop: threading.Event) -> None: "not enough for your run). Use fewer concurrent actors. " "\n\t\t - You were running a multi-node simulation and all worker nodes " "disconnected. The head node might still be alive but cannot accommodate " - "any actor with resources: %s.", + "any actor with resources: %s." + "\nTake a look at the Flower simulation examples for guidance " + ".", client_resources, client_resources, ) From e6fb1a98e43dd18ffb946c2064956a0b38342d4d Mon Sep 17 00:00:00 2001 From: Javier Date: Mon, 15 Jan 2024 19:23:32 +0000 Subject: [PATCH 19/30] Introduce `RecordSet` (#2787) Co-authored-by: Daniel J. Beutel --- src/py/flwr/common/recordset.py | 78 +++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 src/py/flwr/common/recordset.py diff --git a/src/py/flwr/common/recordset.py b/src/py/flwr/common/recordset.py new file mode 100644 index 000000000000..0088b7397a6d --- /dev/null +++ b/src/py/flwr/common/recordset.py @@ -0,0 +1,78 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""RecordSet.""" + +from dataclasses import dataclass, field +from typing import Dict + + +@dataclass +class ParametersRecord: + """Parameters record.""" + + +@dataclass +class MetricsRecord: + """Metrics record.""" + + +@dataclass +class ConfigsRecord: + """Configs record.""" + + +@dataclass +class RecordSet: + """Definition of RecordSet.""" + + parameters: Dict[str, ParametersRecord] = field(default_factory=dict) + metrics: Dict[str, MetricsRecord] = field(default_factory=dict) + configs: Dict[str, ConfigsRecord] = field(default_factory=dict) + + def set_parameters(self, name: str, record: ParametersRecord) -> None: + """Add a ParametersRecord.""" + self.parameters[name] = record + + def get_parameters(self, name: str) -> ParametersRecord: + """Get a ParametersRecord.""" + return self.parameters[name] + + def del_parameters(self, name: str) -> None: + """Delete a ParametersRecord.""" + del self.parameters[name] + + def set_metrics(self, name: str, record: MetricsRecord) -> None: + """Add a MetricsRecord.""" + self.metrics[name] = record + + def get_metrics(self, name: str) -> MetricsRecord: + """Get a MetricsRecord.""" + return self.metrics[name] + + def del_metrics(self, name: str) -> None: + """Delete a MetricsRecord.""" + del self.metrics[name] + + def set_configs(self, name: str, record: ConfigsRecord) -> None: + """Add a ConfigsRecord.""" + self.configs[name] = record + + def get_configs(self, name: str) -> ConfigsRecord: + """Get a ConfigsRecord.""" + return self.configs[name] + + def del_configs(self, name: str) -> None: + """Delete a 
ConfigsRecord.""" + del self.configs[name] From 9dd52988ce5d7204a96d74fefad812382c5f18d2 Mon Sep 17 00:00:00 2001 From: Adam Narozniak <51029327+adam-narozniak@users.noreply.github.com> Date: Tue, 16 Jan 2024 14:03:14 +0100 Subject: [PATCH 20/30] Narrow down Python version in FDS TF e2e test (#2797) Co-authored-by: Javier --- datasets/e2e/tensorflow/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/datasets/e2e/tensorflow/pyproject.toml b/datasets/e2e/tensorflow/pyproject.toml index 9c5c72c46400..4d7b5f60e856 100644 --- a/datasets/e2e/tensorflow/pyproject.toml +++ b/datasets/e2e/tensorflow/pyproject.toml @@ -9,7 +9,7 @@ description = "Flower Datasets with TensorFlow" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = "^3.8" +python = ">=3.8,<3.11" flwr-datasets = { path = "./../../", extras = ["vision"] } tensorflow-cpu = "^2.9.1, !=2.11.1" parameterized = "==0.9.0" From 7f48ea2ae7b8ff8653eb7ce013727028a01e0f9a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Jan 2024 08:38:19 +0000 Subject: [PATCH 21/30] Update types-setuptools requirement from ==68.2.0.0 to ==69.0.0.20240115 (#2790) Updates the requirements on [types-setuptools](https://github.com/python/typeshed) to permit the latest version. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-setuptools dependency-type: direct:development ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Taner Topal --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 8a300afa8c84..0616ffdbeffd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -82,7 +82,7 @@ rest = ["requests", "starlette", "uvicorn"] types-dataclasses = "==0.6.6" types-protobuf = "==3.19.18" types-requests = "==2.31.0.10" -types-setuptools = "==68.2.0.0" +types-setuptools = "==69.0.0.20240115" clang-format = "==17.0.4" isort = "==5.12.0" black = { version = "==23.10.1", extras = ["jupyter"] } From 0c13d3b5b62351951485048211e27cf301ae2523 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Jan 2024 10:58:31 +0000 Subject: [PATCH 22/30] Bump actions/download-artifact from 4.1.0 to 4.1.1 (#2788) Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 4.1.0 to 4.1.1. - [Release notes](https://github.com/actions/download-artifact/releases) - [Commits](https://github.com/actions/download-artifact/compare/f44cd7b40bfd40b6aa1cc1b9b5b7bf03d3c67110...6b208ae046db98c579e8a3aa621ab581ff575935) --- updated-dependencies: - dependency-name: actions/download-artifact dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Taner Topal --- .github/workflows/_docker-build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/_docker-build.yml b/.github/workflows/_docker-build.yml index 36b94b5c7e97..07c9d0cba0ad 100644 --- a/.github/workflows/_docker-build.yml +++ b/.github/workflows/_docker-build.yml @@ -114,7 +114,7 @@ jobs: metadata: ${{ steps.meta.outputs.json }} steps: - name: Download digests - uses: actions/download-artifact@f44cd7b40bfd40b6aa1cc1b9b5b7bf03d3c67110 # v4.1.0 + uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 with: pattern: digests-${{ needs.build.outputs.build-id }}-* path: /tmp/digests From f1f0299791da5d5bafc07210cd458ad0bbf2f2f9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Jan 2024 12:34:16 +0000 Subject: [PATCH 23/30] Bump actions/upload-artifact from 4.0.0 to 4.1.0 (#2789) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.0.0 to 4.1.0. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/c7d193f32edcb7bfad88892161225aeda64e9392...1eb3cb2b3e0f29609092a73eb033bb759a334595) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/_docker-build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/_docker-build.yml b/.github/workflows/_docker-build.yml index 07c9d0cba0ad..4a1289d9175a 100644 --- a/.github/workflows/_docker-build.yml +++ b/.github/workflows/_docker-build.yml @@ -98,7 +98,7 @@ jobs: touch "/tmp/digests/${digest#sha256:}" - name: Upload digest - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + uses: actions/upload-artifact@1eb3cb2b3e0f29609092a73eb033bb759a334595 # v4.1.0 with: name: digests-${{ steps.build-id.outputs.id }}-${{ matrix.platform.name }} path: /tmp/digests/* From 097631c079165fb3d72d89fb1bad66a294746033 Mon Sep 17 00:00:00 2001 From: Daniel Nata Nugraha Date: Wed, 17 Jan 2024 14:31:27 +0100 Subject: [PATCH 24/30] Fix outdated Android README (#2804) --- examples/android/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/android/README.md b/examples/android/README.md index 7931aa96b0c5..f9f2bb93b8dc 100644 --- a/examples/android/README.md +++ b/examples/android/README.md @@ -54,4 +54,4 @@ poetry run ./run.sh Download and install the `flwr_android_client.apk` on each Android device/emulator. The server currently expects a minimum of 4 Android clients, but it can be changed in the `server.py`. -When the Android app runs, add the client ID (between 1-10), the IP and port of your server, and press `Load Dataset`. This will load the local CIFAR10 dataset in memory. Then press `Setup Connection Channel` which will establish connection with the server. Finally, press `Train Federated!` which will start the federated training. +When the Android app runs, add the client ID (between 1-10), the IP and port of your server, and press `Start`. 
This will load the local CIFAR10 dataset in memory, establish connection with the server, and start the federated training. To abort the federated learning process, press `Stop`. You can clear and refresh the log messages by pressing `Clear` and `Refresh` buttons respectively. From 0daa3d79d102d62e626e9d951249da12811d87dd Mon Sep 17 00:00:00 2001 From: Charles Beauville Date: Wed, 17 Jan 2024 16:29:13 +0100 Subject: [PATCH 25/30] Add conda install instructions (#2800) --- doc/source/how-to-install-flower.rst | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/doc/source/how-to-install-flower.rst b/doc/source/how-to-install-flower.rst index 1107f6798b23..ff3dbb605846 100644 --- a/doc/source/how-to-install-flower.rst +++ b/doc/source/how-to-install-flower.rst @@ -11,6 +11,9 @@ Flower requires at least `Python 3.8 `_, but `Pyth Install stable release ---------------------- +Using pip +~~~~~~~~~ + Stable releases are available on `PyPI `_:: python -m pip install flwr @@ -20,6 +23,25 @@ For simulations that use the Virtual Client Engine, ``flwr`` should be installed python -m pip install flwr[simulation] +Using conda (or mamba) +~~~~~~~~~~~~~~~~~~~~~~ + +Flower can also be installed from the ``conda-forge`` channel. 
+ +If you have not added ``conda-forge`` to your channels, you will first need to run the following:: + + conda config --add channels conda-forge + conda config --set channel_priority strict + +Once the ``conda-forge`` channel has been enabled, ``flwr`` can be installed with ``conda``:: + + conda install flwr + +or with ``mamba``:: + + mamba install flwr + + Verify installation ------------------- From 9e6df75bec9092b89f06939d533d2d8e66a5e0cc Mon Sep 17 00:00:00 2001 From: Edoardo Gabrielli Date: Wed, 17 Jan 2024 18:36:37 +0100 Subject: [PATCH 26/30] Update FedMedian docstring (#2761) --- src/py/flwr/server/strategy/fedmedian.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/py/flwr/server/strategy/fedmedian.py b/src/py/flwr/server/strategy/fedmedian.py index 7a5bf1425b44..17e979d92beb 100644 --- a/src/py/flwr/server/strategy/fedmedian.py +++ b/src/py/flwr/server/strategy/fedmedian.py @@ -36,7 +36,7 @@ class FedMedian(FedAvg): - """Configurable FedAvg with Momentum strategy implementation.""" + """Configurable FedMedian strategy implementation.""" def __repr__(self) -> str: """Compute a string representation of the strategy.""" From 815f66277cf42b97b6bcb481a9baf32632119752 Mon Sep 17 00:00:00 2001 From: Charles Beauville Date: Wed, 17 Jan 2024 18:50:42 +0100 Subject: [PATCH 27/30] Favor docformatter for multi-line docstrings (#2807) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 0616ffdbeffd..7a8b0d1ad45f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -184,7 +184,7 @@ target-version = "py38" line-length = 88 select = ["D", "E", "F", "W", "B", "ISC", "C4", "UP"] fixable = ["D", "E", "F", "W", "B", "ISC", "C4", "UP"] -ignore = ["B024", "B027"] +ignore = ["B024", "B027", "D205", "D209"] exclude = [ ".bzr", ".direnv", From 1fcb147c8360d90cae741ba802e3720ad145010b Mon Sep 17 00:00:00 2001 From: Javier Date: Wed, 17 Jan 2024 18:23:57 +0000 Subject: [PATCH 28/30] Add 
`ParametersRecord` (#2799) Co-authored-by: Daniel J. Beutel Co-authored-by: Heng Pan --- src/py/flwr/common/parametersrecord.py | 110 ++++++++++++++++++ src/py/flwr/common/recordset.py | 13 +-- src/py/flwr/common/recordset_test.py | 147 +++++++++++++++++++++++++ src/py/flwr/common/recordset_utils.py | 87 +++++++++++++++ 4 files changed, 349 insertions(+), 8 deletions(-) create mode 100644 src/py/flwr/common/parametersrecord.py create mode 100644 src/py/flwr/common/recordset_test.py create mode 100644 src/py/flwr/common/recordset_utils.py diff --git a/src/py/flwr/common/parametersrecord.py b/src/py/flwr/common/parametersrecord.py new file mode 100644 index 000000000000..3d40c0488baa --- /dev/null +++ b/src/py/flwr/common/parametersrecord.py @@ -0,0 +1,110 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""ParametersRecord and Array.""" + + +from dataclasses import dataclass, field +from typing import List, Optional, OrderedDict + + +@dataclass +class Array: + """Array type. + + A dataclass containing serialized data from an array-like or tensor-like object + along with some metadata about it. + + Parameters + ---------- + dtype : str + A string representing the data type of the serialised object (e.g. `np.float32`) + + shape : List[int] + A list representing the shape of the unserialized array-like object. 
This is + used to deserialize the data (depending on the serialization method) or simply + as a metadata field. + + stype : str + A string indicating the type of serialisation mechanism used to generate the + bytes in `data` from an array-like or tensor-like object. + + data: bytes + A buffer of bytes containing the data. + """ + + dtype: str + shape: List[int] + stype: str + data: bytes + + +@dataclass +class ParametersRecord: + """Parameters record. + + A dataclass storing named Arrays in order. This means that it holds entries as an + OrderedDict[str, Array]. ParametersRecord objects can be viewed as an equivalent to + PyTorch's state_dict, but holding serialised tensors instead. + """ + + keep_input: bool + data: OrderedDict[str, Array] = field(default_factory=OrderedDict[str, Array]) + + def __init__( + self, + array_dict: Optional[OrderedDict[str, Array]] = None, + keep_input: bool = False, + ) -> None: + """Construct a ParametersRecord object. + + Parameters + ---------- + array_dict : Optional[OrderedDict[str, Array]] + A dictionary that stores serialized array-like or tensor-like objects. + keep_input : bool (default: False) + A boolean indicating whether parameters should be deleted from the input + dictionary immediately after adding them to the record. If False, the + dictionary passed to `set_parameters()` will be empty once exiting from that + function. This is the desired behaviour when working with very large + models/tensors/arrays. However, if you plan to continue working with your + parameters after adding it to the record, set this flag to True. When set + to True, the data is duplicated in memory. + """ + self.keep_input = keep_input + self.data = OrderedDict() + if array_dict: + self.set_parameters(array_dict) + + def set_parameters(self, array_dict: OrderedDict[str, Array]) -> None: + """Add parameters to record. 
+ + Parameters + ---------- + array_dict : OrderedDict[str, Array] + A dictionary that stores serialized array-like or tensor-like objects. + """ + if any(not isinstance(k, str) for k in array_dict.keys()): + raise TypeError(f"Not all keys are of valid type. Expected {str}") + if any(not isinstance(v, Array) for v in array_dict.values()): + raise TypeError(f"Not all values are of valid type. Expected {Array}") + + if self.keep_input: + # Copy + self.data = OrderedDict(array_dict) + else: + # Add entries to dataclass without duplicating memory + for key in list(array_dict.keys()): + self.data[key] = array_dict[key] + del array_dict[key] diff --git a/src/py/flwr/common/recordset.py b/src/py/flwr/common/recordset.py index 0088b7397a6d..dc723a2cea86 100644 --- a/src/py/flwr/common/recordset.py +++ b/src/py/flwr/common/recordset.py @@ -14,13 +14,10 @@ # ============================================================================== """RecordSet.""" -from dataclasses import dataclass +from dataclasses import dataclass, field from typing import Dict - -@dataclass -class ParametersRecord: - """Parameters record.""" +from .parametersrecord import ParametersRecord @dataclass @@ -37,9 +34,9 @@ class ConfigsRecord: class RecordSet: """Definition of RecordSet.""" - parameters: Dict[str, ParametersRecord] = {} - metrics: Dict[str, MetricsRecord] = {} - configs: Dict[str, ConfigsRecord] = {} + parameters: Dict[str, ParametersRecord] = field(default_factory=dict) + metrics: Dict[str, MetricsRecord] = field(default_factory=dict) + configs: Dict[str, ConfigsRecord] = field(default_factory=dict) def set_parameters(self, name: str, record: ParametersRecord) -> None: """Add a ParametersRecord.""" diff --git a/src/py/flwr/common/recordset_test.py b/src/py/flwr/common/recordset_test.py new file mode 100644 index 000000000000..90c06dcdb109 --- /dev/null +++ b/src/py/flwr/common/recordset_test.py @@ -0,0 +1,147 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""RecordSet tests.""" + + +from typing import Callable, List, OrderedDict, Type, Union + +import numpy as np +import pytest + +from .parameter import ndarrays_to_parameters, parameters_to_ndarrays +from .parametersrecord import Array, ParametersRecord +from .recordset_utils import ( + parameters_to_parametersrecord, + parametersrecord_to_parameters, +) +from .typing import NDArray, NDArrays, Parameters + + +def get_ndarrays() -> NDArrays: + """Return list of NumPy arrays.""" + arr1 = np.array([[1.0, 2.0], [3.0, 4], [5.0, 6.0]]) + arr2 = np.eye(2, 7, 3) + + return [arr1, arr2] + + +def ndarray_to_array(ndarray: NDArray) -> Array: + """Represent NumPy ndarray as Array.""" + return Array( + data=ndarray.tobytes(), + dtype=str(ndarray.dtype), + stype="numpy.ndarray.tobytes", + shape=list(ndarray.shape), + ) + + +def test_ndarray_to_array() -> None: + """Test creation of Array object from NumPy ndarray.""" + shape = (2, 7, 9) + arr = np.eye(*shape) + + array = ndarray_to_array(arr) + + arr_ = np.frombuffer(buffer=array.data, dtype=array.dtype).reshape(array.shape) + + assert np.array_equal(arr, arr_) + + +def test_parameters_to_array_and_back() -> None: + """Test conversion between legacy Parameters and Array.""" + ndarrays = get_ndarrays() + + # Array represents a single array, unlike Paramters, which represent a + # list of arrays + ndarray = 
ndarrays[0] + + parameters = ndarrays_to_parameters([ndarray]) + + array = Array( + data=parameters.tensors[0], dtype="", stype=parameters.tensor_type, shape=[] + ) + + parameters = Parameters(tensors=[array.data], tensor_type=array.stype) + + ndarray_ = parameters_to_ndarrays(parameters=parameters)[0] + + assert np.array_equal(ndarray, ndarray_) + + +def test_parameters_to_parametersrecord_and_back() -> None: + """Test conversion between legacy Parameters and ParametersRecords.""" + ndarrays = get_ndarrays() + + parameters = ndarrays_to_parameters(ndarrays) + + params_record = parameters_to_parametersrecord(parameters=parameters) + + parameters_ = parametersrecord_to_parameters(params_record) + + ndarrays_ = parameters_to_ndarrays(parameters=parameters_) + + for arr, arr_ in zip(ndarrays, ndarrays_): + assert np.array_equal(arr, arr_) + + +def test_set_parameters_while_keeping_intputs() -> None: + """Tests keep_input functionality in ParametersRecord.""" + # Adding parameters to a record that doesn't erase entries in the input `array_dict` + p_record = ParametersRecord(keep_input=True) + array_dict = OrderedDict( + {str(i): ndarray_to_array(ndarray) for i, ndarray in enumerate(get_ndarrays())} + ) + p_record.set_parameters(array_dict) + + # Creating a second parametersrecord passing the same `array_dict` (not erased) + p_record_2 = ParametersRecord(array_dict) + assert p_record.data == p_record_2.data + + # Now it should be empty (the second ParametersRecord wasn't flagged to keep it) + assert len(array_dict) == 0 + + +def test_set_parameters_with_correct_types() -> None: + """Test adding dictionary of Arrays to ParametersRecord.""" + p_record = ParametersRecord() + array_dict = OrderedDict( + {str(i): ndarray_to_array(ndarray) for i, ndarray in enumerate(get_ndarrays())} + ) + p_record.set_parameters(array_dict) + + +@pytest.mark.parametrize( + "key_type, value_fn", + [ + (str, lambda x: x), # correct key, incorrect value + (str, lambda x: x.tolist()), # correct 
key, incorrect value + (int, ndarray_to_array), # incorrect key, correct value + (int, lambda x: x), # incorrect key, incorrect value + (int, lambda x: x.tolist()), # incorrect key, incorrect value + ], +) +def test_set_parameters_with_incorrect_types( + key_type: Type[Union[int, str]], + value_fn: Callable[[NDArray], Union[NDArray, List[float]]], +) -> None: + """Test adding dictionary of unsupported types to ParametersRecord.""" + p_record = ParametersRecord() + + array_dict = { + key_type(i): value_fn(ndarray) for i, ndarray in enumerate(get_ndarrays()) + } + + with pytest.raises(TypeError): + p_record.set_parameters(array_dict) # type: ignore diff --git a/src/py/flwr/common/recordset_utils.py b/src/py/flwr/common/recordset_utils.py new file mode 100644 index 000000000000..c1e724fa2758 --- /dev/null +++ b/src/py/flwr/common/recordset_utils.py @@ -0,0 +1,87 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""RecordSet utilities.""" + + +from typing import OrderedDict + +from .parametersrecord import Array, ParametersRecord +from .typing import Parameters + + +def parametersrecord_to_parameters( + record: ParametersRecord, keep_input: bool = False +) -> Parameters: + """Convert ParameterRecord to legacy Parameters. 
+ + Warning: Because `Arrays` in `ParametersRecord` encode more information of the + array-like or tensor-like data (e.g their datatype, shape) than `Parameters` it + might not be possible to reconstruct such data structures from `Parameters` objects + alone. Additional information or metadta must be provided from elsewhere. + + Parameters + ---------- + record : ParametersRecord + The record to be conveted into Parameters. + keep_input : bool (default: False) + A boolean indicating whether entries in the record should be deleted from the + input dictionary immediately after adding them to the record. + """ + parameters = Parameters(tensors=[], tensor_type="") + + for key in list(record.data.keys()): + parameters.tensors.append(record.data[key].data) + + if not keep_input: + del record.data[key] + + return parameters + + +def parameters_to_parametersrecord( + parameters: Parameters, keep_input: bool = False +) -> ParametersRecord: + """Convert legacy Parameters into a single ParametersRecord. + + Because there is no concept of names in the legacy Parameters, arbitrary keys will + be used when constructing the ParametersRecord. Similarly, the shape and data type + won't be recorded in the Array objects. + + Parameters + ---------- + parameters : Parameters + Parameters object to be represented as a ParametersRecord. + keep_input : bool (default: False) + A boolean indicating whether parameters should be deleted from the input + Parameters object (i.e. a list of serialized NumPy arrays) immediately after + adding them to the record. 
+ """ + tensor_type = parameters.tensor_type + + p_record = ParametersRecord() + + num_arrays = len(parameters.tensors) + for idx in range(num_arrays): + if keep_input: + tensor = parameters.tensors[idx] + else: + tensor = parameters.tensors.pop(0) + p_record.set_parameters( + OrderedDict( + {str(idx): Array(data=tensor, dtype="", stype=tensor_type, shape=[])} + ) + ) + + return p_record From 66b3bbe81484c11be579551175991189a4888476 Mon Sep 17 00:00:00 2001 From: "Daniel J. Beutel" Date: Wed, 17 Jan 2024 19:34:06 +0100 Subject: [PATCH 29/30] Upgrade Pylint to 3.0.3 (#2488) Co-authored-by: Charles Beauville --- pyproject.toml | 4 +-- src/py/flwr/client/app.py | 9 ++++-- src/py/flwr/client/app_test.py | 16 +++++------ src/py/flwr/client/dpfedavg_numpy_client.py | 8 +++--- .../client/message_handler/task_handler.py | 3 +- src/py/flwr/client/numpy_client.py | 4 +-- src/py/flwr/client/rest_client/connection.py | 4 +++ .../secure_aggregation/secaggplus_handler.py | 12 ++++---- src/py/flwr/common/retry_invoker.py | 1 + src/py/flwr/common/serde.py | 16 +++++++---- src/py/flwr/driver/app_test.py | 1 - src/py/flwr/driver/driver_test.py | 2 ++ src/py/flwr/driver/grpc_driver.py | 8 +++--- .../server/fleet/grpc_bidi/grpc_bridge.py | 10 +++---- .../fleet/grpc_bidi/grpc_bridge_test.py | 1 + .../server/fleet/grpc_bidi/ins_scheduler.py | 2 +- src/py/flwr/server/server_test.py | 28 +++++++++---------- src/py/flwr/server/state/sqlite_state.py | 4 +-- src/py/flwr/server/state/sqlite_state_test.py | 2 +- src/py/flwr/server/state/state_test.py | 2 +- src/py/flwr/server/strategy/aggregate.py | 16 +++++------ .../flwr/server/strategy/dpfedavg_adaptive.py | 2 +- src/py/flwr/server/strategy/dpfedavg_fixed.py | 4 +-- src/py/flwr/server/strategy/fedavg_android.py | 2 -- src/py/flwr/server/strategy/qfedavg.py | 2 +- 25 files changed, 87 insertions(+), 76 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 7a8b0d1ad45f..24d20c7ced40 100644 --- a/pyproject.toml +++ b/pyproject.toml 
@@ -88,7 +88,7 @@ isort = "==5.12.0" black = { version = "==23.10.1", extras = ["jupyter"] } docformatter = "==1.7.5" mypy = "==1.6.1" -pylint = "==2.13.9" +pylint = "==3.0.3" flake8 = "==5.0.4" pytest = "==7.4.3" pytest-cov = "==4.1.0" @@ -137,7 +137,7 @@ line-length = 88 target-version = ["py38", "py39", "py310", "py311"] [tool.pylint."MESSAGES CONTROL"] -disable = "bad-continuation,duplicate-code,too-few-public-methods,useless-import-alias" +disable = "duplicate-code,too-few-public-methods,useless-import-alias" [tool.pytest.ini_options] minversion = "6.2" diff --git a/src/py/flwr/client/app.py b/src/py/flwr/client/app.py index a5b285fbb7fb..91fa5468ae75 100644 --- a/src/py/flwr/client/app.py +++ b/src/py/flwr/client/app.py @@ -138,10 +138,12 @@ def _check_actionable_client( client: Optional[Client], client_fn: Optional[ClientFn] ) -> None: if client_fn is None and client is None: - raise Exception("Both `client_fn` and `client` are `None`, but one is required") + raise ValueError( + "Both `client_fn` and `client` are `None`, but one is required" + ) if client_fn is not None and client is not None: - raise Exception( + raise ValueError( "Both `client_fn` and `client` are provided, but only one is allowed" ) @@ -150,6 +152,7 @@ def _check_actionable_client( # pylint: disable=too-many-branches # pylint: disable=too-many-locals # pylint: disable=too-many-statements +# pylint: disable=too-many-arguments def start_client( *, server_address: str, @@ -299,7 +302,7 @@ def single_client_factory( cid: str, # pylint: disable=unused-argument ) -> Client: if client is None: # Added this to keep mypy happy - raise Exception( + raise ValueError( "Both `client_fn` and `client` are `None`, but one is required" ) return client # Always return the same instance diff --git a/src/py/flwr/client/app_test.py b/src/py/flwr/client/app_test.py index 7ef6410debad..56d6308a0fe2 100644 --- a/src/py/flwr/client/app_test.py +++ b/src/py/flwr/client/app_test.py @@ -41,19 +41,19 @@ class 
PlainClient(Client): def get_properties(self, ins: GetPropertiesIns) -> GetPropertiesRes: """Raise an Exception because this method is not expected to be called.""" - raise Exception() + raise NotImplementedError() def get_parameters(self, ins: GetParametersIns) -> GetParametersRes: """Raise an Exception because this method is not expected to be called.""" - raise Exception() + raise NotImplementedError() def fit(self, ins: FitIns) -> FitRes: """Raise an Exception because this method is not expected to be called.""" - raise Exception() + raise NotImplementedError() def evaluate(self, ins: EvaluateIns) -> EvaluateRes: """Raise an Exception because this method is not expected to be called.""" - raise Exception() + raise NotImplementedError() class NeedsWrappingClient(NumPyClient): @@ -61,23 +61,23 @@ class NeedsWrappingClient(NumPyClient): def get_properties(self, config: Config) -> Dict[str, Scalar]: """Raise an Exception because this method is not expected to be called.""" - raise Exception() + raise NotImplementedError() def get_parameters(self, config: Config) -> NDArrays: """Raise an Exception because this method is not expected to be called.""" - raise Exception() + raise NotImplementedError() def fit( self, parameters: NDArrays, config: Config ) -> Tuple[NDArrays, int, Dict[str, Scalar]]: """Raise an Exception because this method is not expected to be called.""" - raise Exception() + raise NotImplementedError() def evaluate( self, parameters: NDArrays, config: Config ) -> Tuple[float, int, Dict[str, Scalar]]: """Raise an Exception because this method is not expected to be called.""" - raise Exception() + raise NotImplementedError() def test_to_client_with_client() -> None: diff --git a/src/py/flwr/client/dpfedavg_numpy_client.py b/src/py/flwr/client/dpfedavg_numpy_client.py index 41b4d676df43..c39b89b31da3 100644 --- a/src/py/flwr/client/dpfedavg_numpy_client.py +++ b/src/py/flwr/client/dpfedavg_numpy_client.py @@ -117,16 +117,16 @@ def fit( update = 
[np.subtract(x, y) for (x, y) in zip(updated_params, original_params)] if "dpfedavg_clip_norm" not in config: - raise Exception("Clipping threshold not supplied by the server.") + raise KeyError("Clipping threshold not supplied by the server.") if not isinstance(config["dpfedavg_clip_norm"], float): - raise Exception("Clipping threshold should be a floating point value.") + raise TypeError("Clipping threshold should be a floating point value.") # Clipping update, clipped = clip_by_l2(update, config["dpfedavg_clip_norm"]) if "dpfedavg_noise_stddev" in config: if not isinstance(config["dpfedavg_noise_stddev"], float): - raise Exception( + raise TypeError( "Scale of noise to be added should be a floating point value." ) # Noising @@ -138,7 +138,7 @@ def fit( # Calculating value of norm indicator bit, required for adaptive clipping if "dpfedavg_adaptive_clip_enabled" in config: if not isinstance(config["dpfedavg_adaptive_clip_enabled"], bool): - raise Exception( + raise TypeError( "dpfedavg_adaptive_clip_enabled should be a boolean-valued flag." 
) metrics["dpfedavg_norm_bit"] = not clipped diff --git a/src/py/flwr/client/message_handler/task_handler.py b/src/py/flwr/client/message_handler/task_handler.py index 13b1948eec07..3599e1dfb254 100644 --- a/src/py/flwr/client/message_handler/task_handler.py +++ b/src/py/flwr/client/message_handler/task_handler.py @@ -80,8 +80,7 @@ def validate_task_res(task_res: TaskRes) -> bool: initialized_fields_in_task = {field.name for field, _ in task_res.task.ListFields()} # Check if certain fields are already initialized - # pylint: disable-next=too-many-boolean-expressions - if ( + if ( # pylint: disable-next=too-many-boolean-expressions "task_id" in initialized_fields_in_task_res or "group_id" in initialized_fields_in_task_res or "run_id" in initialized_fields_in_task_res diff --git a/src/py/flwr/client/numpy_client.py b/src/py/flwr/client/numpy_client.py index 2312741f5af6..d67fb90512d4 100644 --- a/src/py/flwr/client/numpy_client.py +++ b/src/py/flwr/client/numpy_client.py @@ -242,7 +242,7 @@ def _fit(self: Client, ins: FitIns) -> FitRes: and isinstance(results[1], int) and isinstance(results[2], dict) ): - raise Exception(EXCEPTION_MESSAGE_WRONG_RETURN_TYPE_FIT) + raise TypeError(EXCEPTION_MESSAGE_WRONG_RETURN_TYPE_FIT) # Return FitRes parameters_prime, num_examples, metrics = results @@ -266,7 +266,7 @@ def _evaluate(self: Client, ins: EvaluateIns) -> EvaluateRes: and isinstance(results[1], int) and isinstance(results[2], dict) ): - raise Exception(EXCEPTION_MESSAGE_WRONG_RETURN_TYPE_EVALUATE) + raise TypeError(EXCEPTION_MESSAGE_WRONG_RETURN_TYPE_EVALUATE) # Return EvaluateRes loss, num_examples, metrics = results diff --git a/src/py/flwr/client/rest_client/connection.py b/src/py/flwr/client/rest_client/connection.py index d22b246dbd61..87b06dd0be4e 100644 --- a/src/py/flwr/client/rest_client/connection.py +++ b/src/py/flwr/client/rest_client/connection.py @@ -143,6 +143,7 @@ def create_node() -> None: }, data=create_node_req_bytes, verify=verify, + timeout=None, ) # 
Check status code and headers @@ -185,6 +186,7 @@ def delete_node() -> None: }, data=delete_node_req_req_bytes, verify=verify, + timeout=None, ) # Check status code and headers @@ -225,6 +227,7 @@ def receive() -> Optional[TaskIns]: }, data=pull_task_ins_req_bytes, verify=verify, + timeout=None, ) # Check status code and headers @@ -303,6 +306,7 @@ def send(task_res: TaskRes) -> None: }, data=push_task_res_request_bytes, verify=verify, + timeout=None, ) state[KEY_TASK_INS] = None diff --git a/src/py/flwr/client/secure_aggregation/secaggplus_handler.py b/src/py/flwr/client/secure_aggregation/secaggplus_handler.py index efbb00a9d916..4b74c1ace3de 100644 --- a/src/py/flwr/client/secure_aggregation/secaggplus_handler.py +++ b/src/py/flwr/client/secure_aggregation/secaggplus_handler.py @@ -333,7 +333,7 @@ def _share_keys( # Check if the size is larger than threshold if len(state.public_keys_dict) < state.threshold: - raise Exception("Available neighbours number smaller than threshold") + raise ValueError("Available neighbours number smaller than threshold") # Check if all public keys are unique pk_list: List[bytes] = [] @@ -341,14 +341,14 @@ def _share_keys( pk_list.append(pk1) pk_list.append(pk2) if len(set(pk_list)) != len(pk_list): - raise Exception("Some public keys are identical") + raise ValueError("Some public keys are identical") # Check if public keys of this client are correct in the dictionary if ( state.public_keys_dict[state.sid][0] != state.pk1 or state.public_keys_dict[state.sid][1] != state.pk2 ): - raise Exception( + raise ValueError( "Own public keys are displayed in dict incorrectly, should not happen!" 
) @@ -393,7 +393,7 @@ def _collect_masked_input( ciphertexts = cast(List[bytes], named_values[KEY_CIPHERTEXT_LIST]) srcs = cast(List[int], named_values[KEY_SOURCE_LIST]) if len(ciphertexts) + 1 < state.threshold: - raise Exception("Not enough available neighbour clients.") + raise ValueError("Not enough available neighbour clients.") # Decrypt ciphertexts, verify their sources, and store shares. for src, ciphertext in zip(srcs, ciphertexts): @@ -409,7 +409,7 @@ def _collect_masked_input( f"from {actual_src} instead of {src}." ) if dst != state.sid: - ValueError( + raise ValueError( f"Client {state.sid}: received an encrypted message" f"for Client {dst} from Client {src}." ) @@ -476,7 +476,7 @@ def _unmask(state: SecAggPlusState, named_values: Dict[str, Value]) -> Dict[str, # Send private mask seed share for every avaliable client (including itclient) # Send first private key share for building pairwise mask for every dropped client if len(active_sids) < state.threshold: - raise Exception("Available neighbours number smaller than threshold") + raise ValueError("Available neighbours number smaller than threshold") sids, shares = [], [] sids += active_sids diff --git a/src/py/flwr/common/retry_invoker.py b/src/py/flwr/common/retry_invoker.py index a60fff57e7bf..5441e766983a 100644 --- a/src/py/flwr/common/retry_invoker.py +++ b/src/py/flwr/common/retry_invoker.py @@ -156,6 +156,7 @@ class RetryInvoker: >>> invoker.invoke(my_func, arg1, arg2, kw1=kwarg1) """ + # pylint: disable-next=too-many-arguments def __init__( self, wait_factory: Callable[[], Generator[float, None, None]], diff --git a/src/py/flwr/common/serde.py b/src/py/flwr/common/serde.py index c8c73e87e04a..59f5387b0a07 100644 --- a/src/py/flwr/common/serde.py +++ b/src/py/flwr/common/serde.py @@ -59,7 +59,9 @@ def server_message_to_proto(server_message: typing.ServerMessage) -> ServerMessa server_message.evaluate_ins, ) ) - raise Exception("No instruction set in ServerMessage, cannot serialize to ProtoBuf") 
+ raise ValueError( + "No instruction set in ServerMessage, cannot serialize to ProtoBuf" + ) def server_message_from_proto( @@ -91,7 +93,7 @@ def server_message_from_proto( server_message_proto.evaluate_ins, ) ) - raise Exception( + raise ValueError( "Unsupported instruction in ServerMessage, cannot deserialize from ProtoBuf" ) @@ -125,7 +127,9 @@ def client_message_to_proto(client_message: typing.ClientMessage) -> ClientMessa client_message.evaluate_res, ) ) - raise Exception("No instruction set in ClientMessage, cannot serialize to ProtoBuf") + raise ValueError( + "No instruction set in ClientMessage, cannot serialize to ProtoBuf" + ) def client_message_from_proto( @@ -157,7 +161,7 @@ def client_message_from_proto( client_message_proto.evaluate_res, ) ) - raise Exception( + raise ValueError( "Unsupported instruction in ClientMessage, cannot deserialize from ProtoBuf" ) @@ -474,7 +478,7 @@ def scalar_to_proto(scalar: typing.Scalar) -> Scalar: if isinstance(scalar, str): return Scalar(string=scalar) - raise Exception( + raise ValueError( f"Accepted types: {bool, bytes, float, int, str} (but not {type(scalar)})" ) @@ -518,7 +522,7 @@ def _check_value(value: typing.Value) -> None: for element in value: if isinstance(element, data_type): continue - raise Exception( + raise TypeError( f"Inconsistent type: the types of elements in the list must " f"be the same (expected {data_type}, but got {type(element)})." ) diff --git a/src/py/flwr/driver/app_test.py b/src/py/flwr/driver/app_test.py index 2c3a6d2ccddf..82747e5afb2c 100644 --- a/src/py/flwr/driver/app_test.py +++ b/src/py/flwr/driver/app_test.py @@ -13,7 +13,6 @@ # limitations under the License. 
# ============================================================================== """Flower Driver app tests.""" -# pylint: disable=no-self-use import threading diff --git a/src/py/flwr/driver/driver_test.py b/src/py/flwr/driver/driver_test.py index 92b4230a3932..8f75bbf78362 100644 --- a/src/py/flwr/driver/driver_test.py +++ b/src/py/flwr/driver/driver_test.py @@ -139,6 +139,7 @@ def test_del_with_initialized_driver(self) -> None: self.driver._get_grpc_driver_and_run_id() # Execute + # pylint: disable-next=unnecessary-dunder-call self.driver.__del__() # Assert @@ -147,6 +148,7 @@ def test_del_with_initialized_driver(self) -> None: def test_del_with_uninitialized_driver(self) -> None: """Test cleanup behavior when Driver is not initialized.""" # Execute + # pylint: disable-next=unnecessary-dunder-call self.driver.__del__() # Assert diff --git a/src/py/flwr/driver/grpc_driver.py b/src/py/flwr/driver/grpc_driver.py index b6d42fe799d5..627b95cdb1b4 100644 --- a/src/py/flwr/driver/grpc_driver.py +++ b/src/py/flwr/driver/grpc_driver.py @@ -89,7 +89,7 @@ def create_run(self, req: CreateRunRequest) -> CreateRunResponse: # Check if channel is open if self.stub is None: log(ERROR, ERROR_MESSAGE_DRIVER_NOT_CONNECTED) - raise Exception("`GrpcDriver` instance not connected") + raise ConnectionError("`GrpcDriver` instance not connected") # Call Driver API res: CreateRunResponse = self.stub.CreateRun(request=req) @@ -100,7 +100,7 @@ def get_nodes(self, req: GetNodesRequest) -> GetNodesResponse: # Check if channel is open if self.stub is None: log(ERROR, ERROR_MESSAGE_DRIVER_NOT_CONNECTED) - raise Exception("`GrpcDriver` instance not connected") + raise ConnectionError("`GrpcDriver` instance not connected") # Call gRPC Driver API res: GetNodesResponse = self.stub.GetNodes(request=req) @@ -111,7 +111,7 @@ def push_task_ins(self, req: PushTaskInsRequest) -> PushTaskInsResponse: # Check if channel is open if self.stub is None: log(ERROR, ERROR_MESSAGE_DRIVER_NOT_CONNECTED) - raise 
Exception("`GrpcDriver` instance not connected") + raise ConnectionError("`GrpcDriver` instance not connected") # Call gRPC Driver API res: PushTaskInsResponse = self.stub.PushTaskIns(request=req) @@ -122,7 +122,7 @@ def pull_task_res(self, req: PullTaskResRequest) -> PullTaskResResponse: # Check if channel is open if self.stub is None: log(ERROR, ERROR_MESSAGE_DRIVER_NOT_CONNECTED) - raise Exception("`GrpcDriver` instance not connected") + raise ConnectionError("`GrpcDriver` instance not connected") # Call Driver API res: PullTaskResResponse = self.stub.PullTaskRes(request=req) diff --git a/src/py/flwr/server/fleet/grpc_bidi/grpc_bridge.py b/src/py/flwr/server/fleet/grpc_bidi/grpc_bridge.py index 6ae38ea3d805..4e68499f018d 100644 --- a/src/py/flwr/server/fleet/grpc_bidi/grpc_bridge.py +++ b/src/py/flwr/server/fleet/grpc_bidi/grpc_bridge.py @@ -113,7 +113,7 @@ def _transition(self, next_status: Status) -> None: ): self._status = next_status else: - raise Exception(f"Invalid transition: {self._status} to {next_status}") + raise ValueError(f"Invalid transition: {self._status} to {next_status}") self._cv.notify_all() @@ -129,7 +129,7 @@ def request(self, ins_wrapper: InsWrapper) -> ResWrapper: self._raise_if_closed() if self._status != Status.AWAITING_INS_WRAPPER: - raise Exception("This should not happen") + raise ValueError("This should not happen") self._ins_wrapper = ins_wrapper # Write self._transition(Status.INS_WRAPPER_AVAILABLE) @@ -146,7 +146,7 @@ def request(self, ins_wrapper: InsWrapper) -> ResWrapper: self._transition(Status.AWAITING_INS_WRAPPER) if res_wrapper is None: - raise Exception("ResWrapper can not be None") + raise ValueError("ResWrapper can not be None") return res_wrapper @@ -170,7 +170,7 @@ def ins_wrapper_iterator(self) -> Iterator[InsWrapper]: self._transition(Status.AWAITING_RES_WRAPPER) if ins_wrapper is None: - raise Exception("InsWrapper can not be None") + raise ValueError("InsWrapper can not be None") yield ins_wrapper @@ -180,7 +180,7 
@@ def set_res_wrapper(self, res_wrapper: ResWrapper) -> None: self._raise_if_closed() if self._status != Status.AWAITING_RES_WRAPPER: - raise Exception("This should not happen") + raise ValueError("This should not happen") self._res_wrapper = res_wrapper # Write self._transition(Status.RES_WRAPPER_AVAILABLE) diff --git a/src/py/flwr/server/fleet/grpc_bidi/grpc_bridge_test.py b/src/py/flwr/server/fleet/grpc_bidi/grpc_bridge_test.py index 18a2144072ed..bcfbe6e6fac8 100644 --- a/src/py/flwr/server/fleet/grpc_bidi/grpc_bridge_test.py +++ b/src/py/flwr/server/fleet/grpc_bidi/grpc_bridge_test.py @@ -70,6 +70,7 @@ def test_workflow_successful() -> None: _ = next(ins_wrapper_iterator) bridge.set_res_wrapper(ResWrapper(client_message=ClientMessage())) except Exception as exception: + # pylint: disable-next=broad-exception-raised raise Exception from exception # Wait until worker_thread is finished diff --git a/src/py/flwr/server/fleet/grpc_bidi/ins_scheduler.py b/src/py/flwr/server/fleet/grpc_bidi/ins_scheduler.py index 1c737d31c7fc..0fa6f82a89b5 100644 --- a/src/py/flwr/server/fleet/grpc_bidi/ins_scheduler.py +++ b/src/py/flwr/server/fleet/grpc_bidi/ins_scheduler.py @@ -166,6 +166,6 @@ def _call_client_proxy( evaluate_res_proto = serde.evaluate_res_to_proto(res=evaluate_res) return ClientMessage(evaluate_res=evaluate_res_proto) - raise Exception( + raise ValueError( "Unsupported instruction in ServerMessage, cannot deserialize from ProtoBuf" ) diff --git a/src/py/flwr/server/server_test.py b/src/py/flwr/server/server_test.py index 63ec1021ff5c..9b5c03aeeaf9 100644 --- a/src/py/flwr/server/server_test.py +++ b/src/py/flwr/server/server_test.py @@ -47,14 +47,14 @@ class SuccessClient(ClientProxy): def get_properties( self, ins: GetPropertiesIns, timeout: Optional[float] ) -> GetPropertiesRes: - """Raise an Exception because this method is not expected to be called.""" - raise Exception() + """Raise an error because this method is not expected to be called.""" + raise 
NotImplementedError() def get_parameters( self, ins: GetParametersIns, timeout: Optional[float] ) -> GetParametersRes: - """Raise an Exception because this method is not expected to be called.""" - raise Exception() + """Raise an error because this method is not expected to be called.""" + raise NotImplementedError() def fit(self, ins: FitIns, timeout: Optional[float]) -> FitRes: """Simulate fit by returning a success FitRes with simple set of weights.""" @@ -87,26 +87,26 @@ class FailingClient(ClientProxy): def get_properties( self, ins: GetPropertiesIns, timeout: Optional[float] ) -> GetPropertiesRes: - """Raise an Exception to simulate failure in the client.""" - raise Exception() + """Raise a NotImplementedError to simulate failure in the client.""" + raise NotImplementedError() def get_parameters( self, ins: GetParametersIns, timeout: Optional[float] ) -> GetParametersRes: - """Raise an Exception to simulate failure in the client.""" - raise Exception() + """Raise a NotImplementedError to simulate failure in the client.""" + raise NotImplementedError() def fit(self, ins: FitIns, timeout: Optional[float]) -> FitRes: - """Raise an Exception to simulate failure in the client.""" - raise Exception() + """Raise a NotImplementedError to simulate failure in the client.""" + raise NotImplementedError() def evaluate(self, ins: EvaluateIns, timeout: Optional[float]) -> EvaluateRes: - """Raise an Exception to simulate failure in the client.""" - raise Exception() + """Raise a NotImplementedError to simulate failure in the client.""" + raise NotImplementedError() def reconnect(self, ins: ReconnectIns, timeout: Optional[float]) -> DisconnectRes: - """Raise an Exception to simulate failure in the client.""" - raise Exception() + """Raise a NotImplementedError to simulate failure in the client.""" + raise NotImplementedError() def test_fit_clients() -> None: diff --git a/src/py/flwr/server/state/sqlite_state.py b/src/py/flwr/server/state/sqlite_state.py index 
4f66be3ff262..26f326819971 100644 --- a/src/py/flwr/server/state/sqlite_state.py +++ b/src/py/flwr/server/state/sqlite_state.py @@ -134,7 +134,7 @@ def query( ) -> List[Dict[str, Any]]: """Execute a SQL query.""" if self.conn is None: - raise Exception("State is not initialized.") + raise AttributeError("State is not initialized.") if data is None: data = [] @@ -459,7 +459,7 @@ def delete_tasks(self, task_ids: Set[UUID]) -> None: """ if self.conn is None: - raise Exception("State not intitialized") + raise AttributeError("State not intitialized") with self.conn: self.conn.execute(query_1, data) diff --git a/src/py/flwr/server/state/sqlite_state_test.py b/src/py/flwr/server/state/sqlite_state_test.py index efdd288fc308..a3f899386011 100644 --- a/src/py/flwr/server/state/sqlite_state_test.py +++ b/src/py/flwr/server/state/sqlite_state_test.py @@ -13,7 +13,7 @@ # limitations under the License. # ============================================================================== """Test for utility functions.""" -# pylint: disable=no-self-use, invalid-name, disable=R0904 +# pylint: disable=invalid-name, disable=R0904 import unittest diff --git a/src/py/flwr/server/state/state_test.py b/src/py/flwr/server/state/state_test.py index 88b4b53aed4c..204b4ba97b5f 100644 --- a/src/py/flwr/server/state/state_test.py +++ b/src/py/flwr/server/state/state_test.py @@ -13,7 +13,7 @@ # limitations under the License. 
# ============================================================================== """Tests all state implemenations have to conform to.""" -# pylint: disable=no-self-use, invalid-name, disable=R0904 +# pylint: disable=invalid-name, disable=R0904 import tempfile import unittest diff --git a/src/py/flwr/server/strategy/aggregate.py b/src/py/flwr/server/strategy/aggregate.py index 4eb76111b266..c668b55eebe6 100644 --- a/src/py/flwr/server/strategy/aggregate.py +++ b/src/py/flwr/server/strategy/aggregate.py @@ -27,7 +27,7 @@ def aggregate(results: List[Tuple[NDArrays, int]]) -> NDArrays: """Compute weighted average.""" # Calculate the total number of examples used during training - num_examples_total = sum([num_examples for _, num_examples in results]) + num_examples_total = sum(num_examples for (_, num_examples) in results) # Create a list of weights, each multiplied by the related number of examples weighted_weights = [ @@ -45,7 +45,7 @@ def aggregate(results: List[Tuple[NDArrays, int]]) -> NDArrays: def aggregate_inplace(results: List[Tuple[ClientProxy, FitRes]]) -> NDArrays: """Compute in-place weighted average.""" # Count total examples - num_examples_total = sum([fit_res.num_examples for _, fit_res in results]) + num_examples_total = sum(fit_res.num_examples for (_, fit_res) in results) # Compute scaling factors for each result scaling_factors = [ @@ -95,9 +95,9 @@ def aggregate_krum( # For each client, take the n-f-2 closest parameters vectors num_closest = max(1, len(weights) - num_malicious - 2) closest_indices = [] - for i, _ in enumerate(distance_matrix): + for distance in distance_matrix: closest_indices.append( - np.argsort(distance_matrix[i])[1 : num_closest + 1].tolist() # noqa: E203 + np.argsort(distance)[1 : num_closest + 1].tolist() # noqa: E203 ) # Compute the score for each client, that is the sum of the distances @@ -202,7 +202,7 @@ def aggregate_bulyan( def weighted_loss_avg(results: List[Tuple[int, float]]) -> float: """Aggregate evaluation 
results obtained from multiple clients.""" - num_total_evaluation_examples = sum([num_examples for num_examples, _ in results]) + num_total_evaluation_examples = sum(num_examples for (num_examples, _) in results) weighted_losses = [num_examples * loss for num_examples, loss in results] return sum(weighted_losses) / num_total_evaluation_examples @@ -233,9 +233,9 @@ def _compute_distances(weights: List[NDArrays]) -> NDArray: """ flat_w = np.array([np.concatenate(p, axis=None).ravel() for p in weights]) distance_matrix = np.zeros((len(weights), len(weights))) - for i, _ in enumerate(flat_w): - for j, _ in enumerate(flat_w): - delta = flat_w[i] - flat_w[j] + for i, flat_w_i in enumerate(flat_w): + for j, flat_w_j in enumerate(flat_w): + delta = flat_w_i - flat_w_j norm = np.linalg.norm(delta) distance_matrix[i, j] = norm**2 return distance_matrix diff --git a/src/py/flwr/server/strategy/dpfedavg_adaptive.py b/src/py/flwr/server/strategy/dpfedavg_adaptive.py index 3269735e9d73..8b3278cc9ba0 100644 --- a/src/py/flwr/server/strategy/dpfedavg_adaptive.py +++ b/src/py/flwr/server/strategy/dpfedavg_adaptive.py @@ -91,7 +91,7 @@ def _update_clip_norm(self, results: List[Tuple[ClientProxy, FitRes]]) -> None: norm_bit_set_count = 0 for client_proxy, fit_res in results: if "dpfedavg_norm_bit" not in fit_res.metrics: - raise Exception( + raise KeyError( f"Indicator bit not returned by client with id {client_proxy.cid}." 
) if fit_res.metrics["dpfedavg_norm_bit"]: diff --git a/src/py/flwr/server/strategy/dpfedavg_fixed.py b/src/py/flwr/server/strategy/dpfedavg_fixed.py index 0154cfd79fc5..f2f1c206f3de 100644 --- a/src/py/flwr/server/strategy/dpfedavg_fixed.py +++ b/src/py/flwr/server/strategy/dpfedavg_fixed.py @@ -46,11 +46,11 @@ def __init__( self.num_sampled_clients = num_sampled_clients if clip_norm <= 0: - raise Exception("The clipping threshold should be a positive value.") + raise ValueError("The clipping threshold should be a positive value.") self.clip_norm = clip_norm if noise_multiplier < 0: - raise Exception("The noise multiplier should be a non-negative value.") + raise ValueError("The noise multiplier should be a non-negative value.") self.noise_multiplier = noise_multiplier self.server_side_noising = server_side_noising diff --git a/src/py/flwr/server/strategy/fedavg_android.py b/src/py/flwr/server/strategy/fedavg_android.py index e890f7216020..6678b7ced114 100644 --- a/src/py/flwr/server/strategy/fedavg_android.py +++ b/src/py/flwr/server/strategy/fedavg_android.py @@ -234,12 +234,10 @@ def parameters_to_ndarrays(self, parameters: Parameters) -> NDArrays: """Convert parameters object to NumPy weights.""" return [self.bytes_to_ndarray(tensor) for tensor in parameters.tensors] - # pylint: disable=R0201 def ndarray_to_bytes(self, ndarray: NDArray) -> bytes: """Serialize NumPy array to bytes.""" return ndarray.tobytes() - # pylint: disable=R0201 def bytes_to_ndarray(self, tensor: bytes) -> NDArray: """Deserialize NumPy array from bytes.""" ndarray_deserialized = np.frombuffer(tensor, dtype=np.float32) diff --git a/src/py/flwr/server/strategy/qfedavg.py b/src/py/flwr/server/strategy/qfedavg.py index 94a67fbcbfae..758e8e608e9f 100644 --- a/src/py/flwr/server/strategy/qfedavg.py +++ b/src/py/flwr/server/strategy/qfedavg.py @@ -185,7 +185,7 @@ def norm_grad(grad_list: NDArrays) -> float: hs_ffl = [] if self.pre_weights is None: - raise Exception("QffedAvg pre_weights are None 
in aggregate_fit") + raise AttributeError("QffedAvg pre_weights are None in aggregate_fit") weights_before = self.pre_weights eval_result = self.evaluate( From cf6afe587263b2f9b7029aaf1828145205c99f0d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Jan 2024 21:40:44 +0100 Subject: [PATCH 30/30] Update isort requirement from ==5.12.0 to ==5.13.2 (#2757) Updates the requirements on [isort](https://github.com/pycqa/isort) to permit the latest version. - [Release notes](https://github.com/pycqa/isort/releases) - [Changelog](https://github.com/PyCQA/isort/blob/main/CHANGELOG.md) - [Commits](https://github.com/pycqa/isort/compare/5.12.0...5.13.2) --- updated-dependencies: - dependency-name: isort dependency-type: direct:development ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 24d20c7ced40..cab083b32325 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -84,7 +84,7 @@ types-protobuf = "==3.19.18" types-requests = "==2.31.0.10" types-setuptools = "==69.0.0.20240115" clang-format = "==17.0.4" -isort = "==5.12.0" +isort = "==5.13.2" black = { version = "==23.10.1", extras = ["jupyter"] } docformatter = "==1.7.5" mypy = "==1.6.1"