diff --git a/baselines/flanders/.gitignore b/baselines/flanders/.gitignore new file mode 100644 index 000000000000..4187d73689f0 --- /dev/null +++ b/baselines/flanders/.gitignore @@ -0,0 +1,9 @@ +outputs/* +clients_params/* +flanders/datasets_files/* +*.log +flanders/__pycache__ +MNIST +.DS_Store +*/__pycache__ +multirun \ No newline at end of file diff --git a/baselines/flanders/LICENSE b/baselines/flanders/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/baselines/flanders/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/baselines/flanders/README.md b/baselines/flanders/README.md new file mode 100644 index 000000000000..f5ab6a02d6f3 --- /dev/null +++ b/baselines/flanders/README.md @@ -0,0 +1,157 @@ +--- +title: Protecting Federated Learning from Extreme Model Poisoning Attacks via Multidimensional Time Series Anomaly Detection +url: https://arxiv.org/abs/2303.16668 +labels: [robustness, model poisoning, anomaly detection, autoregressive model, regression, classification] +dataset: [MNIST, FashionMNIST] +--- + +**Paper:** [arxiv.org/abs/2303.16668](https://arxiv.org/abs/2303.16668) + +**Authors:** Edoardo Gabrielli, Gabriele Tolomei, Dimitri Belli, Vittorio Miori + +**Abstract:** Current defense mechanisms against model poisoning attacks in federated learning (FL) systems have proven effective up to a certain threshold of malicious clients. In this work, we introduce FLANDERS, a novel pre-aggregation filter for FL resilient to large-scale model poisoning attacks, i.e., when malicious clients far exceed legitimate participants. FLANDERS treats the sequence of local models sent by clients in each FL round as a matrix-valued time series. Then, it identifies malicious client updates as outliers in this time series by comparing actual observations with estimates generated by a matrix autoregressive forecasting model maintained by the server. 
Experiments conducted in several non-iid FL setups show that FLANDERS significantly improves robustness across a wide spectrum of attacks when paired with standard and robust existing aggregation methods. + +## About this baseline + +**What’s implemented:** The code in this directory replicates the results of FLANDERS+\[baseline\] on MNIST and Fashion-MNIST under all attack settings: Gaussian, LIE, OPT, and AGR-MM; with $r=[0.2,0.6,0.8]$ (i.e., the fraction of malicious clients), specifically about tables 1, 3, 10, 11, 15, 17, 19, 20 and Figure 3. + +**Datasets:** MNIST, FMNIST + +**Hardware Setup:** AMD Ryzen 9, 64 GB RAM, and an NVIDIA 4090 GPU with 24 GB VRAM. + +**Estimated time to run:** You can expect to run experiments on the given setup in 2m with *MNIST* and 3m with *Fashion-MNIST*, without attacks. With an Apple M2 Pro, 16 GB RAM, each experiment with 10 clients for MNIST runs in about 24 minutes. Note that experiments with OPT (fang) and AGR-MM (minmax) can be up to 5x slower. + +**Contributors:** Edoardo Gabrielli, Sapienza University of Rome ([GitHub](https://github.com/edogab33), [Scholar](https://scholar.google.com/citations?user=b3bePdYAAAAJ)) + + +## Experimental Setup + +Please check out Appendix F and G of the paper for a comprehensive overview of the hyperparameters setup; however, here's a summary. + +**Task:** Image classification + +**Models:** + +MNIST (multiclass classification, fully connected, feed forward NN): +- Multilayer Perceptron (MLP) +- minimizing multiclass cross-entropy loss using Adam optimizer +- input: 784 +- hidden layer 1: 128 +- hidden layer 2: 256 + +Fashion-MNIST (multiclass classification, fully connected, feed forward NN): +- Multilayer Perceptron (MLP) +- minimizing multiclass cross-entropy loss using Adam optimizer +- input: 784 +- hidden layer 1: 256 +- hidden layer 2: 128 +- hidden layer 3: 64 + +**Dataset:** Every dataset is partitioned into two disjoint sets: 80% for training and 20% for testing. 
The training set is distributed across all clients (100) by using the Dirichlet distribution with $\alpha=0.5$, simulating a high non-i.i.d. scenario, while the testing set is uniform and held by the server to evaluate the global model. + +| Description | Default Value | +| ----------- | ----- | +| Partitions | 100 | +| Evaluation | centralized | +| Training set | 80% | +| Testing set | 20% | +| Distribution | Dirichlet | +| $\alpha$ | 0.5 | + +**Training Hyperparameters:** + +| Dataset | # of clients | Clients per round | # of rounds | Batch size | Learning rate | Optimizer | Dropout | Alpha | Beta | # of clients to keep | Sampling | +| -- | -- | -- | -- | -- | -- | -- | -- | -- | -- | -- | -- | +| MNIST | 100 | 100 | 50 | 32 | $10^{-3}$ | Adam | 0.2 | 0.0 | 0.0 | $m - b$ | 500 | +| FMNIST | 100 | 100 | 50 | 32 | $10^{-3}$ | Adam | 0.2 | 0.0 | 0.0 | $m - b$ | 500 | + +Where $m$ is the number of clients participating during the n-th round and $b$ is the number of malicious clients. The variable $sampling$ identifies how many parameters MAR analyzes. + + +## Environment Setup + +```bash +# Use a version of Python >=3.9 and <3.12.0. +pyenv local 3.10.12 +poetry env use 3.10.12 + +# Install everything from the toml +poetry install + +# Activate the env +poetry shell +``` + + +## Running the Experiments +Ensure that the environment is properly set up, then run: + +```bash +python -m flanders.main +``` + +To execute a single experiment with the default values in `conf/base.yaml`. 
+ +To run custom experiments, you can override the default values like this: + +```bash +python -m flanders.main dataset=mnist server.attack_fn=lie server.num_malicious=1 +``` + +To run multiple custom experiments: + +```bash +python -m flanders.main --multirun dataset=mnist,fmnist server.attack_fn=gaussian,lie,fang,minmax server.num_malicious=0,1,2,3,4,5 +``` + +## Expected Results + +To run all the experiments of the paper (for MNIST and Fashion-MNIST), I've set up a script: + +```bash +sh run.sh +``` + +This code will produce the output in the file `outputs/all_results.csv`. To generate the plots and tables displayed below, you can use the notebook in the `plotting/` directory. + + +### Accuracy over multiple rounds +**(left) MNIST, FLANDERS+FedAvg with 80% of malicious clients (b = 80); (right) Vanilla FedAvg in the same setting:** + +![acc_over_rounds](_static/screenshot-8.png) + +### Precision and Recall of FLANDERS + +**b = 20:** + +![alt text](_static/screenshot-4.png) +--- + +**b = 60:** + +![alt text](_static/screenshot-5.png) +--- +**b = 80:** + +![alt text](_static/screenshot-6.png) + + +### Accuracy w.r.t. 
number of attackers: +**b = 0:** + +![alt text](_static/screenshot.png) + +--- +**b = 20:** + +![alt text](_static/screenshot-1.png) + +--- +**b = 60:** + +![alt text](_static/screenshot-2.png) + +--- +**b = 80:** + +![alt text](_static/screenshot-3.png) diff --git a/baselines/flanders/_static/screenshot-1.png b/baselines/flanders/_static/screenshot-1.png new file mode 100644 index 000000000000..f9c14a7e72f2 Binary files /dev/null and b/baselines/flanders/_static/screenshot-1.png differ diff --git a/baselines/flanders/_static/screenshot-2.png b/baselines/flanders/_static/screenshot-2.png new file mode 100644 index 000000000000..7aacd2ba5778 Binary files /dev/null and b/baselines/flanders/_static/screenshot-2.png differ diff --git a/baselines/flanders/_static/screenshot-3.png b/baselines/flanders/_static/screenshot-3.png new file mode 100644 index 000000000000..978ed4902bf5 Binary files /dev/null and b/baselines/flanders/_static/screenshot-3.png differ diff --git a/baselines/flanders/_static/screenshot-4.png b/baselines/flanders/_static/screenshot-4.png new file mode 100644 index 000000000000..5a24c47ff513 Binary files /dev/null and b/baselines/flanders/_static/screenshot-4.png differ diff --git a/baselines/flanders/_static/screenshot-5.png b/baselines/flanders/_static/screenshot-5.png new file mode 100644 index 000000000000..e0defab01d22 Binary files /dev/null and b/baselines/flanders/_static/screenshot-5.png differ diff --git a/baselines/flanders/_static/screenshot-6.png b/baselines/flanders/_static/screenshot-6.png new file mode 100644 index 000000000000..bfb3120fef7b Binary files /dev/null and b/baselines/flanders/_static/screenshot-6.png differ diff --git a/baselines/flanders/_static/screenshot-8.png b/baselines/flanders/_static/screenshot-8.png new file mode 100644 index 000000000000..cda98c21d034 Binary files /dev/null and b/baselines/flanders/_static/screenshot-8.png differ diff --git a/baselines/flanders/_static/screenshot.png 
b/baselines/flanders/_static/screenshot.png new file mode 100644 index 000000000000..537ebb66c123 Binary files /dev/null and b/baselines/flanders/_static/screenshot.png differ diff --git a/baselines/flanders/flanders/__init__.py b/baselines/flanders/flanders/__init__.py new file mode 100644 index 000000000000..eb3edd489459 --- /dev/null +++ b/baselines/flanders/flanders/__init__.py @@ -0,0 +1 @@ +"""FLANDERS package.""" diff --git a/baselines/flanders/flanders/attacks.py b/baselines/flanders/flanders/attacks.py new file mode 100644 index 000000000000..9b1acd9ad639 --- /dev/null +++ b/baselines/flanders/flanders/attacks.py @@ -0,0 +1,493 @@ +"""Implementation of attacks used in the paper.""" + +import math +from typing import Dict, List, Tuple + +import numpy as np +from flwr.common import FitRes, ndarrays_to_parameters, parameters_to_ndarrays +from flwr.server.client_proxy import ClientProxy +from scipy.stats import norm + + +# pylint: disable=unused-argument +def no_attack( + ordered_results: List[Tuple[ClientProxy, FitRes]], states: Dict[str, bool], **kwargs +): + """No attack.""" + return ordered_results, {} + + +def gaussian_attack(ordered_results, states, **kwargs): + """Apply Gaussian attack on parameters. + + Parameters + ---------- + ordered_results + List of tuples (client_proxy, fit_result) ordered by client id. + states + Dictionary of client ids and their states (True if malicious, False otherwise). + magnitude + Magnitude of the attack. + dataset_name + Name of the dataset. + + Returns + ------- + results + List of tuples (client_proxy, fit_result) ordered by client id. 
+ """ + magnitude = kwargs.get("magnitude", 0.0) + dataset_name = kwargs.get("dataset_name", "no name") + results = ordered_results.copy() + + def perturbate(vect): + return vect + np.random.normal(loc=0, scale=magnitude, size=vect.size) + + for proxy, fitres in ordered_results: + if states[fitres.metrics["cid"]]: + params = parameters_to_ndarrays(fitres.parameters) + if dataset_name == "income": + new_params = [perturbate(layer) for layer in params] + else: + new_params = [] + for par in params: + # if par is an array of one element, it is a scalar + if par.size == 1: + new_params.append(perturbate(par)) + else: + new_params.append(np.apply_along_axis(perturbate, 0, par)) + fitres.parameters = ndarrays_to_parameters(new_params) + results[int(fitres.metrics["cid"])] = (proxy, fitres) + return results, {} + + +# pylint: disable=too-many-locals, unused-argument +def lie_attack( + ordered_results, + states, + omniscent=True, + **kwargs, +): + """Apply Omniscent LIE attack, Baruch et al. (2019) on parameters. + + Parameters + ---------- + ordered_results + List of tuples (client_proxy, fit_result) ordered by client id. + states + Dictionary of client ids and their states (True if malicious, False otherwise). + omniscent + Whether the attacker knows the local models of all clients or not. + + Returns + ------- + results + List of tuples (client_proxy, fit_result) ordered by client id. 
+ """ + results = ordered_results.copy() + params = [parameters_to_ndarrays(fitres.parameters) for _, fitres in results] + grads_mean = [np.mean(layer, axis=0) for layer in zip(*params)] + grads_stdev = [np.std(layer, axis=0) ** 0.5 for layer in zip(*params)] + + if not omniscent: + # if not omniscent, the attacker doesn't know the + # local models of all clients, but only of the corrupted ones + params = [ + params[i] + for i in range(len(params)) + if states[results[i][1].metrics["cid"]] + ] + + num_clients = len(ordered_results) + num_malicious = sum(val is True for val in states.values()) + + # pylint: disable=c-extension-no-member + num_supporters = math.floor((num_clients / 2) + 1) - num_malicious + + z_max = norm.cdf( + (num_clients - num_malicious - num_supporters) / (num_clients - num_malicious) + ) + + for proxy, fitres in ordered_results: + if states[fitres.metrics["cid"]]: + mul_std = [layer * z_max for layer in grads_stdev] + new_params = [grads_mean[i] - mul_std[i] for i in range(len(grads_mean))] + fitres.parameters = ndarrays_to_parameters(new_params) + results[int(fitres.metrics["cid"])] = (proxy, fitres) + return results, {} + + +def fang_attack( + ordered_results, + states, + omniscent=True, + **kwargs, +): + """Apply Local Model Poisoning Attacks. + + (Fang et al. (2020)) + Specifically designed for Krum, but they claim it works for other + aggregation functions as well. + Full-knowledge version (attackers knows the local models of all clients). + + Parameters + ---------- + ordered_results + List of tuples (client_proxy, fit_result) ordered by client id. + states + Dictionary of client ids and their states (True if malicious, False + otherwise). + omniscent + Whether the attacker knows the local models of all clients or not. + num_layers + Number of layers. + w_re + The received global model. + old_lambda + The lambda from the previous round. + threshold + The threshold for lambda. 
+ malicious_selected + Whether the attacker was selected as malicious in the previous round. + + Returns + ------- + results + List of tuples (client_proxy, fit_result) ordered by client id. + """ + num_layers = kwargs.get("num_layers", 2) + w_re = kwargs.get("w_re", None) # the received global model + threshold = kwargs.get("threshold", 1e-5) + + num_clients = len(ordered_results) + num_corrupted = sum(val is True for val in states.values()) + # there can't be an attack with less than 2 malicious clients + # to avoid division by 0 + num_corrupted = max(num_corrupted, 2) + + if not omniscent: + # if not omniscent, the attacker doesn't know the + # local models of all clients, but only of the corrupted ones + ordered_results = [ + ordered_results[i] + for i in range(len(ordered_results)) + if states[ordered_results[i][1].metrics["cid"]] + ] + + # Initialize lambda + benign = [ + (parameters_to_ndarrays(fitres.parameters), fitres.num_examples) + for _, fitres in ordered_results + if states[fitres.metrics["cid"]] is False + ] + all_params = [ + (parameters_to_ndarrays(fitres.parameters), fitres.num_examples) + for _, fitres in ordered_results + ] + # Compute the smallest distance that Krum would choose + _, _, _, distances = _krum(all_params, num_corrupted, 1) + + idx_benign = [int(cid) for cid in states.keys() if states[cid] is False] + + min_dist = np.min(np.array(distances)[idx_benign]) / ( + ((num_clients - 2) * (num_corrupted - 1)) * np.sqrt(num_layers) + ) + + # Compute max distance from w_re + dist_wre = np.zeros((len(benign))) + for i in range(len(benign)): + dist = [benign[i][0][j] - w_re[j] for j in range(num_layers)] + norm_sums = 0 + for k in dist: + norm_sums += np.linalg.norm(k) + dist_wre[i] = norm_sums**2 + max_dist = np.max(dist_wre) / np.sqrt(num_layers) + lamda = min( + min_dist + max_dist, 999 + ) # lambda (capped to 999 to avoid numerical problems in specific settings) + + malicious_selected, corrupted_params = _fang_corrupt_and_select( + 
all_params, w_re, states, num_corrupted, lamda + ) + while lamda > threshold and malicious_selected is False: + lamda = lamda * 0.5 + malicious_selected, corrupted_params = _fang_corrupt_and_select( + all_params, w_re, states, num_corrupted, lamda + ) + + # Set corrupted clients' updates to w_1 + results = [ + ( + ( + proxy, + FitRes( + fitres.status, + parameters=ndarrays_to_parameters(corrupted_params), + num_examples=fitres.num_examples, + metrics=fitres.metrics, + ), + ) + if states[fitres.metrics["cid"]] + else (proxy, fitres) + ) + for proxy, fitres in ordered_results + ] + + return results, {} + + +def minmax_attack( + ordered_results, + states, + omniscent=True, + **kwargs, +): + """Apply Min-Max agnostic attack. + + Full-knowledge, perturbation function chosen according to our experimental + results. + From: + "Manipulating the Byzantine: Optimizing Model Poisoning Attacks and + Defenses for Federated Learning" (Shejwalkar et al., 2021) + + Parameters + ---------- + ordered_results + List of tuples (client_proxy, fit_result) ordered by client id. + states + Dictionary of client ids and their states (True if malicious, False + otherwise). + omniscent + Whether the attacker knows the local models of all clients or not. + threshold + Threshold for lambda. + lambda_init + Initial value for lambda. + + Returns + ------- + results + List of tuples (client_proxy, fit_result) ordered by client id. 
+ """ + dataset_name = kwargs.get("dataset_name", None) + threshold = kwargs.get("threshold", 1e-5) + lambda_init = kwargs.get("lambda", 5.0) + malicious_num = kwargs.get("malicious_num", 0) + + results = ordered_results.copy() + params = [parameters_to_ndarrays(fitres.parameters) for _, fitres in results] + params_avg = [np.mean(param, axis=0) for param in zip(*params)] + + if not omniscent: + # if not omniscent, the attacker doesn't know the + # local models of all clients, but only of the corrupted ones + results = [ + results[i] + for i in range(len(results)) + if states[results[i][1].metrics["cid"]] + ] + + # Decide what perturbation to use according to the + # results presented in the paper. + if dataset_name == "mnist": + # Apply std perturbation + # In the paper authors state that sign function is the best + # but in my experience std perturbation works better + perturbation_vect = [-np.std(layer, axis=0) for layer in zip(*params)] + elif dataset_name == "cifar": + # Apply std perturbation + perturbation_vect = [-np.std(layer, axis=0) for layer in zip(*params)] + else: + # Apply std perturbation + perturbation_vect = [-np.std(layer, axis=0) for layer in zip(*params)] + + # Compute lambda (referred as gamma in the paper) + lambda_succ = lambda_init + 1 + curr_lambda = lambda_init + step = lambda_init * 0.5 + while ( + abs(lambda_succ - curr_lambda) > threshold + and step > threshold + and malicious_num > 0 + ): + # Compute malicious gradients + perturbed_params = [ + curr_lambda * perturbation_vect[i] for i in range(len(perturbation_vect)) + ] + corrupted_params = [ + params_avg[i] + perturbed_params[i] for i in range(len(params_avg)) + ] + + # Set corrupted clients' updates to corrupted_params + params_c = [ + corrupted_params if states[str(i)] else params[i] + for i in range(len(params)) + ] + distance_matrix = _compute_distances(params_c) + + # Remove from matrix distance_matrix all malicious clients in both + # rows and columns + distance_matrix_b = 
np.delete( + distance_matrix, + [ + i + for i in range(len(distance_matrix)) + if states[results[i][1].metrics["cid"]] + ], + axis=0, + ) + distance_matrix_b = np.delete( + distance_matrix_b, + [ + i + for i in range(len(distance_matrix)) + if states[results[i][1].metrics["cid"]] + ], + axis=1, + ) + + # Remove from distance_matrix all benign clients on + # rows and all malicious on columns + distance_matrix_m = np.delete( + distance_matrix, + [ + i + for i in range(len(distance_matrix)) + if not states[results[i][1].metrics["cid"]] + ], + axis=0, + ) + distance_matrix_m = np.delete( + distance_matrix_m, + [ + i + for i in range(len(distance_matrix)) + if states[results[i][1].metrics["cid"]] + ], + axis=1, + ) + + # Take the maximum distance between any benign client and any malicious one + max_dist_m = np.max(distance_matrix_m) + + # Take the maximum distance between any two benign clients + max_dist_b = np.max(distance_matrix_b) + + # Compute lambda (best scaling coefficient) + if max_dist_m < max_dist_b: + # Lambda (gamma in the paper) is good. Save and try to increase it + lambda_succ = curr_lambda + curr_lambda = curr_lambda + step * 0.5 + else: + # Lambda is to big, must be reduced to increse the chances of being selected + curr_lambda = curr_lambda - step * 0.5 + step *= 0.5 + + # Compute the final malicious update + perturbation_vect = [ + lambda_succ * perturbation_vect[i] for i in range(len(perturbation_vect)) + ] + corrupted_params = [ + params_avg[i] + perturbation_vect[i] for i in range(len(params_avg)) + ] + corrupted_params = ndarrays_to_parameters(corrupted_params) + for proxy, fitres in ordered_results: + if states[fitres.metrics["cid"]]: + fitres.parameters = corrupted_params + results[int(fitres.metrics["cid"])] = (proxy, fitres) + return results, {} + + +def _krum(results, num_malicious, to_keep, num_closest=None): + """Get the best parameters vector according to the Krum function. + + Output: the best parameters vector. 
+ """ + weights = [w for w, _ in results] # list of weights + distance_matrix = _compute_distances(weights) # matrix of distances + + if not num_closest: + num_closest = ( + len(weights) - num_malicious - 2 + ) # number of closest points to use + if num_closest <= 0: + num_closest = 1 + elif num_closest > len(weights): + num_closest = len(weights) + + closest_indices = _get_closest_indices( + distance_matrix, num_closest + ) # indices of closest points + + scores = [ + np.sum(distance_matrix[i, closest_indices[i]]) + for i in range(len(distance_matrix)) + ] # scores i->j for each i + + best_index = np.argmin(scores) # index of the best score + best_indices = np.argsort(scores)[::-1][ + len(scores) - to_keep : + ] # indices of best scores (multikrum) + return weights[best_index], best_index, best_indices, scores + + +def _compute_distances(weights): + """Compute distances between vectors. + + Input: weights - list of weights vectors + Output: distances - matrix distance_matrix of squared distances between the vectors + """ + flat_w = np.array([np.concatenate(par, axis=None).ravel() for par in weights]) + distance_matrix = np.zeros((len(weights), len(weights))) + for i, _ in enumerate(flat_w): + for j, _ in enumerate(flat_w): + delta = flat_w[i] - flat_w[j] + dist = np.linalg.norm(delta) + distance_matrix[i, j] = dist**2 + return distance_matrix + + +def _get_closest_indices(distance_matrix, num_closest): + """Get the indices of the closest points. + + Args: + distance_matrix + matrix of distances + num_closest + number of closest points to get for each parameter vector + Output: + closest_indices + list of lists of indices of the closest points for each vector. 
+ """ + closest_indices = [] + for idx, _ in enumerate(distance_matrix): + closest_indices.append( + np.argsort(distance_matrix[idx])[1 : num_closest + 1].tolist() + ) + return closest_indices + + +def _fang_corrupt_params(global_model, lamda): + # Compute sign vector num_supporters + magnitude = [] + for i, _ in enumerate(global_model): + magnitude.append(np.sign(global_model[i]) * lamda) + + corrupted_params = [ + global_model[i] - magnitude[i] for i in range(len(global_model)) + ] # corrupted model + return corrupted_params + + +def _fang_corrupt_and_select(all_models, global_model, states, num_corrupted, lamda): + # Check that krum selects a malicious client + corrupted_params = _fang_corrupt_params(global_model, lamda) + all_models_m = [ + (corrupted_params, num_examples) if states[str(i)] else (model, num_examples) + for i, (model, num_examples) in enumerate(all_models) + ] + _, idx_best_model, _, _ = _krum(all_models_m, num_corrupted, 1) + + # Check if the best model is malicious + malicious_selected = states[str(idx_best_model)] + return malicious_selected, corrupted_params diff --git a/baselines/flanders/flanders/client.py b/baselines/flanders/flanders/client.py new file mode 100644 index 000000000000..57513ccf7291 --- /dev/null +++ b/baselines/flanders/flanders/client.py @@ -0,0 +1,174 @@ +"""Clients implementation for Flanders.""" + +from collections import OrderedDict +from pathlib import Path +from typing import Tuple + +import flwr as fl +import numpy as np +import ray +import torch + +from .dataset import get_dataloader, mnist_transformation +from .models import ( + FMnistNet, + MnistNet, + test_fmnist, + test_mnist, + train_fmnist, + train_mnist, +) + +XY = Tuple[np.ndarray, np.ndarray] + + +def get_params(model): + """Get model weights as a list of NumPy ndarrays.""" + return [val.cpu().numpy() for _, val in model.state_dict().items()] + + +def set_params(model, params): + """Set model weights from a list of NumPy ndarrays.""" + params_dict = 
zip(model.state_dict().keys(), params) + state_dict = OrderedDict({k: torch.from_numpy(np.copy(v)) for k, v in params_dict}) + model.load_state_dict(state_dict, strict=True) + + +class MnistClient(fl.client.NumPyClient): + """Implementation of MNIST image classification using PyTorch.""" + + def __init__(self, cid, fed_dir_data): + """Instantiate a client for the MNIST dataset.""" + self.cid = cid + self.fed_dir = Path(fed_dir_data) + self.properties = {"tensor_type": "numpy.ndarray"} + + # Instantiate model + self.net = MnistNet() + + # Determine device + # self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + if torch.cuda.is_available(): + self.device = torch.device("cuda") + elif torch.backends.mps.is_available(): + self.device = torch.device("mps") + else: + self.device = torch.device("cpu") + + def get_parameters(self, config): + """Get model parameters as a list of NumPy ndarrays.""" + return get_params(self.net) + + def fit(self, parameters, config): + """Set model parameters from a list of NumPy ndarrays.""" + set_params(self.net, parameters) + + # Load data for this client and get trainloader + num_workers = 1 + trainloader = get_dataloader( + self.fed_dir, + self.cid, + is_train=True, + batch_size=config["batch_size"], + workers=num_workers, + transform=mnist_transformation, + ) + + self.net.to(self.device) + train_mnist(self.net, trainloader, epochs=config["epochs"], device=self.device) + + return ( + get_params(self.net), + len(trainloader.dataset), + {"cid": self.cid, "malicious": config["malicious"]}, + ) + + def evaluate(self, parameters, config): + """Evaluate using local test dataset.""" + set_params(self.net, parameters) + + # Load data for this client and get trainloader + num_workers = len(ray.worker.get_resource_ids()["CPU"]) + valloader = get_dataloader( + self.fed_dir, + self.cid, + is_train=False, + batch_size=50, + workers=num_workers, + transform=mnist_transformation, + ) + + self.net.to(self.device) + loss, 
accuracy = test_mnist(self.net, valloader, device=self.device) + + return float(loss), len(valloader.dataset), {"accuracy": float(accuracy)} + + +class FMnistClient(fl.client.NumPyClient): + """Implementation of MNIST image classification using PyTorch.""" + + def __init__(self, cid, fed_dir_data): + """Instantiate a client for the MNIST dataset.""" + self.cid = cid + self.fed_dir = Path(fed_dir_data) + self.properties = {"tensor_type": "numpy.ndarray"} + + # Instantiate model + self.net = FMnistNet() + + # Determine device + # self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + if torch.cuda.is_available(): + self.device = torch.device("cuda") + elif torch.backends.mps.is_available(): + self.device = torch.device("mps") + else: + self.device = torch.device("cpu") + + def get_parameters(self, config): + """Get model parameters as a list of NumPy ndarrays.""" + return get_params(self.net) + + def fit(self, parameters, config): + """Set model parameters from a list of NumPy ndarrays.""" + set_params(self.net, parameters) + + # Load data for this client and get trainloader + num_workers = 1 + trainloader = get_dataloader( + self.fed_dir, + self.cid, + is_train=True, + batch_size=config["batch_size"], + workers=num_workers, + transform=mnist_transformation, + ) + + self.net.to(self.device) + train_fmnist(self.net, trainloader, epochs=config["epochs"], device=self.device) + + return ( + get_params(self.net), + len(trainloader.dataset), + {"cid": self.cid, "malicious": config["malicious"]}, + ) + + def evaluate(self, parameters, config): + """Evaluate using local test dataset.""" + set_params(self.net, parameters) + + # Load data for this client and get trainloader + num_workers = len(ray.worker.get_resource_ids()["CPU"]) + valloader = get_dataloader( + self.fed_dir, + self.cid, + is_train=False, + batch_size=50, + workers=num_workers, + transform=mnist_transformation, + ) + + self.net.to(self.device) + loss, accuracy = test_fmnist(self.net, 
valloader, device=self.device) + + return float(loss), len(valloader.dataset), {"accuracy": float(accuracy)} diff --git a/baselines/flanders/flanders/conf/aggregate_fn/bulyan.yaml b/baselines/flanders/flanders/conf/aggregate_fn/bulyan.yaml new file mode 100644 index 000000000000..1361f158daf1 --- /dev/null +++ b/baselines/flanders/flanders/conf/aggregate_fn/bulyan.yaml @@ -0,0 +1,9 @@ +--- +name: bulyan + +aggregate_fn: + function: flwr.server.strategy.aggregate.aggregate_bulyan + parameters: + aggregation_name: aggregate_krum + aggregation_module_name: flwr.server.strategy.aggregate + to_keep: 0 # if 0, normal Krum is applied \ No newline at end of file diff --git a/baselines/flanders/flanders/conf/aggregate_fn/fedavg.yaml b/baselines/flanders/flanders/conf/aggregate_fn/fedavg.yaml new file mode 100644 index 000000000000..826a4163b2eb --- /dev/null +++ b/baselines/flanders/flanders/conf/aggregate_fn/fedavg.yaml @@ -0,0 +1,6 @@ +--- +name: fedavg + +aggregate_fn: + function: flwr.server.strategy.aggregate.aggregate + parameters: {} \ No newline at end of file diff --git a/baselines/flanders/flanders/conf/aggregate_fn/fedmedian.yaml b/baselines/flanders/flanders/conf/aggregate_fn/fedmedian.yaml new file mode 100644 index 000000000000..7bf0a725ab6f --- /dev/null +++ b/baselines/flanders/flanders/conf/aggregate_fn/fedmedian.yaml @@ -0,0 +1,6 @@ +--- +name: fedmedian + +aggregate_fn: + function: flwr.server.strategy.aggregate.aggregate_median + parameters: {} \ No newline at end of file diff --git a/baselines/flanders/flanders/conf/aggregate_fn/krum.yaml b/baselines/flanders/flanders/conf/aggregate_fn/krum.yaml new file mode 100644 index 000000000000..220b93d92b3e --- /dev/null +++ b/baselines/flanders/flanders/conf/aggregate_fn/krum.yaml @@ -0,0 +1,7 @@ +--- +name: krum + +aggregate_fn: + function: flwr.server.strategy.aggregate.aggregate_krum + parameters: + to_keep: 10 \ No newline at end of file diff --git 
a/baselines/flanders/flanders/conf/aggregate_fn/trimmedmean.yaml b/baselines/flanders/flanders/conf/aggregate_fn/trimmedmean.yaml new file mode 100644 index 000000000000..d2e418fa9738 --- /dev/null +++ b/baselines/flanders/flanders/conf/aggregate_fn/trimmedmean.yaml @@ -0,0 +1,7 @@ +--- +name: trimmedmean + +aggregate_fn: + function: flwr.server.strategy.aggregate.aggregate_trimmed_avg + parameters: + proportiontocut: 0.4 \ No newline at end of file diff --git a/baselines/flanders/flanders/conf/base.yaml b/baselines/flanders/flanders/conf/base.yaml new file mode 100644 index 000000000000..9742d85e2af8 --- /dev/null +++ b/baselines/flanders/flanders/conf/base.yaml @@ -0,0 +1,27 @@ +defaults: + - _self_ + - strategy: fedavg + - aggregate_fn: fedavg + +dataset: mnist + +server: + _target_: flanders.server.EnhancedServer + num_rounds: 100 + pool_size: 100 + warmup_rounds: 2 + sampling: 500 + history_dir: clients_params + magnitude: 10 + threshold: 1e-05 + attack_fn: gaussian + num_malicious: 0 + omniscent: True + noniidness: 0.5 + +server_device: cpu +seed: 33 + +client_resources: + num_cpus: 1 + num_gpus: 0 \ No newline at end of file diff --git a/baselines/flanders/flanders/conf/strategy/bulyan.yaml b/baselines/flanders/flanders/conf/strategy/bulyan.yaml new file mode 100644 index 000000000000..1692d5d4306c --- /dev/null +++ b/baselines/flanders/flanders/conf/strategy/bulyan.yaml @@ -0,0 +1,8 @@ +--- +name: bulyan + +strategy: + _target_: flwr.server.strategy.Bulyan + _recursive_: true + num_malicious_clients: ${server.num_malicious} + to_keep: 0 # Normal Krum is applied \ No newline at end of file diff --git a/baselines/flanders/flanders/conf/strategy/fedavg.yaml b/baselines/flanders/flanders/conf/strategy/fedavg.yaml new file mode 100644 index 000000000000..1be4b0a0cc5b --- /dev/null +++ b/baselines/flanders/flanders/conf/strategy/fedavg.yaml @@ -0,0 +1,5 @@ +--- +name: fedavg + +strategy: + _target_: flwr.server.strategy.FedAvg \ No newline at end of file diff 
--git a/baselines/flanders/flanders/conf/strategy/fedmedian.yaml b/baselines/flanders/flanders/conf/strategy/fedmedian.yaml new file mode 100644 index 000000000000..d79293f4ca23 --- /dev/null +++ b/baselines/flanders/flanders/conf/strategy/fedmedian.yaml @@ -0,0 +1,5 @@ +--- +name: fedmedian + +strategy: + _target_: flwr.server.strategy.FedMedian \ No newline at end of file diff --git a/baselines/flanders/flanders/conf/strategy/flanders.yaml b/baselines/flanders/flanders/conf/strategy/flanders.yaml new file mode 100644 index 000000000000..0222708dd836 --- /dev/null +++ b/baselines/flanders/flanders/conf/strategy/flanders.yaml @@ -0,0 +1,10 @@ +--- +name: flanders + +strategy: + _target_: flanders.strategy.Flanders + _recursive_: true + num_clients_to_keep: 3 # number of benign local models to filter-out before the aggregation (atm it's set to be pool_size - num_malicious, hard coded in main.py) + maxiter: 100 # number of iterations done by MAR + alpha: 1 + beta: 1 \ No newline at end of file diff --git a/baselines/flanders/flanders/conf/strategy/krum.yaml b/baselines/flanders/flanders/conf/strategy/krum.yaml new file mode 100644 index 000000000000..bc36d37755fa --- /dev/null +++ b/baselines/flanders/flanders/conf/strategy/krum.yaml @@ -0,0 +1,7 @@ +--- +name: krum + +strategy: + _target_: flwr.server.strategy.Krum + num_clients_to_keep: 3 + num_malicious_clients: ${server.num_malicious} \ No newline at end of file diff --git a/baselines/flanders/flanders/conf/strategy/trimmedmean.yaml b/baselines/flanders/flanders/conf/strategy/trimmedmean.yaml new file mode 100644 index 000000000000..561755f82d35 --- /dev/null +++ b/baselines/flanders/flanders/conf/strategy/trimmedmean.yaml @@ -0,0 +1,6 @@ +--- +name: trimmedmean + +strategy: + _target_: flwr.server.strategy.FedTrimmedAvg + beta: 0.2 \ No newline at end of file diff --git a/baselines/flanders/flanders/dataset.py b/baselines/flanders/flanders/dataset.py new file mode 100644 index 000000000000..2c13e80d75c5 --- 
/dev/null +++ b/baselines/flanders/flanders/dataset.py @@ -0,0 +1,289 @@ +"""Dataset utilities for FL experiments.""" + +# Borrowed from adap/Flower examples + +import shutil +from pathlib import Path +from typing import Any, Callable, Optional, Tuple + +import numpy as np +import torch +from PIL import Image +from torch.utils.data import DataLoader, SubsetRandomSampler +from torchvision import datasets, transforms +from torchvision.datasets import VisionDataset + +from .dataset_preparation import create_lda_partitions + + +class Data(torch.utils.data.Dataset): + """Dataset class.""" + + def __init__(self, X, y): + """Initialize dataset.""" + self.X = torch.from_numpy(X.astype(np.float32)) + self.y = torch.from_numpy(y.astype(np.float32)) + self.len = self.X.shape[0] + + def __getitem__(self, index): + """Return data and label pair.""" + return self.X[index], self.y[index] + + def __len__(self): + """Return size of dataset.""" + return self.len + + +def get_dataset(path_to_data: Path, cid: str, partition: str, transform=None): + """Return TorchVisionFL dataset object.""" + # generate path to cid's data + path_to_data = path_to_data / cid / (partition + ".pt") + + return TorchVisionFL(path_to_data, transform=transform) + + +# pylint: disable=too-many-arguments, too-many-locals +def get_dataloader( + path_to_data: str, + cid: str, + is_train: bool, + batch_size: int, + workers: int, + transform=None, +): + """Generate trainset/valset object and return the appropriate dataloader.""" + partition = "train" if is_train else "val" + dataset = get_dataset(Path(path_to_data), str(cid), partition, transform=transform) + + # we use as number of workers all the cpu cores assigned to this actor + kwargs = {"num_workers": workers, "pin_memory": True, "drop_last": False} + return DataLoader(dataset, batch_size=batch_size, **kwargs) + + +def get_random_id_splits(total: int, val_ratio: float, shuffle: bool = True): + """Random split. 
+ + Split a list of length `total` into two following a (1-val_ratio):val_ratio + partitioning. + + By default the indices are shuffled before creating the split and returning. + """ + if isinstance(total, int): + indices = list(range(total)) + else: + indices = total + + split = int(np.floor(val_ratio * len(indices))) + # print(f"Users left out for validation (ratio={val_ratio}) = {split} ") + if shuffle: + np.random.shuffle(indices) + return indices[split:], indices[:split] + + +# pylint: disable=too-many-arguments, too-many-locals +def do_fl_partitioning( + path_to_dataset, pool_size, alpha, num_classes, val_ratio=0.0, seed=None +): + """Torchvision (e.g. CIFAR-10) datasets using LDA.""" + images, labels = torch.load(path_to_dataset) + idx = np.array(range(len(images))) + dataset = [idx, labels] + partitions, _ = create_lda_partitions( + dataset, + num_partitions=pool_size, + concentration=alpha, + accept_imbalanced=True, + seed=seed, + ) + + # Show label distribution for first partition (purely informative) + partition_zero = partitions[0][1] + hist, _ = np.histogram(partition_zero, bins=list(range(num_classes + 1))) + print( + "Class histogram for 0-th partition" + f"(alpha={alpha}, {num_classes} classes): {hist}" + ) + + # now save partitioned dataset to disk + # first delete dir containing splits (if exists), then create it + splits_dir = path_to_dataset.parent / "federated" + if splits_dir.exists(): + shutil.rmtree(splits_dir) + Path.mkdir(splits_dir, parents=True) + + for idx in range(pool_size): + labels = partitions[idx][1] + image_idx = partitions[idx][0] + imgs = images[image_idx] + + # create dir + Path.mkdir(splits_dir / str(idx)) + + if val_ratio > 0.0: + # split data according to val_ratio + train_idx, val_idx = get_random_id_splits(len(labels), val_ratio) + val_imgs = imgs[val_idx] + val_labels = labels[val_idx] + + with open(splits_dir / str(idx) / "val.pt", "wb") as fil: + torch.save([val_imgs, val_labels], fil) + + # remaining images for 
training + imgs = imgs[train_idx] + labels = labels[train_idx] + + with open(splits_dir / str(idx) / "train.pt", "wb") as fil: + torch.save([imgs, labels], fil) + + return splits_dir + + +def mnist_transformation(img): + """Return TorchVision transformation for MNIST.""" + return transforms.Compose( + [ + transforms.ToTensor(), + transforms.Normalize(mean=(0.5,), std=(0.5,)), + ] + )(img) + + +class TorchVisionFL(VisionDataset): + """TorchVision FL class. + + Use this class by either passing a path to a torch file (.pt) containing (data, + targets) or pass the data, targets directly instead. + + This is just a trimmed down version of torchvision.datasets.MNIST. + """ + + def __init__( + self, + path_to_data=None, + data=None, + targets=None, + transform: Optional[Callable] = None, + ) -> None: + """Initialize dataset.""" + path = path_to_data.parent if path_to_data else None + super().__init__(path, transform=transform) + self.transform = transform + + if path_to_data: + # load data and targets (path_to_data points to an specific .pt file) + self.data, self.targets = torch.load(path_to_data) + else: + self.data = data + self.targets = targets + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """Return a tuple (data, target).""" + img, target = self.data[index], int(self.targets[index]) + + # doing this so that it is consistent with all other datasets + # to return a PIL Image + if not isinstance(img, Image.Image): # if not PIL image + if not isinstance(img, np.ndarray): # if torch tensor + img = img.numpy() + + img = Image.fromarray(img) + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self) -> int: + """Return length of dataset.""" + return len(self.data) + + +def get_mnist(path_to_data="flanders/datasets_files/mnist/data"): + """Download MNIST dataset.""" + # download dataset and load train set + train_set = 
datasets.MNIST(root=path_to_data, train=True, download=True) + + # fuse all data splits into a single "training.pt" + data_loc = Path(path_to_data) / "MNIST" + training_data = data_loc / "training.pt" + print("Generating unified MNIST dataset") + torch.save([train_set.data, np.array(train_set.targets)], training_data) + + test_set = datasets.MNIST( + root=path_to_data, train=False, transform=mnist_transformation + ) + + # returns path where training data is and testset + return training_data, test_set + + +def get_fmnist(path_to_data="flanders/datasets_files/fmnist/data"): + """Download FashionMNIST dataset.""" + # download dataset and load train set + train_set = datasets.FashionMNIST(root=path_to_data, train=True, download=True) + + # fuse all data splits into a single "training.pt" + data_loc = Path(path_to_data) / "FashionMNIST" + training_data = data_loc / "training.pt" + print("Generating unified FashionMNIST dataset") + torch.save([train_set.data, np.array(train_set.targets)], training_data) + + test_set = datasets.FashionMNIST( + root=path_to_data, train=False, transform=mnist_transformation + ) + + # returns path where training data is and testset + return training_data, test_set + + +def dataset_partitioner( + dataset: torch.utils.data.Dataset, + batch_size: int, + client_id: int, + number_of_clients: int, + workers: int = 1, +) -> torch.utils.data.DataLoader: + """Make datasets partitions for a specific client_id. + + Parameters + ---------- + dataset: torch.utils.data.Dataset + Dataset to be partitioned into *number_of_clients* subsets. + batch_size: int + Size of mini-batches used by the returned DataLoader. + client_id: int + Unique integer used for selecting a specific partition. + number_of_clients: int + Total number of clients launched during training. + This value dictates the number of partitions to be created. 
+ + Returns + ------- + data_loader: torch.utils.data.Dataset + DataLoader for specific client_id considering number_of_clients partitions. + """ + # Set the seed so we are sure to generate the same global batches + # indices across all clients + np.random.seed(123) + + # Get the data corresponding to this client + dataset_size = len(dataset) + nb_samples_per_clients = dataset_size // number_of_clients + dataset_indices = list(range(dataset_size)) + np.random.shuffle(dataset_indices) + + # Get starting and ending indices w.r.t CLIENT_ID + start_ind = int(client_id) * nb_samples_per_clients + end_ind = start_ind + nb_samples_per_clients + data_sampler = SubsetRandomSampler(dataset_indices[start_ind:end_ind]) + data_loader = torch.utils.data.DataLoader( + dataset, + batch_size=batch_size, + shuffle=False, + sampler=data_sampler, + num_workers=workers, + ) + return data_loader diff --git a/baselines/flanders/flanders/dataset_preparation.py b/baselines/flanders/flanders/dataset_preparation.py new file mode 100644 index 000000000000..3c1cfbe6a5d2 --- /dev/null +++ b/baselines/flanders/flanders/dataset_preparation.py @@ -0,0 +1,490 @@ +# Copyright 2020 Adap GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Commonly used functions for generating partitioned datasets.""" + +# pylint: disable=invalid-name + +from typing import List, Optional, Tuple, Union + +import numpy as np +from numpy.random import BitGenerator, Generator, SeedSequence + +XY = Tuple[np.ndarray, np.ndarray] +XYList = List[XY] +PartitionedDataset = Tuple[XYList, XYList] + + +def float_to_int(i: float) -> int: + """Return float as int but raise if decimal is dropped.""" + if not i.is_integer(): + raise Exception("Cast would drop decimals") + + return int(i) + + +def sort_by_label(x: np.ndarray, y: np.ndarray) -> XY: + """Sort by label. + + Assuming two labels and four examples the resulting label order would be 1,1,2,2 + """ + idx = np.argsort(y, axis=0).reshape((y.shape[0])) + return (x[idx], y[idx]) + + +def sort_by_label_repeating(x: np.ndarray, y: np.ndarray) -> XY: + """Sort by label in repeating groups. + + Assuming two labels and four examples the resulting label order would be 1,2,1,2. + + Create sorting index which is applied to by label sorted x, y + + .. 
code-block:: python + + # given: + y = [ + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9 + ] + + # use: + idx = [ + 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19 + ] + + # so that y[idx] becomes: + y = [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 + ] + """ + x, y = sort_by_label(x, y) + + num_example = x.shape[0] + num_class = np.unique(y).shape[0] + idx = ( + np.array(range(num_example), np.int64) + .reshape((num_class, num_example // num_class)) + .transpose() + .reshape(num_example) + ) + + return (x[idx], y[idx]) + + +def split_at_fraction(x: np.ndarray, y: np.ndarray, fraction: float) -> Tuple[XY, XY]: + """Split x, y at a certain fraction.""" + splitting_index = float_to_int(x.shape[0] * fraction) + # Take everything BEFORE splitting_index + x_0, y_0 = x[:splitting_index], y[:splitting_index] + # Take everything AFTER splitting_index + x_1, y_1 = x[splitting_index:], y[splitting_index:] + return (x_0, y_0), (x_1, y_1) + + +def shuffle(x: np.ndarray, y: np.ndarray) -> XY: + """Shuffle x and y.""" + idx = np.random.permutation(len(x)) + return x[idx], y[idx] + + +def partition(x: np.ndarray, y: np.ndarray, num_partitions: int) -> List[XY]: + """Return x, y as list of partitions.""" + return list(zip(np.split(x, num_partitions), np.split(y, num_partitions))) + + +def combine_partitions(xy_list_0: XYList, xy_list_1: XYList) -> XYList: + """Combine two lists of ndarray Tuples into one list.""" + return [ + (np.concatenate([x_0, x_1], axis=0), np.concatenate([y_0, y_1], axis=0)) + for (x_0, y_0), (x_1, y_1) in zip(xy_list_0, xy_list_1) + ] + + +def create_partitions( + unpartitioned_dataset: XY, + iid_fraction: float, + num_partitions: int, +) -> XYList: + """Create partitioned version of a training or test set. 
+ + Currently tested and supported are MNIST and FashionMNIST + """ + x, y = unpartitioned_dataset + + x, y = shuffle(x, y) + x, y = sort_by_label_repeating(x, y) + + (x_0, y_0), (x_1, y_1) = split_at_fraction(x, y, fraction=iid_fraction) + + # Shift in second split of dataset the classes into two groups + x_1, y_1 = _shift(x_1, y_1) + + xy_0_partitions = partition(x_0, y_0, num_partitions) + xy_1_partitions = partition(x_1, y_1, num_partitions) + + xy_partitions = combine_partitions(xy_0_partitions, xy_1_partitions) + + # Adjust x and y shape + return [adjust_xy_shape(xy) for xy in xy_partitions] + + +def create_partitioned_dataset( + keras_dataset: Tuple[XY, XY], + iid_fraction: float, + num_partitions: int, +) -> Tuple[PartitionedDataset, XY]: + """Create partitioned version of keras dataset. + + Currently tested and supported are MNIST and FashionMNIST + """ + xy_train, xy_test = keras_dataset + + xy_train_partitions = create_partitions( + unpartitioned_dataset=xy_train, + iid_fraction=iid_fraction, + num_partitions=num_partitions, + ) + + xy_test_partitions = create_partitions( + unpartitioned_dataset=xy_test, + iid_fraction=iid_fraction, + num_partitions=num_partitions, + ) + + return (xy_train_partitions, xy_test_partitions), adjust_xy_shape(xy_test) + + +def log_distribution(xy_partitions: XYList) -> None: + """Print label distribution for list of paritions.""" + distro = [np.unique(y, return_counts=True) for _, y in xy_partitions] + for d in distro: + print(d) + + +def adjust_xy_shape(xy: XY) -> XY: + """Adjust shape of both x and y.""" + x, y = xy + if x.ndim == 3: + x = adjust_x_shape(x) + if y.ndim == 2: + y = adjust_y_shape(y) + return (x, y) + + +def adjust_x_shape(nda: np.ndarray) -> np.ndarray: + """Turn shape (x, y, z) into (x, y, z, 1).""" + nda_adjusted = np.reshape(nda, (nda.shape[0], nda.shape[1], nda.shape[2], 1)) + return nda_adjusted + + +def adjust_y_shape(nda: np.ndarray) -> np.ndarray: + """Turn shape (x, 1) into (x).""" + nda_adjusted = 
np.reshape(nda, (nda.shape[0])) + return nda_adjusted + + +def split_array_at_indices( + x: np.ndarray, split_idx: np.ndarray +) -> List[List[np.ndarray]]: + """Split an array `x` into list of elements using starting indices from `split_idx`. + + This function should be used with `unique_indices` from `np.unique()` after + sorting by label. + + Args: + x (np.ndarray): Original array of dimension (N,a,b,c,...) + split_idx (np.ndarray): 1-D array containing increasing number of + indices to be used as partitions. Initial value must be zero. Last value + must be less than N. + + Returns + ------- + List[List[np.ndarray]]: List of list of samples. + """ + if split_idx.ndim != 1: + raise ValueError("Variable `split_idx` must be a 1-D numpy array.") + if split_idx.dtype != np.int64: + raise ValueError("Variable `split_idx` must be of type np.int64.") + if split_idx[0] != 0: + raise ValueError("First value of `split_idx` must be 0.") + if split_idx[-1] >= x.shape[0]: + raise ValueError( + """Last value in `split_idx` must be less than + the number of samples in `x`.""" + ) + if not np.all(split_idx[:-1] <= split_idx[1:]): + raise ValueError("Items in `split_idx` must be in increasing order.") + + num_splits: int = len(split_idx) + split_idx = np.append(split_idx, x.shape[0]) + + list_samples_split: List[List[np.ndarray]] = [[] for _ in range(num_splits)] + for j in range(num_splits): + tmp_x = x[split_idx[j] : split_idx[j + 1]] # noqa: E203 + for sample in tmp_x: + list_samples_split[j].append(sample) + + return list_samples_split + + +def exclude_classes_and_normalize( + distribution: np.ndarray, exclude_dims: List[bool], eps: float = 1e-5 +) -> np.ndarray: + """Exclude classes from a distribution. + + This function is particularly useful when sampling without replacement. + Classes for which no sample is available have their probabilities set to 0. 
+ Classes that had probabilities originally set to 0 are incremented with + `eps` to allow sampling from remaining items. + + Args: + distribution (np.array): Distribution being used. + exclude_dims (List[bool]): Dimensions to be excluded. + eps (float, optional): Small value to be addad to non-excluded dimensions. + Defaults to 1e-5. + + Returns + ------- + np.ndarray: Normalized distributions. + """ + if np.any(distribution < 0) or (not np.isclose(np.sum(distribution), 1.0)): + raise ValueError("distribution must sum to 1 and have only positive values.") + + if distribution.size != len(exclude_dims): + raise ValueError( + """Length of distribution must be equal + to the length `exclude_dims`.""" + ) + if eps < 0: + raise ValueError("""The value of `eps` must be positive and small.""") + + distribution[[not x for x in exclude_dims]] += eps + distribution[exclude_dims] = 0.0 + sum_rows = np.sum(distribution) + np.finfo(float).eps + distribution = distribution / sum_rows + + return distribution + + +def sample_without_replacement( + distribution: np.ndarray, + list_samples: List[List[np.ndarray]], + num_samples: int, + empty_classes: List[bool], +) -> Tuple[XY, List[bool]]: + """Sample from a list without replacement using a given distribution. + + Args: + distribution (np.ndarray): Distribution used for sampling. + list_samples(List[List[np.ndarray]]): List of samples. + num_samples (int): Total number of items to be sampled. + empty_classes (List[bool]): List of booleans indicating which classes are empty. + This is useful to differentiate which classes should still be sampled. + + Returns + ------- + XY: Dataset contaning samples + List[bool]: empty_classes. 
+ """ + if np.sum([len(x) for x in list_samples]) < num_samples: + raise ValueError( + """Number of samples in `list_samples` is less than `num_samples`""" + ) + + # Make sure empty classes are not sampled + # and solves for rare cases where + if not empty_classes: + empty_classes = len(distribution) * [False] + + distribution = exclude_classes_and_normalize( + distribution=distribution, exclude_dims=empty_classes + ) + + data: List[np.ndarray] = [] + target: List[np.ndarray] = [] + + for _ in range(num_samples): + sample_class = np.where(np.random.multinomial(1, distribution) == 1)[0][0] + sample: np.ndarray = list_samples[sample_class].pop() + + data.append(sample) + target.append(sample_class) + + # If last sample of the class was drawn, then set the + # probability density function (PDF) to zero for that class. + if len(list_samples[sample_class]) == 0: + empty_classes[sample_class] = True + # Be careful to distinguish between classes that had zero probability + # and classes that are now empty + distribution = exclude_classes_and_normalize( + distribution=distribution, exclude_dims=empty_classes + ) + data_array: np.ndarray = np.concatenate([data], axis=0) + target_array: np.ndarray = np.array(target, dtype=np.int64) + + return (data_array, target_array), empty_classes + + +def get_partitions_distributions(partitions: XYList) -> Tuple[np.ndarray, List[int]]: + """Evaluate the distribution over classes for a set of partitions. 
+ + Args: + partitions (XYList): Input partitions + + Returns + ------- + np.ndarray: Distributions of size (num_partitions, num_classes) + """ + # Get largest available label + labels = set() + for _, y in partitions: + labels.update(set(y)) + list_labels = sorted(labels) + bin_edges = np.arange(len(list_labels) + 1) + + # Pre-allocate distributions + distributions = np.zeros((len(partitions), len(list_labels)), dtype=np.float32) + for idx, (_, _y) in enumerate(partitions): + hist, _ = np.histogram(_y, bin_edges) + distributions[idx] = hist / hist.sum() + + return distributions, list_labels + + +def create_lda_partitions( + dataset: XY, + dirichlet_dist: Optional[np.ndarray] = None, + num_partitions: int = 100, + concentration: Union[float, np.ndarray, List[float]] = 0.5, + accept_imbalanced: bool = False, + seed: Optional[Union[int, SeedSequence, BitGenerator, Generator]] = None, +) -> Tuple[XYList, np.ndarray]: + r"""Create imbalanced non-iid partitions. + + Create imbalanced non-iid partitions using Latent Dirichlet Allocation (LDA) + without resampling. + + Args: + dataset (XY): Dataset containing samples X and labels Y. + dirichlet_dist (numpy.ndarray, optional): previously generated distribution to + be used. This is useful when applying the same distribution for train and + validation sets. + num_partitions (int, optional): Number of partitions to be created. + Defaults to 100. + concentration (float, np.ndarray, List[float]): Dirichlet Concentration + (:math:`\\alpha`) parameter. Set to float('inf') to get uniform partitions. + An :math:`\\alpha \\to \\Inf` generates uniform distributions over classes. + An :math:`\\alpha \\to 0.0` generates one class per client. Defaults to 0.5. + accept_imbalanced (bool): Whether or not to accept imbalanced output classes. + Default False. + seed (None, int, SeedSequence, BitGenerator, Generator): + A seed to initialize the BitGenerator for generating the Dirichlet + distribution. 
This is defined in Numpy's official documentation as follows: + If None, then fresh, unpredictable entropy will be pulled from the OS. + One may also pass in a SeedSequence instance. + Additionally, when passed a BitGenerator, it will be wrapped by Generator. + If passed a Generator, it will be returned unaltered. + See official Numpy Documentation for further details. + + Returns + ------- + Tuple[XYList, numpy.ndarray]: List of XYList containing partitions + for each dataset and the dirichlet probability density functions. + """ + # pylint: disable=too-many-arguments,too-many-locals + + x, y = dataset + x, y = shuffle(x, y) + x, y = sort_by_label(x, y) + + if (x.shape[0] % num_partitions) and (not accept_imbalanced): + raise ValueError( + """Total number of samples must be a multiple of `num_partitions`. + If imbalanced classes are allowed, set + `accept_imbalanced=True`.""" + ) + + num_samples = num_partitions * [0] + for j in range(x.shape[0]): + num_samples[j % num_partitions] += 1 + + # Get number of classes and verify if they matching with + classes, start_indices = np.unique(y, return_index=True) + + # Make sure that concentration is np.array and + # check if concentration is appropriate + concentration = np.asarray(concentration) + + # Check if concentration is Inf, if so create uniform partitions + partitions: List[XY] = [(_, _) for _ in range(num_partitions)] + if float("inf") in concentration: + partitions = create_partitions( + unpartitioned_dataset=(x, y), + iid_fraction=1.0, + num_partitions=num_partitions, + ) + dirichlet_dist = get_partitions_distributions(partitions)[0] + + return partitions, dirichlet_dist + + if concentration.size == 1: + concentration = np.repeat(concentration, classes.size) + elif concentration.size != classes.size: # Sequence + raise ValueError( + f"The size of the provided concentration ({concentration.size}) ", + f"must be either 1 or equal number of classes {classes.size})", + ) + + # Split into list of list of samples per 
class + list_samples_per_class: List[List[np.ndarray]] = split_array_at_indices( + x, start_indices + ) + + if dirichlet_dist is None: + dirichlet_dist = np.random.default_rng(seed).dirichlet( + alpha=concentration, size=num_partitions + ) + + if dirichlet_dist.size != 0: + if dirichlet_dist.shape != (num_partitions, classes.size): + raise ValueError( + f"""The shape of the provided dirichlet distribution + ({dirichlet_dist.shape}) must match the provided number + of partitions and classes ({num_partitions},{classes.size})""" + ) + + # Assuming balanced distribution + empty_classes = classes.size * [False] + for partition_id in range(num_partitions): + partitions[partition_id], empty_classes = sample_without_replacement( + distribution=dirichlet_dist[partition_id].copy(), + list_samples=list_samples_per_class, + num_samples=num_samples[partition_id], + empty_classes=empty_classes, + ) + + return partitions, dirichlet_dist + + +def _shift(x: np.ndarray, y: np.ndarray) -> XY: + """Shift data. + + Shift x_1, y_1 so that the first half contains only labels 0 to 4 and the second + half 5 to 9. 
+ """ + x, y = sort_by_label(x, y) + + (x_0, y_0), (x_1, y_1) = split_at_fraction(x, y, fraction=0.5) + (x_0, y_0), (x_1, y_1) = shuffle(x_0, y_0), shuffle(x_1, y_1) + x, y = np.concatenate([x_0, x_1], axis=0), np.concatenate([y_0, y_1], axis=0) + return x, y diff --git a/baselines/flanders/flanders/main.py b/baselines/flanders/flanders/main.py new file mode 100644 index 000000000000..022c38b1ef32 --- /dev/null +++ b/baselines/flanders/flanders/main.py @@ -0,0 +1,279 @@ +"""FLANDERS main scrip.""" + +import importlib +import os +import random +import shutil + +import flwr as fl +import hydra +import numpy as np +import pandas as pd +import torch +from flwr.server.client_manager import SimpleClientManager +from hydra.core.hydra_config import HydraConfig +from hydra.utils import instantiate +from omegaconf import DictConfig, OmegaConf + +from .attacks import fang_attack, gaussian_attack, lie_attack, minmax_attack, no_attack +from .client import FMnistClient, MnistClient +from .dataset import do_fl_partitioning, get_fmnist, get_mnist +from .server import EnhancedServer +from .utils import fmnist_evaluate, l2_norm, mnist_evaluate + + +# pylint: disable=too-many-locals, too-many-branches, too-many-statements +@hydra.main(config_path="conf", config_name="base", version_base=None) +def main(cfg: DictConfig) -> None: + """Run the baseline. + + Parameters + ---------- + cfg : DictConfig + An omegaconf object that stores the hydra config. + """ + # 0. Set random seed + seed = cfg.seed + np.random.seed(seed) + np.random.set_state( + np.random.RandomState(seed).get_state() # pylint: disable=no-member + ) + random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + + # 1. 
Print parsed config + print(OmegaConf.to_yaml(cfg)) + + # Skip if: + # - strategy = bulyan and num_malicious > 20 + # - attack_fn != gaussian and num_malicious = 0 + if cfg.strategy.name == "bulyan" and cfg.server.num_malicious > 20: + print( + "Skipping experiment because strategy is bulyan and num_malicious is > 20" + ) + return + # skip if attack_fn is not gaussian and num_malicious is 0, but continue if + # attack_fn is na + if ( + cfg.server.attack_fn != "gaussian" + and cfg.server.num_malicious == 0 + and cfg.server.attack_fn != "na" + ): + print( + "Skipping experiment because attack_fn is not gaussian and " + "num_malicious is 0" + ) + return + + attacks = { + "na": no_attack, + "gaussian": gaussian_attack, + "lie": lie_attack, + "fang": fang_attack, # OPT + "minmax": minmax_attack, # AGR-MM + } + + clients = { + "mnist": (MnistClient, mnist_evaluate), + "fmnist": (FMnistClient, fmnist_evaluate), + } + + # Delete old client_params + if os.path.exists(cfg.server.history_dir): + shutil.rmtree(cfg.server.history_dir) + + dataset_name = cfg.dataset + attack_fn = cfg.server.attack_fn + num_malicious = cfg.server.num_malicious + + # 2. Prepare your dataset + if dataset_name in ["mnist", "fmnist"]: + if dataset_name == "mnist": + train_path, _ = get_mnist() + elif dataset_name == "fmnist": + train_path, _ = get_fmnist() + fed_dir = do_fl_partitioning( + train_path, + pool_size=cfg.server.pool_size, + alpha=cfg.server.noniidness, + num_classes=10, + val_ratio=0.2, + seed=seed, + ) + else: + raise ValueError("Dataset not supported") + + # 3. Define your clients + # pylint: disable=no-else-return + def client_fn(cid: str, dataset_name: str = dataset_name): + client = clients[dataset_name][0] + if dataset_name in ["mnist", "fmnist"]: + return client(cid, fed_dir) + else: + raise ValueError("Dataset not supported") + + # 4. 
Define your strategy + strategy = None + if cfg.strategy.name == "flanders": + function_path = cfg.aggregate_fn.aggregate_fn.function + module_name, function_name = function_path.rsplit(".", 1) + module = importlib.import_module(module_name, package=__package__) + aggregation_fn = getattr(module, function_name) + + strategy = instantiate( + cfg.strategy.strategy, + evaluate_fn=clients[dataset_name][1], + on_fit_config_fn=fit_config, + fraction_fit=1, + fraction_evaluate=0, + min_fit_clients=cfg.server.pool_size, + min_evaluate_clients=0, + num_clients_to_keep=cfg.server.pool_size - num_malicious, + aggregate_fn=aggregation_fn, + aggregate_parameters=cfg.aggregate_fn.aggregate_fn.parameters, + min_available_clients=cfg.server.pool_size, + window=cfg.server.warmup_rounds, + distance_function=l2_norm, + maxiter=cfg.strategy.strategy.maxiter, + alpha=cfg.strategy.strategy.alpha, + beta=int(cfg.strategy.strategy.beta), + ) + elif cfg.strategy.name == "krum": + strategy = instantiate( + cfg.strategy.strategy, + evaluate_fn=clients[dataset_name][1], + on_fit_config_fn=fit_config, + fraction_fit=1, + fraction_evaluate=0, + min_fit_clients=cfg.server.pool_size, + min_evaluate_clients=0, + num_clients_to_keep=cfg.strategy.strategy.num_clients_to_keep, + min_available_clients=cfg.server.pool_size, + num_malicious_clients=num_malicious, + ) + elif cfg.strategy.name == "fedavg": + strategy = instantiate( + cfg.strategy.strategy, + evaluate_fn=clients[dataset_name][1], + on_fit_config_fn=fit_config, + fraction_fit=1, + fraction_evaluate=0, + min_fit_clients=cfg.server.pool_size, + min_evaluate_clients=0, + min_available_clients=cfg.server.pool_size, + ) + elif cfg.strategy.name == "bulyan": + # Get aggregation rule function + strategy = instantiate( + cfg.strategy.strategy, + evaluate_fn=clients[dataset_name][1], + on_fit_config_fn=fit_config, + fraction_fit=1, + fraction_evaluate=0, + min_fit_clients=cfg.server.pool_size, + min_evaluate_clients=0, + 
min_available_clients=cfg.server.pool_size, + num_malicious_clients=num_malicious, + to_keep=cfg.strategy.strategy.to_keep, + ) + elif cfg.strategy.name == "trimmedmean": + strategy = instantiate( + cfg.strategy.strategy, + evaluate_fn=clients[dataset_name][1], + on_fit_config_fn=fit_config, + fraction_fit=1, + fraction_evaluate=0, + min_fit_clients=cfg.server.pool_size, + min_evaluate_clients=0, + min_available_clients=cfg.server.pool_size, + beta=cfg.strategy.strategy.beta, + ) + elif cfg.strategy.name == "fedmedian": + strategy = instantiate( + cfg.strategy.strategy, + evaluate_fn=clients[dataset_name][1], + on_fit_config_fn=fit_config, + fraction_fit=1, + fraction_evaluate=0, + min_fit_clients=cfg.server.pool_size, + min_evaluate_clients=0, + min_available_clients=cfg.server.pool_size, + ) + else: + raise ValueError("Strategy not supported") + + # 5. Start Simulation + history = fl.simulation.start_simulation( + client_fn=client_fn, + num_clients=cfg.server.pool_size, + client_resources=cfg.client_resources, + server=EnhancedServer( + warmup_rounds=cfg.server.warmup_rounds, + num_malicious=num_malicious, + attack_fn=attacks[attack_fn], # type: ignore + magnitude=cfg.server.magnitude, + client_manager=SimpleClientManager(), + strategy=strategy, + sampling=cfg.server.sampling, + history_dir=cfg.server.history_dir, + dataset_name=dataset_name, + threshold=cfg.server.threshold, + omniscent=cfg.server.omniscent, + ), + config=fl.server.ServerConfig(num_rounds=cfg.server.num_rounds), + strategy=strategy, + ) + + save_path = HydraConfig.get().runtime.output_dir + + rounds, test_loss = zip(*history.losses_centralized) + _, test_accuracy = zip(*history.metrics_centralized["accuracy"]) + _, test_auc = zip(*history.metrics_centralized["auc"]) + _, truep = zip(*history.metrics_centralized["TP"]) + _, truen = zip(*history.metrics_centralized["TN"]) + _, falsep = zip(*history.metrics_centralized["FP"]) + _, falsen = zip(*history.metrics_centralized["FN"]) + + if not 
os.path.exists(os.path.join(save_path, "outputs")): + os.makedirs(os.path.join(save_path, "outputs")) + path_to_save = [os.path.join(save_path, "results.csv"), "outputs/all_results.csv"] + + for file_name in path_to_save: + data = pd.DataFrame( + { + "round": rounds, + "loss": test_loss, + "accuracy": test_accuracy, + "auc": test_auc, + "TP": truep, + "TN": truen, + "FP": falsep, + "FN": falsen, + "attack_fn": [attack_fn for _ in range(len(rounds))], + "dataset_name": [dataset_name for _ in range(len(rounds))], + "num_malicious": [num_malicious for _ in range(len(rounds))], + "strategy": [cfg.strategy.name for _ in range(len(rounds))], + "aggregate_fn": [ + cfg.aggregate_fn.aggregate_fn.function for _ in range(len(rounds)) + ], + } + ) + if os.path.exists(file_name): + data.to_csv(file_name, mode="a", header=False, index=False) + else: + data.to_csv(file_name, index=False, header=True) + + +# pylint: disable=unused-argument +def fit_config(server_round): + """Return a configuration with static batch size and (local) epochs.""" + config = { + "epochs": 1, # number of local epochs + "batch_size": 32, + } + return config + + +if __name__ == "__main__": + main() diff --git a/baselines/flanders/flanders/models.py b/baselines/flanders/flanders/models.py new file mode 100644 index 000000000000..2fd10f5496d3 --- /dev/null +++ b/baselines/flanders/flanders/models.py @@ -0,0 +1,164 @@ +"""Models for FLANDERS experiments.""" + +import itertools + +import torch +import torch.nn as nn +import torch.nn.functional as F +from sklearn.metrics import roc_auc_score +from sklearn.preprocessing import LabelBinarizer + + +def roc_auc_multiclass(y_true, y_pred): + """Compute the ROC AUC for multiclass classification.""" + l_b = LabelBinarizer() + l_b.fit(y_true) + y_true = l_b.transform(y_true) + y_pred = l_b.transform(y_pred) + return roc_auc_score(y_true, y_pred, multi_class="ovr") + + +class MnistNet(nn.Module): + """Neural network for MNIST classification.""" + + def __init__(self): 
+ super().__init__() + self.fc1 = nn.Linear(28 * 28, 128) + self.fc2 = nn.Linear(128, 10) + + def forward(self, x): + """Forward pass through the network.""" + x = x.view(-1, 28 * 28) + x = F.relu(self.fc1(x)) + x = self.fc2(x) + return F.log_softmax(x, dim=1) + + +def train_mnist(model, dataloader, epochs, device): + """Train the network on the training set.""" + criterion = nn.CrossEntropyLoss() + optimizer = torch.optim.Adam(model.parameters(), lr=0.001) + + for epoch in range(epochs): + for i, (images, labels) in enumerate(dataloader): + images = images.view(-1, 28 * 28).to(device) + labels = labels.to(device) + + optimizer.zero_grad() + outputs = model(images) + loss = criterion(outputs, labels) + loss.backward() + optimizer.step() + + if (i + 1) % 100 == 0: + print( + f"Epoch [{epoch+1}/{epochs}], " + f"Step [{i+1}/{len(dataloader)}], " + f"Loss: {loss.item():.4f}" + ) + + +# pylint: disable=too-many-locals +def test_mnist(model, dataloader, device): + """Validate the network on the entire test set.""" + loss = 0 + model.eval() + criterion = nn.CrossEntropyLoss() + y_true, y_pred = [], [] + with torch.no_grad(): + n_correct = 0 + n_samples = 0 + for images, labels in dataloader: + images = images.reshape(-1, 28 * 28).to(device) + labels = labels.to(device) + outputs = model(images) + # max returns (value ,index) + _, predicted = torch.max(outputs.data, 1) + n_samples += labels.size(0) + n_correct += (predicted == labels).sum().item() + loss += criterion(outputs, labels).item() + y_true.append(labels.cpu().numpy()) + y_pred.append(predicted.cpu().numpy()) + y_true = list(itertools.chain(*y_true)) + y_pred = list(itertools.chain(*y_pred)) + auc = roc_auc_multiclass(y_true, y_pred) + acc = n_correct / n_samples + return loss, acc, auc + + +class FMnistNet(nn.Module): + """Neural network for Fashion MNIST classification.""" + + def __init__(self): + super().__init__() + self.fc1 = nn.Linear(784, 256) + self.fc2 = nn.Linear(256, 128) + self.fc3 = nn.Linear(128, 
64) + self.fc4 = nn.Linear(64, 10) + + # Dropout module with a 0.2 drop probability + self.dropout = nn.Dropout(p=0.2) + + def forward(self, x): + """Forward pass through the network.""" + # Flatten the input tensor + x = x.view(x.shape[0], -1) + # Set the activation functions + x = self.dropout(F.relu(self.fc1(x))) + x = self.dropout(F.relu(self.fc2(x))) + x = self.dropout(F.relu(self.fc3(x))) + x = F.log_softmax(self.fc4(x), dim=1) + + return x + + +def train_fmnist(model, dataloader, epochs, device): + """Train the network on the training set.""" + criterion = nn.NLLLoss(reduction="sum") + optimizer = torch.optim.Adam(model.parameters(), lr=0.003) + + for epoch in range(epochs): + for i, (images, labels) in enumerate(dataloader): + images = images.view(-1, 28 * 28).to(device) + labels = labels.to(device) + + optimizer.zero_grad() + outputs = model(images) + loss = criterion(outputs, labels) + loss.backward() + optimizer.step() + + if (i + 1) % 100 == 0: + print( + f"Epoch [{epoch+1}/{epochs}], " + f"Step [{i+1}/{len(dataloader)}], " + f"Loss: {loss.item():.4f}" + ) + + +# pylint: disable=too-many-locals +def test_fmnist(model, dataloader, device): + """Validate the network on the entire test set.""" + loss = 0 + model.eval() + criterion = nn.NLLLoss(reduction="sum") + y_true, y_pred = [], [] + with torch.no_grad(): + n_correct = 0 + n_samples = 0 + for images, labels in dataloader: + images = images.reshape(-1, 28 * 28).to(device) + labels = labels.to(device) + outputs = model(images) + # max returns (value ,index) + _, predicted = torch.max(outputs.data, 1) + n_samples += labels.size(0) + n_correct += (predicted == labels).sum().item() + loss += criterion(outputs, labels).item() + y_true.append(labels.cpu().numpy()) + y_pred.append(predicted.cpu().numpy()) + y_true = list(itertools.chain(*y_true)) + y_pred = list(itertools.chain(*y_pred)) + auc = roc_auc_multiclass(y_true, y_pred) + acc = n_correct / n_samples + return loss, acc, auc diff --git 
a/baselines/flanders/flanders/server.py b/baselines/flanders/flanders/server.py new file mode 100644 index 000000000000..622aa890a966 --- /dev/null +++ b/baselines/flanders/flanders/server.py @@ -0,0 +1,384 @@ +"""Server with enhanced functionality. + +It can be used to simulate an attacker that controls a fraction of the clients and to +save the parameters of each client in its memory. +""" + +import timeit +from logging import DEBUG, INFO +from typing import Any, Callable, Dict, List, Tuple, Union + +import numpy as np +from flwr.common import DisconnectRes, EvaluateRes, FitRes, parameters_to_ndarrays +from flwr.common.logger import log +from flwr.server.client_proxy import ClientProxy +from flwr.server.history import History +from flwr.server.server import Server, fit_clients + +from .strategy import Flanders +from .utils import flatten_params, save_params, update_confusion_matrix + +FitResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, FitRes]], + List[Union[Tuple[ClientProxy, FitRes], BaseException]], +] +EvaluateResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, EvaluateRes]], + List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], +] +ReconnectResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, DisconnectRes]], + List[Union[Tuple[ClientProxy, DisconnectRes], BaseException]], +] + + +class EnhancedServer(Server): + """Server with enhanced functionality.""" + + # pylint: disable=too-many-arguments,too-many-instance-attributes + def __init__( + self, + num_malicious: int, + warmup_rounds: int, + attack_fn: Callable, + dataset_name: str, + *args: Any, + threshold: float = 0.0, + to_keep: int = 1, + magnitude: float = 0.0, + sampling: int = 0, + history_dir: str = "clients_params", + omniscent: bool = True, + **kwargs: Any, + ) -> None: + """Create a new EnhancedServer instance. 
+ + Parameters + ---------- + num_malicious : int + Number of malicious clients + warmup_rounds : int + Number of warmup rounds + attack_fn : Callable + Attack function to be used + dataset_name : str + Name of the dataset + threshold : float, optional + Threshold used by the attacks, by default 0.0 + to_keep : int, optional + Number of clients to keep (i.e., to classify as "good"), by default 1 + magnitude : float, optional + Magnitude of the Gaussian attack, by default 0.0 + sampling : int, optional + Number of parameters to sample, by default 0 + history_dir : str, optional + Directory where to save the parameters, by default "clients_params" + omniscent : bool, optional + Whether to use the omniscent attack, by default True + """ + super().__init__(*args, **kwargs) + self.num_malicious = num_malicious + self.warmup_rounds = warmup_rounds + self.attack_fn = attack_fn + self.sampling = sampling + self.aggregated_parameters: List = [] + self.params_indexes: List = [] + self.history_dir = history_dir + self.dataset_name = dataset_name + self.magnitude = magnitude + self.threshold = threshold + self.to_keep = to_keep + self.omniscent = omniscent + self.malicious_lst: List = [] + self.confusion_matrix = {"TP": 0, "TN": 0, "FP": 0, "FN": 0} + self.clients_state: Dict[str, bool] = {} + self.good_clients_idx: List[int] = [] + self.malicious_clients_idx: List[int] = [] + + # pylint: disable=too-many-locals + def fit(self, num_rounds, timeout): + """Run federated averaging for a number of rounds.""" + history = History() + + # Initialize parameters + log(INFO, "Initializing global parameters") + self.parameters = self._get_initial_parameters(timeout=timeout) + log(INFO, "Evaluating initial parameters") + res = self.strategy.evaluate(0, parameters=self.parameters) + + if res is not None: + log( + INFO, + "initial parameters (loss, other metrics): %s, %s", + res[0], + res[1], + ) + res[1]["TP"] = 0 + res[1]["TN"] = 0 + res[1]["FP"] = 0 + res[1]["FN"] = 0 + 
history.add_loss_centralized(server_round=0, loss=res[0]) + history.add_metrics_centralized(server_round=0, metrics=res[1]) + + # Run federated learning for num_rounds + log(INFO, "FL starting") + start_time = timeit.default_timer() + + for current_round in range(1, num_rounds + 1): + # Train model and replace previous global model + res_fit = self.fit_round( + server_round=current_round, + timeout=timeout, + ) + if res_fit is not None: + parameters_prime, fit_metrics, _ = res_fit # fit_metrics_aggregated + if parameters_prime: + self.parameters = parameters_prime + history.add_metrics_distributed_fit( + server_round=current_round, metrics=fit_metrics + ) + + # Evaluate model using strategy implementation + res_cen = self.strategy.evaluate(current_round, parameters=self.parameters) + if res_cen is not None: + loss_cen, metrics_cen = res_cen + # Update confusion matrix + if current_round > self.warmup_rounds: + self.confusion_matrix = update_confusion_matrix( + self.confusion_matrix, + self.clients_state, + self.malicious_clients_idx, + self.good_clients_idx, + ) + + for key, val in self.confusion_matrix.items(): + metrics_cen[key] = val + + log( + INFO, + "fit progress: (%s, %s, %s, %s)", + current_round, + loss_cen, + metrics_cen, + timeit.default_timer() - start_time, + ) + history.add_loss_centralized(server_round=current_round, loss=loss_cen) + history.add_metrics_centralized( + server_round=current_round, metrics=metrics_cen + ) + + # Evaluate model on a sample of available clients + res_fed = self.evaluate_round(server_round=current_round, timeout=timeout) + if res_fed is not None: + loss_fed, evaluate_metrics_fed, _ = res_fed + if loss_fed is not None: + history.add_loss_distributed( + server_round=current_round, loss=loss_fed + ) + history.add_metrics_distributed( + server_round=current_round, metrics=evaluate_metrics_fed + ) + + # Bookkeeping + end_time = timeit.default_timer() + elapsed = end_time - start_time + log(INFO, "FL finished in %s", elapsed) + 
return history + + # pylint: disable-msg=R0915 + def fit_round( + self, + server_round, + timeout, + ): + # pylint: disable-msg=R0912 + """Perform a single round of federated learning.""" + # Get clients and their respective instructions from strategy + client_instructions = self.strategy.configure_fit( + server_round=server_round, + parameters=self.parameters, + client_manager=self._client_manager, + ) + + if not client_instructions: + log(INFO, "fit_round %s: no clients selected, cancel", server_round) + return None + log( + DEBUG, + "fit_round %s: strategy sampled %s clients (out of %s)", + server_round, + len(client_instructions), + self._client_manager.num_available(), + ) + + # Randomly decide which client is malicious + size = self.num_malicious + if server_round <= self.warmup_rounds: + size = 0 + log(INFO, "Selecting %s malicious clients", size) + self.malicious_lst = np.random.choice( + [proxy.cid for proxy, _ in client_instructions], size=size, replace=False + ) + + # Create dict clients_state to keep track of malicious clients + # and send the information to the clients + clients_state = {} + for _, (proxy, ins) in enumerate(client_instructions): + clients_state[proxy.cid] = False + ins.config["malicious"] = False + if proxy.cid in self.malicious_lst: + clients_state[proxy.cid] = True + ins.config["malicious"] = True + + # Sort clients states + clients_state = {k: clients_state[k] for k in sorted(clients_state)} + log( + DEBUG, + "fit_round %s: malicious clients selected %s, clients_state %s", + server_round, + self.malicious_lst, + clients_state, + ) + + # Collect `fit` results from all clients participating in this round + results, failures = fit_clients( + client_instructions=client_instructions, + max_workers=self.max_workers, + timeout=timeout, + ) + log( + DEBUG, + "fit_round %s received %s results and %s failures", + server_round, + len(results), + len(failures), + ) + + # Save parameters of each client as time series + ordered_results = [0 for _ 
in range(len(results))] + for proxy, fitres in results: + params = flatten_params(parameters_to_ndarrays(fitres.parameters)) + if self.sampling > 0: + # if the sampling number is greater than the number of + # parameters, just sample all of them + self.sampling = min(self.sampling, len(params)) + if len(self.params_indexes) == 0: + # Sample a random subset of parameters + self.params_indexes = np.random.randint( + 0, len(params), size=self.sampling + ) + + params = params[self.params_indexes] + + save_params(params, fitres.metrics["cid"], params_dir=self.history_dir) + + # Re-arrange results in the same order as clients' cids impose + ordered_results[int(fitres.metrics["cid"])] = (proxy, fitres) + + log(INFO, "Clients state: %s", clients_state) + + # Initialize aggregated_parameters if it is the first round + if self.aggregated_parameters == []: + for key, val in clients_state.items(): + if val is False: + self.aggregated_parameters = parameters_to_ndarrays( + ordered_results[int(key)][1].parameters + ) + break + + # Apply attack function + # the server simulates an attacker that controls a fraction of the clients + if self.attack_fn is not None and server_round > self.warmup_rounds: + log(INFO, "Applying attack function") + results, _ = self.attack_fn( + ordered_results, + clients_state, + omniscent=self.omniscent, + magnitude=self.magnitude, + w_re=self.aggregated_parameters, + threshold=self.threshold, + d=len(self.aggregated_parameters), + dataset_name=self.dataset_name, + to_keep=self.to_keep, + malicious_num=self.num_malicious, + num_layers=len(self.aggregated_parameters), + ) + + # Update saved parameters time series after the attack + for _, fitres in results: + if clients_state[fitres.metrics["cid"]]: + if self.sampling > 0: + params = flatten_params( + parameters_to_ndarrays(fitres.parameters) + )[self.params_indexes] + else: + params = flatten_params( + parameters_to_ndarrays(fitres.parameters) + ) + log( + INFO, + "Saving parameters of client %s with 
shape %s after the attack", + fitres.metrics["cid"], + params.shape, + ) + save_params( + params, + fitres.metrics["cid"], + params_dir=self.history_dir, + remove_last=True, + ) + else: + results = ordered_results + + # Aggregate training results + log(INFO, "fit_round - Aggregating training results") + good_clients_idx = [] + malicious_clients_idx = [] + aggregated_result = self.strategy.aggregate_fit(server_round, results, failures) + if isinstance(self.strategy, Flanders): + parameters_aggregated, metrics_aggregated = aggregated_result + malicious_clients_idx = metrics_aggregated["malicious_clients_idx"] + good_clients_idx = metrics_aggregated["good_clients_idx"] + + log(INFO, "Malicious clients: %s", malicious_clients_idx) + + log(INFO, "clients_state: %s", clients_state) + + # For clients detected as malicious, replace the last params in + # their history with the current global model, otherwise the + # forecasting in next round won't be reliable (see the paper for + # more details) + if server_round > self.warmup_rounds: + log(INFO, "Saving parameters of clients") + for idx in malicious_clients_idx: + if self.sampling > 0: + new_params = flatten_params( + parameters_to_ndarrays(parameters_aggregated) + )[self.params_indexes] + else: + new_params = flatten_params( + parameters_to_ndarrays(parameters_aggregated) + ) + + log( + INFO, + "Saving parameters of client %s with shape %s", + idx, + new_params.shape, + ) + save_params( + new_params, + idx, + params_dir=self.history_dir, + remove_last=True, + rrl=False, + ) + else: + # Aggregate training results + log(INFO, "fit_round - Aggregating training results") + parameters_aggregated, metrics_aggregated = aggregated_result + + self.clients_state = clients_state + self.good_clients_idx = good_clients_idx + self.malicious_clients_idx = malicious_clients_idx + return parameters_aggregated, metrics_aggregated, (results, failures) diff --git a/baselines/flanders/flanders/strategy.py 
b/baselines/flanders/flanders/strategy.py new file mode 100644 index 000000000000..36dbc1182653 --- /dev/null +++ b/baselines/flanders/flanders/strategy.py @@ -0,0 +1,375 @@ +"""FLANDERS strategy.""" + +import importlib +import typing +from logging import INFO, WARNING +from typing import Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +from flwr.common import ( + FitIns, + FitRes, + MetricsAggregationFn, + NDArrays, + Parameters, + Scalar, + ndarrays_to_parameters, + parameters_to_ndarrays, +) +from flwr.common.logger import log +from flwr.server.client_manager import ClientManager +from flwr.server.client_proxy import ClientProxy +from flwr.server.strategy.aggregate import aggregate +from flwr.server.strategy.fedavg import FedAvg + +from .utils import load_all_time_series + +WARNING_MIN_AVAILABLE_CLIENTS_TOO_LOW = """ +Setting `min_available_clients` lower than `min_fit_clients` or +`min_evaluate_clients` can cause the server to fail when there are too few clients +connected to the server. `min_available_clients` must be set to a value larger +than or equal to the values of `min_fit_clients` and `min_evaluate_clients`. +""" + + +class Flanders(FedAvg): + """Aggregation function based on MAR. + + Take a look at the paper for more details about the parameters. 
+ """ + + # pylint: disable=too-many-arguments,too-many-instance-attributes, too-many-locals + def __init__( + self, + fraction_fit: float = 1.0, + fraction_evaluate: float = 1.0, + min_fit_clients: int = 2, + min_evaluate_clients: int = 2, + min_available_clients: int = 2, + evaluate_fn: Optional[ + Callable[ + [int, NDArrays, Dict[str, Scalar]], + Optional[Tuple[float, Dict[str, Scalar]]], + ] + ] = None, + on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + accept_failures: bool = True, + initial_parameters: Optional[Parameters] = None, + fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, + evaluate_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, + num_clients_to_keep: int = 1, + aggregate_fn: Callable = aggregate, + aggregate_parameters: Optional[Dict[str, Scalar]] = None, + window: int = 0, + maxiter: int = 100, + alpha: float = 1, + beta: float = 1, + distance_function=None, + ) -> None: + """Initialize FLANDERS. 
+ + Parameters + ---------- + fraction_fit : float, optional + Fraction of clients used during the fit phase, by default 1.0 + fraction_evaluate : float, optional + Fraction of clients used during the evaluate phase, by default 1.0 + min_fit_clients : int, optional + Minimum number of clients used during the fit phase, by default 2 + min_evaluate_clients : int, optional + Minimum number of clients used during the evaluate phase, by + default 2 + min_available_clients : int, optional + Minimum number of clients available for training and evaluation, by + default 2 + evaluate_fn : Optional[Callable[[int, NDArrays, Dict[str, Scalar]], + Optional[Tuple[float, Dict[str, Scalar]]]]], optional + Evaluation function, by default None + on_fit_config_fn : Optional[Callable[[int], Dict[str, Scalar]]], + optional + Function to generate the config fed to the clients during the fit + phase, by default None + on_evaluate_config_fn : Optional[Callable[[int], Dict[str, Scalar]]], + optional + Function to generate the config fed to the clients during the + evaluate phase, by default None + accept_failures : bool, optional + Whether to accept failures from clients, by default True + initial_parameters : Optional[Parameters], optional + Initial model parameters, by default None + fit_metrics_aggregation_fn : Optional[MetricsAggregationFn], optional + Function to aggregate metrics during the fit phase, by default None + evaluate_metrics_aggregation_fn : Optional[MetricsAggregationFn], + optional + Function to aggregate metrics during the evaluate phase, by default + None + num_clients_to_keep : int, optional + Number of clients to keep (i.e., to classify as "good"), by default + 1 + aggregate_fn : Callable[[List[Tuple[NDArrays, int]]], NDArrays], + optional + Function to aggregate the parameters, by default FedAvg + window : int, optional + Sliding window size used as a "training set" of MAR, by default 0 + maxiter : int, optional + Maximum number of iterations of MAR, by default 100 + 
alpha : float, optional + Alpha parameter (regularization), by default 1 + beta : float, optional + Beta parameter (regularization), by default 1 + distance_function : Callable, optional + Distance function used to compute the distance between predicted + params and real ones, by default None + """ + super().__init__( + fraction_fit=fraction_fit, + fraction_evaluate=fraction_evaluate, + min_fit_clients=min_fit_clients, + min_evaluate_clients=min_evaluate_clients, + min_available_clients=min_available_clients, + evaluate_fn=evaluate_fn, + on_fit_config_fn=on_fit_config_fn, + on_evaluate_config_fn=on_evaluate_config_fn, + accept_failures=accept_failures, + initial_parameters=initial_parameters, + fit_metrics_aggregation_fn=fit_metrics_aggregation_fn, + evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation_fn, + ) + self.num_clients_to_keep = num_clients_to_keep + self.window = window + self.maxiter = maxiter + self.alpha = alpha + self.beta = beta + self.params_indexes = None + self.distance_function = distance_function + self.aggregate_fn = aggregate_fn + self.aggregate_parameters = aggregate_parameters + if self.aggregate_parameters is None: + self.aggregate_parameters = {} + + @typing.no_type_check + def configure_fit( + self, server_round: int, parameters: Parameters, client_manager: ClientManager + ) -> List[Tuple[ClientProxy, FitIns]]: + """Configure the next round of training.""" + # Sample clients + sample_size, min_num_clients = self.num_fit_clients( + client_manager.num_available() + ) + + # Custom FitIns object for each client + fit_ins_list = [ + FitIns( + parameters, + ( + {} + if not self.on_fit_config_fn + else self.on_fit_config_fn(server_round) + ), + ) + for _ in range(sample_size) + ] + + clients = client_manager.sample( + num_clients=sample_size, min_num_clients=min_num_clients + ) + + # Return client/config pairs + result = [] + for client, fit in zip(clients, fit_ins_list): + result.append((client, fit)) + return result + + # pylint: 
disable=too-many-locals,too-many-statements + @typing.no_type_check + def aggregate_fit( + self, + server_round: int, + results: List[Tuple[ClientProxy, FitRes]], + failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], + ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + """Apply MAR forecasting to exclude malicious clients from FedAvg. + + Parameters + ---------- + server_round : int + Current server round. + results : List[Tuple[ClientProxy, FitRes]] + List of results from the clients. + failures : List[Union[Tuple[ClientProxy, FitRes], BaseException]] + List of failures from the clients. + + Returns + ------- + parameters_aggregated: Optional[Parameters] + Aggregated parameters. + metrics_aggregated: Dict[str, Scalar] + Aggregated metrics; also carries the entries + "good_clients_idx" and "malicious_clients_idx" holding the + selected good/malicious clients' indexes (cids). + """ + good_clients_idx = [] + malicious_clients_idx = [] + if server_round > 1: + if server_round < self.window: + self.window = server_round + params_tensor = load_all_time_series( + params_dir="clients_params", window=self.window + ) + params_tensor = np.transpose( + params_tensor, (0, 2, 1) + ) # (clients, params, time) + ground_truth = params_tensor[:, :, -1].copy() + pred_step = 1 + log(INFO, "Computing MAR on params_tensor %s", params_tensor.shape) + predicted_matrix = mar( + params_tensor[:, :, :-1], + pred_step, + maxiter=self.maxiter, + alpha=self.alpha, + beta=self.beta, + ) + + log(INFO, "Computing anomaly scores") + anomaly_scores = self.distance_function( + ground_truth, predicted_matrix[:, :, 0] + ) + log(INFO, "Anomaly scores: %s", anomaly_scores) + + log(INFO, "Selecting good clients") + good_clients_idx = sorted( + np.argsort(anomaly_scores)[: self.num_clients_to_keep] + ) # noqa + malicious_clients_idx = sorted( + np.argsort(anomaly_scores)[self.num_clients_to_keep :] + ) # noqa + + avg_anomaly_score_gc = np.mean(anomaly_scores[good_clients_idx]) + log( + INFO, "Average anomaly score for good clients: %s", 
avg_anomaly_score_gc + ) + + avg_anomaly_score_m = np.mean(anomaly_scores[malicious_clients_idx]) + log( + INFO, + "Average anomaly score for malicious clients: %s", + avg_anomaly_score_m, + ) + + results = np.array(results)[good_clients_idx].tolist() + log(INFO, "Good clients: %s", good_clients_idx) + + log(INFO, "Applying aggregate_fn") + # Convert results + weights_results = [ + (parameters_to_ndarrays(fit_res.parameters), fit_res.num_examples) + for _, fit_res in results + ] + + # Check that self.aggregate_fn has num_malicious parameter + if "num_malicious" in self.aggregate_fn.__code__.co_varnames: + # Count the number of malicious clients in + # good_clients_idx by checking FitRes + clients_state = { + str(fit_res.metrics["cid"]): fit_res.metrics["malicious"] + for _, fit_res in results + } + num_malicious = sum([clients_state[str(cid)] for cid in good_clients_idx]) + log( + INFO, + "Number of malicious clients in good_clients_idx after filtering: %s", + num_malicious, + ) + self.aggregate_parameters["num_malicious"] = num_malicious + + if "aggregation_rule" in self.aggregate_fn.__code__.co_varnames: + module = importlib.import_module( + self.aggregate_parameters["aggregation_module_name"] + ) + function_name = self.aggregate_parameters["aggregation_name"] + self.aggregate_parameters["aggregation_rule"] = getattr( + module, function_name + ) + # Remove aggregation_module_name and aggregation_name + # from self.aggregate_parameters + aggregate_parameters = self.aggregate_parameters.copy() + del aggregate_parameters["aggregation_module_name"] + del aggregate_parameters["aggregation_name"] + try: + parameters_aggregated = ndarrays_to_parameters( + self.aggregate_fn(weights_results, **aggregate_parameters) + ) + except ValueError as err: + log(WARNING, "Error in aggregate_fn: %s", err) + parameters_aggregated = ndarrays_to_parameters( + aggregate(weights_results) + ) + else: + parameters_aggregated = ndarrays_to_parameters( + self.aggregate_fn(weights_results, 
**self.aggregate_parameters) + ) + + # Aggregate custom metrics if aggregation fn was provided + metrics_aggregated = {} + if self.fit_metrics_aggregation_fn: + fit_metrics = [(res.num_examples, res.metrics) for _, res in results] + metrics_aggregated = self.fit_metrics_aggregation_fn(fit_metrics) + elif server_round == 1: # Only log this warning once + log(WARNING, "No fit_metrics_aggregation_fn provided") + + # Add good_clients_idx and malicious_clients_idx to metrics_aggregated + metrics_aggregated["good_clients_idx"] = good_clients_idx + metrics_aggregated["malicious_clients_idx"] = malicious_clients_idx + + return parameters_aggregated, metrics_aggregated + + +# pylint: disable=too-many-locals, too-many-arguments, invalid-name +def mar(X, pred_step, alpha=1, beta=1, maxiter=100): + """Forecast the next tensor of params. + + Forecast the next tensor of params by using MAR algorithm. + + Code provided by Xinyu Chen at: + https://towardsdatascience.com/ matrix-autoregressive-model-for-multidimensional- + time-series-forecasting-6a4d7dce5143 + + With some modifications. 
+ """ + m, n, T = X.shape + start = 0 + + A = np.random.randn(m, m) + B = np.random.randn(n, n) + X_norm = (X - np.min(X)) / np.max(X) + + for _ in range(maxiter): + temp0 = B.T @ B + temp1 = np.zeros((m, m)) + temp2 = np.zeros((m, m)) + identity_m = np.identity(m) + + for t in range(start, T): + temp1 += X_norm[:, :, t] @ B @ X_norm[:, :, t - 1].T + temp2 += X_norm[:, :, t - 1] @ temp0 @ X_norm[:, :, t - 1].T + + temp2 += alpha * identity_m + A = temp1 @ np.linalg.inv(temp2) + + temp0 = A.T @ A + temp1 = np.zeros((n, n)) + temp2 = np.zeros((n, n)) + identity_n = np.identity(n) + + for t in range(start, T): + temp1 += X_norm[:, :, t].T @ A @ X_norm[:, :, t - 1] + temp2 += X_norm[:, :, t - 1].T @ temp0 @ X_norm[:, :, t - 1] + + temp2 += beta * identity_n + B = temp1 @ np.linalg.inv(temp2) + + tensor = np.append(X, np.zeros((m, n, pred_step)), axis=2) + for s in range(pred_step): + tensor[:, :, T + s] = A @ tensor[:, :, T + s - 1] @ B.T + return tensor[:, :, -pred_step:] diff --git a/baselines/flanders/flanders/utils.py b/baselines/flanders/flanders/utils.py new file mode 100644 index 000000000000..619e685e51cd --- /dev/null +++ b/baselines/flanders/flanders/utils.py @@ -0,0 +1,182 @@ +"""Collection of help functions needed by the strategies.""" + +import os +from threading import Lock +from typing import Callable, Dict, List, Optional, Tuple + +import numpy as np +import torch +from flwr.common import NDArrays, Parameters, Scalar, parameters_to_ndarrays +from natsort import natsorted +from torch.utils.data import DataLoader +from torchvision import transforms +from torchvision.datasets import MNIST, FashionMNIST + +from .client import set_params +from .models import FMnistNet, MnistNet, test_fmnist, test_mnist + +lock = Lock() + + +def l2_norm(true_matrix, predicted_matrix): + """Compute the l2 norm between two matrices. + + Parameters + ---------- + true_matrix : ndarray + The true matrix. + predicted_matrix : ndarray + The predicted matrix by MAR. 
+ + Returns + ------- + anomaly_scores : ndarray + 1-d array of anomaly scores. + """ + delta = np.subtract(true_matrix, predicted_matrix) + anomaly_scores = np.sum(delta**2, axis=-1) ** (1.0 / 2) + return anomaly_scores + + +def save_params( + parameters, cid, params_dir="clients_params", remove_last=False, rrl=False +): + """Save parameters in a file. + + Args: + - parameters (ndarray): decoded parameters to append at the end of the file + - cid (int): identifier of the client + - remove_last (bool): + if True, remove the last saved parameters and replace with "parameters" + - rrl (bool): + if True, remove the last saved parameters and replace with the ones + saved before this round. + """ + new_params = parameters + # Save parameters in clients_params/cid_params + path_file = f"{params_dir}/{cid}_params.npy" + if os.path.exists(params_dir) is False: + os.mkdir(params_dir) + if os.path.exists(path_file): + # load old parameters + old_params = np.load(path_file, allow_pickle=True) + if remove_last: + old_params = old_params[:-1] + if rrl: + new_params = old_params[-1] + # add new parameters + new_params = np.vstack((old_params, new_params)) + + # save parameters + np.save(path_file, new_params) + + +def load_all_time_series(params_dir="clients_params", window=0): + """Load all time series. + + Load all time series in order to have a tensor of shape (m,T,n) + where: + - T := time; + - m := number of clients; + - n := number of parameters. 
+ """ + files = os.listdir(params_dir) + files = natsorted(files) + data = [] + for file in files: + data.append(np.load(os.path.join(params_dir, file), allow_pickle=True)) + + return np.array(data)[:, -window:, :] + + + def flatten_params(params): + """Transform a list of (layers-)parameters into a single vector of shape (n).""" + return np.concatenate(params, axis=None).ravel() + + + # pylint: disable=unused-argument + def evaluate_aggregated( + evaluate_fn: Optional[ + Callable[[int, NDArrays, Dict[str, Scalar]], Tuple[float, Dict[str, Scalar]]] + ], + server_round: int, + parameters: Parameters, + ): + """Evaluate model parameters using an evaluation function.""" + if evaluate_fn is None: + # No evaluation function provided + return None + parameters_ndarrays = parameters_to_ndarrays(parameters) + eval_res = evaluate_fn(server_round, parameters_ndarrays, {}) + if eval_res is None: + return None + loss, metrics = eval_res + + return loss, metrics + + + # pylint: disable=unused-argument + def mnist_evaluate(server_round: int, parameters: NDArrays, config: Dict[str, Scalar]): + """Evaluate MNIST model on the test set.""" + # determine device + if torch.cuda.is_available(): + device = torch.device("cuda") + elif torch.backends.mps.is_available(): + device = torch.device("mps") + else: + device = torch.device("cpu") + + model = MnistNet() + set_params(model, parameters) + model.to(device) + + testset = MNIST("", train=False, download=True, transform=transforms.ToTensor()) + testloader = DataLoader(testset, batch_size=32, shuffle=False, num_workers=1) + loss, accuracy, auc = test_mnist(model, testloader, device=device) + + return loss, {"accuracy": accuracy, "auc": auc} + + + # pylint: disable=unused-argument + def fmnist_evaluate(server_round: int, parameters: NDArrays, config: Dict[str, Scalar]): + """Evaluate FashionMNIST model on the test set.""" + # determine device + if torch.cuda.is_available(): + device = torch.device("cuda") + elif torch.backends.mps.is_available(): + 
device = torch.device("mps") + else: + device = torch.device("cpu") + + model = FMnistNet() + set_params(model, parameters) + model.to(device) + + testset = FashionMNIST( + "", train=False, download=True, transform=transforms.ToTensor() + ) + testloader = DataLoader(testset, batch_size=32, shuffle=False, num_workers=1) + loss, accuracy, auc = test_fmnist(model, testloader, device=device) + + return loss, {"accuracy": accuracy, "auc": auc} + + +def update_confusion_matrix( + confusion_matrix: Dict[str, int], + clients_states: Dict[str, bool], + malicious_clients_idx: List, + good_clients_idx: List, +): + """Update TN, FP, FN, TP of confusion matrix.""" + for client_idx, client_state in clients_states.items(): + if int(client_idx) in malicious_clients_idx: + if client_state: + confusion_matrix["TP"] += 1 + else: + confusion_matrix["FP"] += 1 + elif int(client_idx) in good_clients_idx: + if client_state: + confusion_matrix["FN"] += 1 + else: + confusion_matrix["TN"] += 1 + return confusion_matrix diff --git a/baselines/flanders/plotting/FLANDERS_results.ipynb b/baselines/flanders/plotting/FLANDERS_results.ipynb new file mode 100644 index 000000000000..4f3fdcc9b0d8 --- /dev/null +++ b/baselines/flanders/plotting/FLANDERS_results.ipynb @@ -0,0 +1 @@ +{"cells":[{"cell_type":"markdown","metadata":{"id":"Cg37xeuu7Xy5"},"source":["# Preliminaries"]},{"cell_type":"code","execution_count":92,"metadata":{"id":"J_Dh3sGVyb2w"},"outputs":[],"source":["import pandas as pd\n","from natsort import natsorted\n","import matplotlib.pyplot as plt"]},{"cell_type":"code","execution_count":93,"metadata":{"id":"FjlCyr_B8OdT"},"outputs":[],"source":["results_dir = \"../outputs/\""]},{"cell_type":"markdown","metadata":{"id":"VX2oCpZf7Z7y"},"source":["# Prepare data"]},{"cell_type":"markdown","metadata":{"id":"P_3Z05w0wvNB"},"source":["## Utils"]},{"cell_type":"code","execution_count":94,"metadata":{},"outputs":[],"source":["def divide_results_by_dataset(results_dir, 
file=\"all_results.csv\"):\n"," \"\"\"Divide csv results into multiple files distinguished by dataset and if strategy is FLANDERS or not (e.g., all_results_mnist_flanders and all_results_mnist_no_flanders).\"\"\"\n"," results = pd.read_csv(results_dir + file, float_precision='round_trip')\n"," datasets = natsorted(results[\"dataset_name\"].unique())\n"," for dataset in datasets:\n"," flanders = results[(results[\"dataset_name\"] == dataset) & (results[\"strategy\"] == \"flanders\")]\n"," no_flanders = results[(results[\"dataset_name\"] == dataset) & (results[\"strategy\"] != \"flanders\")]\n"," flanders.to_csv(results_dir + \"all_results_\" + dataset + \"_flanders.csv\", index=False)\n"," no_flanders.to_csv(results_dir + \"all_results_\" + dataset + \"_no_flanders.csv\", index=False)\n"," "]},{"cell_type":"code","execution_count":95,"metadata":{"id":"fZSDCuT497HV"},"outputs":[],"source":["def print_unique_data(results_df):\n"," for col in [\"attack_fn\", \"num_malicious\", \"dataset_name\", \"strategy\", \"aggregate_fn\"]:\n"," print(f\"Unique values in {col}: {results_df[col].unique()}\")"]},{"cell_type":"code","execution_count":96,"metadata":{"id":"8GcIZNuu8q5Y"},"outputs":[],"source":["def translate_cols(df, attack_dict, dataset_dict, strategy_dict, aggregate_dict):\n"," column_names = [\"attack_fn\", \"dataset_name\", \"strategy\", \"aggregate_fn\"]\n"," for idx, d in enumerate([attack_dict, dataset_dict, strategy_dict, aggregate_dict]):\n"," df[column_names[idx]] = df[column_names[idx]].replace(d)\n"," return df"]},{"cell_type":"code","execution_count":97,"metadata":{"id":"oHcF2pl8sdOG"},"outputs":[],"source":["attack_dict = {\n"," \"gaussian\": \"GAUSS\",\n"," \"lie\": \"LIE\",\n"," \"fang\": \"OPT\",\n"," \"minmax\": \"AGR-MM\",\n"," \"adaptive\": \"MAR-ATK\"\n","}\n","\n","dataset_dict = {\n"," \"mnist\": \"MNIST\",\n"," \"fmnist\": \"FMNIST\",\n"," \"cifar\": \"CIFAR-10\",\n"," \"cifar100\": \"CIFAR-100\"\n","}\n","\n","strategy_dict = {\n"," \"flanders\": 
\"FLANDERS\",\n"," \"fedavg\": \"FedAvg\",\n"," \"fedmedian\": \"FedMedian\",\n"," \"trimmedmean\": \"TrimmedMean\",\n"," \"bulyan\": \"Bulyan\",\n"," \"krum\": \"MultiKrum\",\n"," \"fldetector\": \"FLDetector\"\n","}\n","\n","aggregate_dict = {\n"," \"flwr.server.strategy.aggregate.aggregate\": \"FedAvg\",\n"," \"flwr.server.strategy.aggregate.aggregate_median\": \"FedMedian\",\n"," \"flwr.server.strategy.aggregate.aggregate_trimmed_avg\": \"TrimmedMean\",\n"," \"flwr.server.strategy.aggregate.aggregate_bulyan\": \"Bulyan\",\n"," \"flwr.server.strategy.aggregate.aggregate_krum\": \"MultiKrum\"\n","}"]},{"cell_type":"code","execution_count":98,"metadata":{},"outputs":[],"source":["divide_results_by_dataset(results_dir)"]},{"cell_type":"markdown","metadata":{"id":"y0XCCkuhwydB"},"source":["## MNIST"]},{"cell_type":"markdown","metadata":{"id":"NG2-2cpnyjkY"},"source":["### Use this shortcut"]},{"cell_type":"code","execution_count":99,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":443},"executionInfo":{"elapsed":244,"status":"ok","timestamp":1716376729975,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"lY85nEb6yrXu","outputId":"5439a3bc-684f-492f-b615-53e2252cd94c"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
roundlossaccuracyaucTPTNFPFNattack_fndataset_namenum_maliciousstrategyaggregate_fn
00720.8331210.10450.5034220000GAUSSMNIST0FLANDERSFedAvg
11664.5222520.20890.5631160000GAUSSMNIST0FLANDERSFedAvg
22624.6338260.35600.6447310000GAUSSMNIST0FLANDERSFedAvg
33581.4764720.47730.710941010000GAUSSMNIST0FLANDERSFedAvg
44545.1142050.54300.746970020000GAUSSMNIST0FLANDERSFedAvg
..........................................
356546724.2817250.10250.5004790000AGR-MMMNIST80dncFedAvg
356647724.3501380.10240.5004210000AGR-MMMNIST80dncFedAvg
356748724.5352630.10250.5004790000AGR-MMMNIST80dncFedAvg
356849724.5888810.10280.5005980000AGR-MMMNIST80dncFedAvg
356950724.7838510.10280.5006020000AGR-MMMNIST80dncFedAvg
\n","

7548 rows × 13 columns

\n","
"],"text/plain":[" round loss accuracy auc TP TN FP FN attack_fn \\\n","0 0 720.833121 0.1045 0.503422 0 0 0 0 GAUSS \n","1 1 664.522252 0.2089 0.563116 0 0 0 0 GAUSS \n","2 2 624.633826 0.3560 0.644731 0 0 0 0 GAUSS \n","3 3 581.476472 0.4773 0.710941 0 100 0 0 GAUSS \n","4 4 545.114205 0.5430 0.746970 0 200 0 0 GAUSS \n","... ... ... ... ... .. ... .. .. ... \n","3565 46 724.281725 0.1025 0.500479 0 0 0 0 AGR-MM \n","3566 47 724.350138 0.1024 0.500421 0 0 0 0 AGR-MM \n","3567 48 724.535263 0.1025 0.500479 0 0 0 0 AGR-MM \n","3568 49 724.588881 0.1028 0.500598 0 0 0 0 AGR-MM \n","3569 50 724.783851 0.1028 0.500602 0 0 0 0 AGR-MM \n","\n"," dataset_name num_malicious strategy aggregate_fn \n","0 MNIST 0 FLANDERS FedAvg \n","1 MNIST 0 FLANDERS FedAvg \n","2 MNIST 0 FLANDERS FedAvg \n","3 MNIST 0 FLANDERS FedAvg \n","4 MNIST 0 FLANDERS FedAvg \n","... ... ... ... ... \n","3565 MNIST 80 dnc FedAvg \n","3566 MNIST 80 dnc FedAvg \n","3567 MNIST 80 dnc FedAvg \n","3568 MNIST 80 dnc FedAvg \n","3569 MNIST 80 dnc FedAvg \n","\n","[7548 rows x 13 columns]"]},"execution_count":99,"metadata":{},"output_type":"execute_result"}],"source":["# CSV pre-processing MNIST\n","results_flanders_file = results_dir + \"all_results_mnist_flanders.csv\"\n","results_no_flanders_file = results_dir + \"all_results_mnist_no_flanders.csv\"\n","results_flanders_df = pd.read_csv(results_flanders_file)\n","results_no_flanders_df = pd.read_csv(results_no_flanders_file)\n","results_flanders_df = translate_cols(results_flanders_df, attack_dict ,dataset_dict, strategy_dict, aggregate_dict)\n","results_no_flanders_df = translate_cols(results_no_flanders_df, attack_dict ,dataset_dict, strategy_dict, aggregate_dict)\n","mnist_df = pd.concat([results_flanders_df, results_no_flanders_df])\n","mnist_df"]},{"cell_type":"code","execution_count":100,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":9,"status":"ok","timestamp":1716115669854,"user":{"displayName":"Edoardo 
Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"hg3ysnqiNrms","outputId":"bae8ab71-ce7b-409a-b154-6ad42f9dfc3b"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['GAUSS' 'LIE' 'OPT' 'AGR-MM']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['MNIST']\n","Unique values in strategy: ['FLANDERS' 'FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan' 'dnc']\n","Unique values in aggregate_fn: ['FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan'\n"," 'flanders.strategies.aggregate.aggregate_dnc']\n"]}],"source":["print_unique_data(mnist_df)"]},{"cell_type":"markdown","metadata":{"id":"dE_uqUeuyl6M"},"source":["### Step-by-step processing"]},{"cell_type":"code","execution_count":101,"metadata":{"id":"R9Cpe8bF8a2z"},"outputs":[],"source":["results_flanders_file = results_dir + \"all_results_mnist_flanders.csv\"\n","results_no_flanders_file = results_dir + \"all_results_mnist_no_flanders.csv\""]},{"cell_type":"code","execution_count":102,"metadata":{"id":"8nPsIraZ7nJK"},"outputs":[],"source":["results_flanders_df = pd.read_csv(results_flanders_file)\n","results_no_flanders_df = pd.read_csv(results_no_flanders_file)"]},{"cell_type":"code","execution_count":103,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":3,"status":"ok","timestamp":1707513800371,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"oC_C6WVMshle","outputId":"20708ffb-24d9-4d94-fc83-6ab93c8d4ed0"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['gaussian' 'lie' 'fang' 'minmax']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['mnist']\n","Unique values in strategy: ['flanders']\n","Unique values in aggregate_fn: ['flwr.server.strategy.aggregate.aggregate'\n"," 'flwr.server.strategy.aggregate.aggregate_trimmed_avg'\n"," 
'flwr.server.strategy.aggregate.aggregate_median'\n"," 'flwr.server.strategy.aggregate.aggregate_krum'\n"," 'flwr.server.strategy.aggregate.aggregate_bulyan'\n"," 'flanders.strategies.aggregate.aggregate_dnc']\n"]}],"source":["print_unique_data(results_flanders_df)"]},{"cell_type":"code","execution_count":104,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":423,"status":"ok","timestamp":1707478795736,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"2xSStl9-52cc","outputId":"391a3d7c-c4b5-486c-85a2-97d9a5ddb30d"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['gaussian' 'lie' 'fang' 'minmax']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['mnist']\n","Unique values in strategy: ['fedavg' 'trimmedmean' 'fedmedian' 'krum' 'bulyan' 'dnc']\n","Unique values in aggregate_fn: ['flwr.server.strategy.aggregate.aggregate']\n"]}],"source":["print_unique_data(results_no_flanders_df)"]},{"cell_type":"markdown","metadata":{"id":"8dEepZY28raZ"},"source":["Translate strings"]},{"cell_type":"code","execution_count":105,"metadata":{"id":"zNPGc6YJ7E_J"},"outputs":[],"source":["results_flanders_df = translate_cols(results_flanders_df, attack_dict ,dataset_dict, strategy_dict, aggregate_dict)"]},{"cell_type":"code","execution_count":106,"metadata":{"id":"AQaNnF1K7TQc"},"outputs":[],"source":["results_no_flanders_df = translate_cols(results_no_flanders_df, attack_dict ,dataset_dict, strategy_dict, aggregate_dict)"]},{"cell_type":"code","execution_count":107,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":393,"status":"ok","timestamp":1707478246670,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"MXYnAzh8V-9t","outputId":"8f09ab5c-cd3e-4627-9f76-5f474ec09227"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in 
attack_fn: ['GAUSS' 'LIE' 'OPT' 'AGR-MM']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['MNIST']\n","Unique values in strategy: ['FLANDERS']\n","Unique values in aggregate_fn: ['FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan'\n"," 'flanders.strategies.aggregate.aggregate_dnc']\n"]}],"source":["print_unique_data(results_flanders_df)"]},{"cell_type":"code","execution_count":108,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":8,"status":"ok","timestamp":1707472989224,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"7xmPF5X77VQk","outputId":"f8e8331e-cde6-4413-f795-f0fe1a9cdd19"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['GAUSS' 'LIE' 'OPT' 'AGR-MM']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['MNIST']\n","Unique values in strategy: ['FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan' 'dnc']\n","Unique values in aggregate_fn: ['FedAvg']\n"]}],"source":["print_unique_data(results_no_flanders_df)"]},{"cell_type":"markdown","metadata":{"id":"mjTmoV2M9YTk"},"source":["Concatenate the 2 dataframes, namely FLANDERS+f and baselines:"]},{"cell_type":"code","execution_count":109,"metadata":{"id":"apvpT9Ve8wwv"},"outputs":[],"source":["mnist_df = pd.concat([results_flanders_df, results_no_flanders_df])"]},{"cell_type":"code","execution_count":110,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":424},"executionInfo":{"elapsed":5,"status":"ok","timestamp":1707513807441,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"aZuW73BW9Iu3","outputId":"3f558906-0d55-4ccd-e64f-5b62203ae746"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
roundlossaccuracyaucTPTNFPFNattack_fndataset_namenum_maliciousstrategyaggregate_fn
00720.8331210.10450.5034220000GAUSSMNIST0FLANDERSFedAvg
11664.5222520.20890.5631160000GAUSSMNIST0FLANDERSFedAvg
22624.6338260.35600.6447310000GAUSSMNIST0FLANDERSFedAvg
33581.4764720.47730.710941010000GAUSSMNIST0FLANDERSFedAvg
44545.1142050.54300.746970020000GAUSSMNIST0FLANDERSFedAvg
..........................................
356546724.2817250.10250.5004790000AGR-MMMNIST80dncFedAvg
356647724.3501380.10240.5004210000AGR-MMMNIST80dncFedAvg
356748724.5352630.10250.5004790000AGR-MMMNIST80dncFedAvg
356849724.5888810.10280.5005980000AGR-MMMNIST80dncFedAvg
356950724.7838510.10280.5006020000AGR-MMMNIST80dncFedAvg
\n","

7548 rows × 13 columns

\n","
"],"text/plain":[" round loss accuracy auc TP TN FP FN attack_fn \\\n","0 0 720.833121 0.1045 0.503422 0 0 0 0 GAUSS \n","1 1 664.522252 0.2089 0.563116 0 0 0 0 GAUSS \n","2 2 624.633826 0.3560 0.644731 0 0 0 0 GAUSS \n","3 3 581.476472 0.4773 0.710941 0 100 0 0 GAUSS \n","4 4 545.114205 0.5430 0.746970 0 200 0 0 GAUSS \n","... ... ... ... ... .. ... .. .. ... \n","3565 46 724.281725 0.1025 0.500479 0 0 0 0 AGR-MM \n","3566 47 724.350138 0.1024 0.500421 0 0 0 0 AGR-MM \n","3567 48 724.535263 0.1025 0.500479 0 0 0 0 AGR-MM \n","3568 49 724.588881 0.1028 0.500598 0 0 0 0 AGR-MM \n","3569 50 724.783851 0.1028 0.500602 0 0 0 0 AGR-MM \n","\n"," dataset_name num_malicious strategy aggregate_fn \n","0 MNIST 0 FLANDERS FedAvg \n","1 MNIST 0 FLANDERS FedAvg \n","2 MNIST 0 FLANDERS FedAvg \n","3 MNIST 0 FLANDERS FedAvg \n","4 MNIST 0 FLANDERS FedAvg \n","... ... ... ... ... \n","3565 MNIST 80 dnc FedAvg \n","3566 MNIST 80 dnc FedAvg \n","3567 MNIST 80 dnc FedAvg \n","3568 MNIST 80 dnc FedAvg \n","3569 MNIST 80 dnc FedAvg \n","\n","[7548 rows x 13 columns]"]},"execution_count":110,"metadata":{},"output_type":"execute_result"}],"source":["mnist_df"]},{"cell_type":"code","execution_count":111,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":3,"status":"ok","timestamp":1707480685917,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"Ub0W-iA69LpR","outputId":"bfbcaf9e-575c-4d02-e9f0-0be7e74accb4"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['GAUSS' 'LIE' 'OPT' 'AGR-MM']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['MNIST']\n","Unique values in strategy: ['FLANDERS' 'FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan' 'dnc']\n","Unique values in aggregate_fn: ['FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan'\n"," 
'flanders.strategies.aggregate.aggregate_dnc']\n"]}],"source":["print_unique_data(mnist_df)"]},{"cell_type":"markdown","metadata":{"id":"E3TZ_fJuTVuU"},"source":["## Fashion MNIST"]},{"cell_type":"markdown","metadata":{"id":"45GAIKG9Tmyb"},"source":["### Use this shortcut"]},{"cell_type":"code","execution_count":112,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":443},"executionInfo":{"elapsed":327,"status":"ok","timestamp":1716376732776,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"Qju5S7VmTpB_","outputId":"fbe0aed8-164c-4343-9949-e9fa7cc7f0a7"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
roundlossaccuracyaucTPTNFPFNattack_fndataset_namenum_maliciousstrategyaggregate_fn
0023082.3338130.06310.4795000000GAUSSFMNIST0FLANDERSFedAvg
1121920.1315610.19770.5542780000GAUSSFMNIST0FLANDERSFedAvg
2217859.0960200.42100.6783330000GAUSSFMNIST0FLANDERSFedAvg
3315559.0449260.49200.717778010000GAUSSFMNIST0FLANDERSFedAvg
4414684.1937220.50010.722278020000GAUSSFMNIST0FLANDERSFedAvg
..........................................
35654623279.5649070.10000.5000000000AGR-MMFMNIST80dncFedAvg
35664723290.9804420.10000.5000000000AGR-MMFMNIST80dncFedAvg
35674823302.2510220.10000.5000000000AGR-MMFMNIST80dncFedAvg
35684923312.5125960.10000.5000000000AGR-MMFMNIST80dncFedAvg
35695023326.1161770.10000.5000000000AGR-MMFMNIST80dncFedAvg
\n","

6884 rows × 13 columns

\n","
"],"text/plain":[" round loss accuracy auc TP TN FP FN attack_fn \\\n","0 0 23082.333813 0.0631 0.479500 0 0 0 0 GAUSS \n","1 1 21920.131561 0.1977 0.554278 0 0 0 0 GAUSS \n","2 2 17859.096020 0.4210 0.678333 0 0 0 0 GAUSS \n","3 3 15559.044926 0.4920 0.717778 0 100 0 0 GAUSS \n","4 4 14684.193722 0.5001 0.722278 0 200 0 0 GAUSS \n","... ... ... ... ... .. ... .. .. ... \n","3565 46 23279.564907 0.1000 0.500000 0 0 0 0 AGR-MM \n","3566 47 23290.980442 0.1000 0.500000 0 0 0 0 AGR-MM \n","3567 48 23302.251022 0.1000 0.500000 0 0 0 0 AGR-MM \n","3568 49 23312.512596 0.1000 0.500000 0 0 0 0 AGR-MM \n","3569 50 23326.116177 0.1000 0.500000 0 0 0 0 AGR-MM \n","\n"," dataset_name num_malicious strategy aggregate_fn \n","0 FMNIST 0 FLANDERS FedAvg \n","1 FMNIST 0 FLANDERS FedAvg \n","2 FMNIST 0 FLANDERS FedAvg \n","3 FMNIST 0 FLANDERS FedAvg \n","4 FMNIST 0 FLANDERS FedAvg \n","... ... ... ... ... \n","3565 FMNIST 80 dnc FedAvg \n","3566 FMNIST 80 dnc FedAvg \n","3567 FMNIST 80 dnc FedAvg \n","3568 FMNIST 80 dnc FedAvg \n","3569 FMNIST 80 dnc FedAvg \n","\n","[6884 rows x 13 columns]"]},"execution_count":112,"metadata":{},"output_type":"execute_result"}],"source":["# CSV pre-processing FMNIST\n","results_flanders_file = results_dir + \"all_results_fmnist_flanders.csv\"\n","results_no_flanders_file = results_dir + \"all_results_fmnist_no_flanders.csv\"\n","results_flanders_df = pd.read_csv(results_flanders_file)\n","results_no_flanders_df = pd.read_csv(results_no_flanders_file)\n","results_flanders_df = translate_cols(results_flanders_df, attack_dict ,dataset_dict, strategy_dict, aggregate_dict)\n","results_no_flanders_df = translate_cols(results_no_flanders_df, attack_dict ,dataset_dict, strategy_dict, aggregate_dict)\n","fmnist_df = pd.concat([results_flanders_df, 
results_no_flanders_df])\n","fmnist_df"]},{"cell_type":"code","execution_count":113,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":1,"status":"ok","timestamp":1716047458204,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"08YXqMTZNpN6","outputId":"9de6cdc8-241e-4867-cd53-645434b99ce2"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['GAUSS' 'LIE' 'OPT' 'AGR-MM']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['FMNIST']\n","Unique values in strategy: ['FLANDERS' 'FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan' 'dnc']\n","Unique values in aggregate_fn: ['FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan']\n"]}],"source":["print_unique_data(fmnist_df)"]},{"cell_type":"markdown","metadata":{"id":"9vVX6wsxT-rc"},"source":["### Step-by-step processing"]},{"cell_type":"code","execution_count":114,"metadata":{"id":"j0ZnLmVnUBT3"},"outputs":[],"source":["results_flanders_file = results_dir + \"all_results_fmnist_flanders.csv\"\n","results_no_flanders_file = results_dir + \"all_results_fmnist_no_flanders.csv\""]},{"cell_type":"code","execution_count":115,"metadata":{"id":"qsYaQiAWUBOw"},"outputs":[],"source":["results_flanders_df = pd.read_csv(results_flanders_file)\n","results_no_flanders_df = pd.read_csv(results_no_flanders_file)"]},{"cell_type":"code","execution_count":116,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":250,"status":"ok","timestamp":1709217712591,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"m1VKWq_jUHyY","outputId":"5bd5b442-4ab4-473d-bc60-b6f5fdc6320d"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['gaussian' 'lie' 'fang' 'minmax']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['fmnist']\n","Unique values in 
strategy: ['flanders']\n","Unique values in aggregate_fn: ['flwr.server.strategy.aggregate.aggregate'\n"," 'flwr.server.strategy.aggregate.aggregate_trimmed_avg'\n"," 'flwr.server.strategy.aggregate.aggregate_median'\n"," 'flwr.server.strategy.aggregate.aggregate_krum'\n"," 'flwr.server.strategy.aggregate.aggregate_bulyan']\n"]}],"source":["print_unique_data(results_flanders_df)"]},{"cell_type":"code","execution_count":117,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":4,"status":"ok","timestamp":1709217720407,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"IxPT9D6DUJN3","outputId":"ded1f8b2-baf5-437b-abe3-25bad7a3c4ad"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['gaussian' 'lie' 'fang' 'minmax']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['fmnist']\n","Unique values in strategy: ['fedavg' 'trimmedmean' 'fedmedian' 'krum' 'bulyan' 'dnc']\n","Unique values in aggregate_fn: ['flwr.server.strategy.aggregate.aggregate']\n"]}],"source":["print_unique_data(results_no_flanders_df)"]},{"cell_type":"markdown","metadata":{"id":"X8k98LNrUaLp"},"source":["Translate strings"]},{"cell_type":"code","execution_count":118,"metadata":{"id":"zHNwpvZMUaLq"},"outputs":[],"source":["results_flanders_df = translate_cols(results_flanders_df, attack_dict ,dataset_dict, strategy_dict, aggregate_dict)"]},{"cell_type":"code","execution_count":119,"metadata":{"id":"9zMOOjCiUaLr"},"outputs":[],"source":["results_no_flanders_df = translate_cols(results_no_flanders_df, attack_dict ,dataset_dict, strategy_dict, aggregate_dict)"]},{"cell_type":"code","execution_count":120,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":4,"status":"ok","timestamp":1709217802421,"user":{"displayName":"Edoardo 
Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"tygeoDz6UaLr","outputId":"2441a31a-ead0-4adf-8b95-73d75c6b3739"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['GAUSS' 'LIE' 'OPT' 'AGR-MM']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['FMNIST']\n","Unique values in strategy: ['FLANDERS']\n","Unique values in aggregate_fn: ['FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan']\n"]}],"source":["print_unique_data(results_flanders_df)"]},{"cell_type":"code","execution_count":121,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":3,"status":"ok","timestamp":1709217803343,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"b8Xo1VseUaLr","outputId":"1fbb0a64-0695-4a89-eec1-e0a18ba54c40"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['GAUSS' 'LIE' 'OPT' 'AGR-MM']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['FMNIST']\n","Unique values in strategy: ['FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan' 'dnc']\n","Unique values in aggregate_fn: ['FedAvg']\n"]}],"source":["print_unique_data(results_no_flanders_df)"]},{"cell_type":"markdown","metadata":{"id":"zkmqmUTzUaLr"},"source":["Concatenate the 2 dataframes, namely FLANDERS+f and baselines:"]},{"cell_type":"code","execution_count":122,"metadata":{"id":"m-wRVa9eUaLr"},"outputs":[],"source":["fmnist_df = pd.concat([results_flanders_df, results_no_flanders_df])"]},{"cell_type":"code","execution_count":123,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":423},"executionInfo":{"elapsed":4,"status":"ok","timestamp":1709217813677,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"IhpjH5n1UaLs","outputId":"73f9a359-9f74-4e5e-b518-25af272b2207"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
roundlossaccuracyaucTPTNFPFNattack_fndataset_namenum_maliciousstrategyaggregate_fn
0023082.3338130.06310.4795000000GAUSSFMNIST0FLANDERSFedAvg
1121920.1315610.19770.5542780000GAUSSFMNIST0FLANDERSFedAvg
2217859.0960200.42100.6783330000GAUSSFMNIST0FLANDERSFedAvg
3315559.0449260.49200.717778010000GAUSSFMNIST0FLANDERSFedAvg
4414684.1937220.50010.722278020000GAUSSFMNIST0FLANDERSFedAvg
..........................................
35654623279.5649070.10000.5000000000AGR-MMFMNIST80dncFedAvg
35664723290.9804420.10000.5000000000AGR-MMFMNIST80dncFedAvg
35674823302.2510220.10000.5000000000AGR-MMFMNIST80dncFedAvg
35684923312.5125960.10000.5000000000AGR-MMFMNIST80dncFedAvg
35695023326.1161770.10000.5000000000AGR-MMFMNIST80dncFedAvg
\n","

6884 rows × 13 columns

\n","
"],"text/plain":[" round loss accuracy auc TP TN FP FN attack_fn \\\n","0 0 23082.333813 0.0631 0.479500 0 0 0 0 GAUSS \n","1 1 21920.131561 0.1977 0.554278 0 0 0 0 GAUSS \n","2 2 17859.096020 0.4210 0.678333 0 0 0 0 GAUSS \n","3 3 15559.044926 0.4920 0.717778 0 100 0 0 GAUSS \n","4 4 14684.193722 0.5001 0.722278 0 200 0 0 GAUSS \n","... ... ... ... ... .. ... .. .. ... \n","3565 46 23279.564907 0.1000 0.500000 0 0 0 0 AGR-MM \n","3566 47 23290.980442 0.1000 0.500000 0 0 0 0 AGR-MM \n","3567 48 23302.251022 0.1000 0.500000 0 0 0 0 AGR-MM \n","3568 49 23312.512596 0.1000 0.500000 0 0 0 0 AGR-MM \n","3569 50 23326.116177 0.1000 0.500000 0 0 0 0 AGR-MM \n","\n"," dataset_name num_malicious strategy aggregate_fn \n","0 FMNIST 0 FLANDERS FedAvg \n","1 FMNIST 0 FLANDERS FedAvg \n","2 FMNIST 0 FLANDERS FedAvg \n","3 FMNIST 0 FLANDERS FedAvg \n","4 FMNIST 0 FLANDERS FedAvg \n","... ... ... ... ... \n","3565 FMNIST 80 dnc FedAvg \n","3566 FMNIST 80 dnc FedAvg \n","3567 FMNIST 80 dnc FedAvg \n","3568 FMNIST 80 dnc FedAvg \n","3569 FMNIST 80 dnc FedAvg \n","\n","[6884 rows x 13 columns]"]},"execution_count":123,"metadata":{},"output_type":"execute_result"}],"source":["fmnist_df"]},{"cell_type":"code","execution_count":124,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":6,"status":"ok","timestamp":1709217818750,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"XYwnEFa2UaLs","outputId":"44dcb4de-f892-4555-b5d8-8cfcacd37137"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['GAUSS' 'LIE' 'OPT' 'AGR-MM']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['FMNIST']\n","Unique values in strategy: ['FLANDERS' 'FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan' 'dnc']\n","Unique values in aggregate_fn: ['FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 
'Bulyan']\n"]}],"source":["print_unique_data(fmnist_df)"]},{"cell_type":"markdown","metadata":{"id":"1TUxrAF6w6cY"},"source":["## Unify datasets"]},{"cell_type":"code","execution_count":125,"metadata":{"id":"R2wOP2Eex7X2"},"outputs":[],"source":["all_datasets_df = pd.concat([mnist_df, fmnist_df])"]},{"cell_type":"code","execution_count":126,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":443},"executionInfo":{"elapsed":428,"status":"ok","timestamp":1716376740426,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"jwDN17ygyFK7","outputId":"c3db739f-98b7-4070-f826-4344553e9ab7"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
roundlossaccuracyaucTPTNFPFNattack_fndataset_namenum_maliciousstrategyaggregate_fn
00720.8331210.10450.5034220000GAUSSMNIST0FLANDERSFedAvg
11664.5222520.20890.5631160000GAUSSMNIST0FLANDERSFedAvg
22624.6338260.35600.6447310000GAUSSMNIST0FLANDERSFedAvg
33581.4764720.47730.710941010000GAUSSMNIST0FLANDERSFedAvg
44545.1142050.54300.746970020000GAUSSMNIST0FLANDERSFedAvg
..........................................
35654623279.5649070.10000.5000000000AGR-MMFMNIST80dncFedAvg
35664723290.9804420.10000.5000000000AGR-MMFMNIST80dncFedAvg
35674823302.2510220.10000.5000000000AGR-MMFMNIST80dncFedAvg
35684923312.5125960.10000.5000000000AGR-MMFMNIST80dncFedAvg
35695023326.1161770.10000.5000000000AGR-MMFMNIST80dncFedAvg
\n","

14432 rows × 13 columns

\n","
"],"text/plain":[" round loss accuracy auc TP TN FP FN attack_fn \\\n","0 0 720.833121 0.1045 0.503422 0 0 0 0 GAUSS \n","1 1 664.522252 0.2089 0.563116 0 0 0 0 GAUSS \n","2 2 624.633826 0.3560 0.644731 0 0 0 0 GAUSS \n","3 3 581.476472 0.4773 0.710941 0 100 0 0 GAUSS \n","4 4 545.114205 0.5430 0.746970 0 200 0 0 GAUSS \n","... ... ... ... ... .. ... .. .. ... \n","3565 46 23279.564907 0.1000 0.500000 0 0 0 0 AGR-MM \n","3566 47 23290.980442 0.1000 0.500000 0 0 0 0 AGR-MM \n","3567 48 23302.251022 0.1000 0.500000 0 0 0 0 AGR-MM \n","3568 49 23312.512596 0.1000 0.500000 0 0 0 0 AGR-MM \n","3569 50 23326.116177 0.1000 0.500000 0 0 0 0 AGR-MM \n","\n"," dataset_name num_malicious strategy aggregate_fn \n","0 MNIST 0 FLANDERS FedAvg \n","1 MNIST 0 FLANDERS FedAvg \n","2 MNIST 0 FLANDERS FedAvg \n","3 MNIST 0 FLANDERS FedAvg \n","4 MNIST 0 FLANDERS FedAvg \n","... ... ... ... ... \n","3565 FMNIST 80 dnc FedAvg \n","3566 FMNIST 80 dnc FedAvg \n","3567 FMNIST 80 dnc FedAvg \n","3568 FMNIST 80 dnc FedAvg \n","3569 FMNIST 80 dnc FedAvg \n","\n","[14432 rows x 13 columns]"]},"execution_count":126,"metadata":{},"output_type":"execute_result"}],"source":["all_datasets_df"]},{"cell_type":"code","execution_count":127,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":2,"status":"ok","timestamp":1716376741411,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"VWrdDPtPyHHA","outputId":"1ef8b853-63f8-4eac-a785-578684dbf0a6"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['GAUSS' 'LIE' 'OPT' 'AGR-MM']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['MNIST' 'FMNIST']\n","Unique values in strategy: ['FLANDERS' 'FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan' 'dnc']\n","Unique values in aggregate_fn: ['FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan'\n"," 
'flanders.strategies.aggregate.aggregate_dnc']\n"]}],"source":["print_unique_data(all_datasets_df)"]},{"cell_type":"markdown","metadata":{"id":"OlES57TEn2Ng"},"source":["# Tables\n"]},{"cell_type":"markdown","metadata":{"id":"hcHkTXfGbapg"},"source":["## Accuracy"]},{"cell_type":"markdown","metadata":{"id":"7F1YDs12sZbE"},"source":["### Best with improvement w.r.t. baseline"]},{"cell_type":"code","execution_count":128,"metadata":{"id":"GuQM8bzXnIGx"},"outputs":[],"source":["def accuracy_table(input_df, b):\n"," # Define strategies and attacks\n"," strategies = ['FedAvg', 'FLANDERS + FedAvg', 'FedMedian', 'FLANDERS + FedMedian', 'TrimmedMean', 'FLANDERS + TrimmedMean', 'MultiKrum', 'FLANDERS + MultiKrum', 'Bulyan', 'FLANDERS + Bulyan']\n"," attacks = ['GAUSS', 'LIE', 'OPT', 'AGR-MM']\n"," dataset_names = [\"MNIST\", \"FMNIST\"]\n","\n"," # Create MultiIndex for the columns\n"," columns = pd.MultiIndex.from_product([dataset_names, attacks], names=['Dataset', 'Attack'])\n","\n"," # Create an empty DataFrame with the defined columns and strategies\n"," df = pd.DataFrame(index=strategies, columns=columns)\n","\n"," filtered_df = input_df[(input_df['num_malicious'] == b) & (input_df['round'] >= 3)]\n"," baseline_df = filtered_df[filtered_df['strategy'] != 'FLANDERS']\n"," flanders_df = filtered_df[filtered_df['strategy'] == 'FLANDERS']\n","\n"," # Populate the DataFrame\n"," for strategy in ['FedAvg', 'TrimmedMean', 'FedMedian', 'MultiKrum', 'Bulyan']:\n"," for dataset in dataset_names:\n"," for attack in attacks:\n"," df.loc[strategy, (dataset, attack)] = round(baseline_df[(baseline_df['strategy']==strategy) & (baseline_df['attack_fn']==attack) & (baseline_df['dataset_name']==dataset)]['accuracy'].max(), 2)\n"," df.loc[f\"FLANDERS + {strategy}\", (dataset, attack)] = round(flanders_df[(flanders_df['aggregate_fn']==strategy) & (flanders_df['attack_fn']==attack) & (flanders_df['dataset_name']==dataset)]['accuracy'].max(), 2)\n","\n"," return 
df\n"]},{"cell_type":"code","execution_count":129,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":457},"executionInfo":{"elapsed":1243,"status":"ok","timestamp":1715943873268,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"PruXJg2vA87b","outputId":"98d83851-626b-4877-fd09-8f2f01200c65"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
DatasetMNISTFMNIST
AttackGAUSSGAUSS
FedAvg0.860.68
FLANDERS + FedAvg0.840.64
FedMedian0.830.71
FLANDERS + FedMedian0.760.73
TrimmedMean0.850.69
FLANDERS + TrimmedMean0.780.7
MultiKrum0.680.66
FLANDERS + MultiKrum0.740.73
Bulyan0.860.62
FLANDERS + Bulyan0.870.65
\n","
"],"text/plain":["Dataset MNIST FMNIST\n","Attack GAUSS GAUSS\n","FedAvg 0.86 0.68\n","FLANDERS + FedAvg 0.84 0.64\n","FedMedian 0.83 0.71\n","FLANDERS + FedMedian 0.76 0.73\n","TrimmedMean 0.85 0.69\n","FLANDERS + TrimmedMean 0.78 0.7\n","MultiKrum 0.68 0.66\n","FLANDERS + MultiKrum 0.74 0.73\n","Bulyan 0.86 0.62\n","FLANDERS + Bulyan 0.87 0.65"]},"execution_count":129,"metadata":{},"output_type":"execute_result"}],"source":["# Table 19\n","acc_0 = accuracy_table(all_datasets_df, 0).dropna(axis=1, how='all')\n","acc_0"]},{"cell_type":"code","execution_count":130,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":457},"executionInfo":{"elapsed":858,"status":"ok","timestamp":1715944158330,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"8bV7hABWbyMS","outputId":"6d24f87d-da15-40b2-8880-3b6dcefda1fd"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
DatasetMNISTFMNIST
AttackGAUSSLIEOPTAGR-MMGAUSSLIEOPTAGR-MM
FedAvg0.20.170.670.450.250.170.570.11
FLANDERS + FedAvg0.880.870.480.880.660.670.570.64
FedMedian0.80.660.790.590.660.650.670.6
FLANDERS + FedMedian0.850.850.660.830.710.690.630.73
TrimmedMean0.860.520.730.610.690.540.620.58
FLANDERS + TrimmedMean0.810.850.780.830.690.70.630.73
MultiKrum0.780.770.810.820.740.650.70.67
FLANDERS + MultiKrum0.820.860.840.820.730.70.730.71
Bulyan0.820.840.840.830.710.720.690.76
FLANDERS + Bulyan0.90.840.790.850.650.650.660.65
\n","
"],"text/plain":["Dataset MNIST FMNIST \n","Attack GAUSS LIE OPT AGR-MM GAUSS LIE OPT AGR-MM\n","FedAvg 0.2 0.17 0.67 0.45 0.25 0.17 0.57 0.11\n","FLANDERS + FedAvg 0.88 0.87 0.48 0.88 0.66 0.67 0.57 0.64\n","FedMedian 0.8 0.66 0.79 0.59 0.66 0.65 0.67 0.6\n","FLANDERS + FedMedian 0.85 0.85 0.66 0.83 0.71 0.69 0.63 0.73\n","TrimmedMean 0.86 0.52 0.73 0.61 0.69 0.54 0.62 0.58\n","FLANDERS + TrimmedMean 0.81 0.85 0.78 0.83 0.69 0.7 0.63 0.73\n","MultiKrum 0.78 0.77 0.81 0.82 0.74 0.65 0.7 0.67\n","FLANDERS + MultiKrum 0.82 0.86 0.84 0.82 0.73 0.7 0.73 0.71\n","Bulyan 0.82 0.84 0.84 0.83 0.71 0.72 0.69 0.76\n","FLANDERS + Bulyan 0.9 0.84 0.79 0.85 0.65 0.65 0.66 0.65"]},"execution_count":130,"metadata":{},"output_type":"execute_result"}],"source":["# Table 15\n","acc_20 = accuracy_table(all_datasets_df, 20)\n","acc_20"]},{"cell_type":"markdown","metadata":{},"source":["Bulyan is NaN because it cannot work when the number of malicious clients is > 25%"]},{"cell_type":"code","execution_count":131,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":477},"executionInfo":{"elapsed":1126,"status":"ok","timestamp":1716115710006,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"xSvgEwLoPmh3","outputId":"1c65e66e-7374-46f0-a2a5-0964bc40e49a"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
DatasetMNISTFMNIST
AttackGAUSSLIEOPTAGR-MMGAUSSLIEOPTAGR-MM
FedAvg0.190.150.20.160.280.10.190.1
FLANDERS + FedAvg0.760.880.850.850.690.670.710.67
FedMedian0.80.190.160.290.650.10.10.1
FLANDERS + FedMedian0.80.860.830.860.710.690.710.71
TrimmedMean0.250.20.330.10.330.10.170.1
FLANDERS + TrimmedMean0.780.870.840.830.70.710.730.74
MultiKrum0.790.140.220.150.710.10.120.1
FLANDERS + MultiKrum0.880.880.860.780.720.710.730.69
BulyanNaNNaNNaNNaNNaNNaNNaNNaN
FLANDERS + Bulyan0.890.870.90.850.680.640.60.69
\n","
"],"text/plain":["Dataset MNIST FMNIST \n","Attack GAUSS LIE OPT AGR-MM GAUSS LIE OPT AGR-MM\n","FedAvg 0.19 0.15 0.2 0.16 0.28 0.1 0.19 0.1\n","FLANDERS + FedAvg 0.76 0.88 0.85 0.85 0.69 0.67 0.71 0.67\n","FedMedian 0.8 0.19 0.16 0.29 0.65 0.1 0.1 0.1\n","FLANDERS + FedMedian 0.8 0.86 0.83 0.86 0.71 0.69 0.71 0.71\n","TrimmedMean 0.25 0.2 0.33 0.1 0.33 0.1 0.17 0.1\n","FLANDERS + TrimmedMean 0.78 0.87 0.84 0.83 0.7 0.71 0.73 0.74\n","MultiKrum 0.79 0.14 0.22 0.15 0.71 0.1 0.12 0.1\n","FLANDERS + MultiKrum 0.88 0.88 0.86 0.78 0.72 0.71 0.73 0.69\n","Bulyan NaN NaN NaN NaN NaN NaN NaN NaN\n","FLANDERS + Bulyan 0.89 0.87 0.9 0.85 0.68 0.64 0.6 0.69"]},"execution_count":131,"metadata":{},"output_type":"execute_result"}],"source":["# Table 17\n","acc_60 = accuracy_table(all_datasets_df, 60)\n","acc_60"]},{"cell_type":"code","execution_count":132,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":477},"executionInfo":{"elapsed":1188,"status":"ok","timestamp":1716050662469,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"dM_AMm_jcCye","outputId":"90533ef1-500f-40a3-f565-2c2ca1d68aaa"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
DatasetMNISTFMNIST
AttackGAUSSLIEOPTAGR-MMGAUSSLIEOPTAGR-MM
FedAvg0.210.160.310.130.240.10.180.1
FLANDERS + FedAvg0.850.860.880.850.690.70.690.66
FedMedian0.340.170.140.090.30.10.140.1
FLANDERS + FedMedian0.870.840.80.80.730.740.720.72
TrimmedMean0.170.150.210.140.210.10.120.1
FLANDERS + TrimmedMean0.810.850.810.820.740.730.70.69
MultiKrum0.820.210.320.110.720.10.150.1
FLANDERS + MultiKrum0.870.830.870.850.680.730.720.7
BulyanNaNNaNNaNNaNNaNNaNNaNNaN
FLANDERS + Bulyan0.840.840.830.80.690.720.690.68
\n","
"],"text/plain":["Dataset MNIST FMNIST \n","Attack GAUSS LIE OPT AGR-MM GAUSS LIE OPT AGR-MM\n","FedAvg 0.21 0.16 0.31 0.13 0.24 0.1 0.18 0.1\n","FLANDERS + FedAvg 0.85 0.86 0.88 0.85 0.69 0.7 0.69 0.66\n","FedMedian 0.34 0.17 0.14 0.09 0.3 0.1 0.14 0.1\n","FLANDERS + FedMedian 0.87 0.84 0.8 0.8 0.73 0.74 0.72 0.72\n","TrimmedMean 0.17 0.15 0.21 0.14 0.21 0.1 0.12 0.1\n","FLANDERS + TrimmedMean 0.81 0.85 0.81 0.82 0.74 0.73 0.7 0.69\n","MultiKrum 0.82 0.21 0.32 0.11 0.72 0.1 0.15 0.1\n","FLANDERS + MultiKrum 0.87 0.83 0.87 0.85 0.68 0.73 0.72 0.7\n","Bulyan NaN NaN NaN NaN NaN NaN NaN NaN\n","FLANDERS + Bulyan 0.84 0.84 0.83 0.8 0.69 0.72 0.69 0.68"]},"execution_count":132,"metadata":{},"output_type":"execute_result"}],"source":["# Table 3\n","acc_80 = accuracy_table(all_datasets_df, 80)\n","acc_80"]},{"cell_type":"markdown","metadata":{"id":"CZX8c37MsgFL"},"source":["### Best w.r.t. number of attackers"]},{"cell_type":"code","execution_count":133,"metadata":{"id":"xgIKM1obsmd2"},"outputs":[],"source":["def accuracy_table_attackers(input_df, aggregate_fn):\n"," # Define strategies and attacks\n"," attacks = ['GAUSS', 'LIE', 'OPT', 'AGR-MM']\n"," dataset_names = [\"MNIST\", \"FMNIST\"]\n"," num_malicious = [0, 20, 60, 80]\n","\n"," # Create MultiIndex for the columns\n"," columns = pd.MultiIndex.from_product([dataset_names, num_malicious], names=['Dataset', '# Malicious'])\n","\n"," #######\n"," #columns = pd.MultiIndex.from_product([['MNIST', 'CIFAR-10'], ['GAUSS', 'LIE', 'OPT', 'AGR-MM'], ['LAST', 'BEST']])\n"," #######\n","\n","\n"," # Create an empty DataFrame with the defined columns and strategies\n"," df = pd.DataFrame(index=attacks, columns=columns)\n","\n"," filtered_df = input_df[(input_df['aggregate_fn'] == aggregate_fn) & (input_df['round'] >= 3)]\n"," baseline_df = filtered_df[filtered_df['strategy'] != 'FLANDERS']\n"," flanders_df = filtered_df[filtered_df['strategy'] == 'FLANDERS']\n","\n"," # Populate the DataFrame\n"," for dataset in 
dataset_names:\n"," for attack in attacks:\n"," for b in num_malicious:\n"," if b == 0:\n"," df.loc[attack, (dataset, b)] = round(flanders_df[(flanders_df['num_malicious']==b) & (flanders_df['attack_fn']=='GAUSS') & (flanders_df['dataset_name']==dataset)]['accuracy'].max(), 2)\n"," else:\n"," df.loc[attack, (dataset, b)] = round(flanders_df[(flanders_df['num_malicious']==b) & (flanders_df['attack_fn']==attack) & (flanders_df['dataset_name']==dataset)]['accuracy'].max(), 2)\n","\n"," return df"]},{"cell_type":"code","execution_count":134,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":206},"executionInfo":{"elapsed":305,"status":"ok","timestamp":1715954054792,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"0n9vhQiQuxk_","outputId":"500dfb31-f525-4289-f337-2c945bf50cd2"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
DatasetMNISTFMNIST
# Malicious02060800206080
GAUSS0.740.820.880.870.730.730.720.68
LIE0.740.860.880.830.730.70.710.73
OPT0.740.840.860.870.730.730.730.72
AGR-MM0.740.820.780.850.730.710.690.7
\n","
"],"text/plain":["Dataset MNIST FMNIST \n","# Malicious 0 20 60 80 0 20 60 80\n","GAUSS 0.74 0.82 0.88 0.87 0.73 0.73 0.72 0.68\n","LIE 0.74 0.86 0.88 0.83 0.73 0.7 0.71 0.73\n","OPT 0.74 0.84 0.86 0.87 0.73 0.73 0.73 0.72\n","AGR-MM 0.74 0.82 0.78 0.85 0.73 0.71 0.69 0.7"]},"execution_count":134,"metadata":{},"output_type":"execute_result"}],"source":["# Table 20\n","acc_att = accuracy_table_attackers(all_datasets_df, 'MultiKrum')\n","acc_att"]},{"cell_type":"markdown","metadata":{"id":"g6yDorrubUw1"},"source":["## Precision and Recall"]},{"cell_type":"code","execution_count":135,"metadata":{"id":"y0PlDlG0bKek"},"outputs":[],"source":["def pr_table(input_df, b):\n"," strategies = ['FLANDERS']\n"," attacks = ['GAUSS', 'LIE', 'OPT', 'AGR-MM']\n"," dataset_names = [\"MNIST\", \"FMNIST\"]\n","\n"," # Create MultiIndex for the columns\n"," columns = pd.MultiIndex.from_product([strategies, attacks, ['P', 'R']], names=['Strategy', 'Attack', 'P/R'])\n","\n"," # Create an empty DataFrame with the defined columns and strategies\n"," df = pd.DataFrame(index=dataset_names, columns=columns)\n","\n"," filtered_df = input_df[(input_df['num_malicious'] == b) & (input_df['round'] == 50) & (input_df['aggregate_fn']=='FedAvg')]\n"," flanders_df = filtered_df[filtered_df['strategy'] == 'FLANDERS']\n"," strat_dfs = [flanders_df]\n","\n"," # Populate the DataFrame\n"," for dataset in dataset_names:\n"," for attack in attacks:\n"," for idx, strategy in enumerate(strategies):\n"," tp = strat_dfs[idx][(strat_dfs[idx]['attack_fn']==attack) & (strat_dfs[idx]['dataset_name']==dataset)]['TP'].iloc[0]\n"," fp = strat_dfs[idx][(strat_dfs[idx]['attack_fn']==attack) & (strat_dfs[idx]['dataset_name']==dataset)]['FP'].iloc[0]\n"," fn = strat_dfs[idx][(strat_dfs[idx]['attack_fn']==attack) & (strat_dfs[idx]['dataset_name']==dataset)]['FN'].iloc[0]\n"," df.loc[dataset, (strategy, attack, 'P')] = round(tp / (tp+fp), 2)\n"," df.loc[dataset, (strategy, attack, 'R')] = round(tp / (tp+fn), 2)\n","\n"," 
return df"]},{"cell_type":"code","execution_count":136,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":257},"executionInfo":{"elapsed":248,"status":"ok","timestamp":1716367268457,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"rpQyziNRh3dn","outputId":"26e2c0f9-144d-4cad-d13f-1e2351aa2081"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
StrategyFLANDERS
AttackGAUSSLIEOPTAGR-MM
P/RPRPRPRPR
MNIST1.01.01.01.00.150.151.01.0
FMNIST1.01.01.01.00.160.161.01.0
\n","
"],"text/plain":["Strategy FLANDERS \n","Attack GAUSS LIE OPT AGR-MM \n","P/R P R P R P R P R\n","MNIST 1.0 1.0 1.0 1.0 0.15 0.15 1.0 1.0\n","FMNIST 1.0 1.0 1.0 1.0 0.16 0.16 1.0 1.0"]},"execution_count":136,"metadata":{},"output_type":"execute_result"}],"source":["# Table 1\n","pr_20 = pr_table(all_datasets_df, 20)\n","pr_20"]},{"cell_type":"code","execution_count":137,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":257},"executionInfo":{"elapsed":327,"status":"ok","timestamp":1716367273542,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"ccW0Ups3iMvZ","outputId":"ba945e17-1bbe-414f-9aa0-b03fb0fe107f"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
StrategyFLANDERS
AttackGAUSSLIEOPTAGR-MM
P/RPRPRPRPR
MNIST1.01.01.01.01.01.01.01.0
FMNIST1.01.01.01.01.01.01.01.0
\n","
"],"text/plain":["Strategy FLANDERS \n","Attack GAUSS LIE OPT AGR-MM \n","P/R P R P R P R P R\n","MNIST 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0\n","FMNIST 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0"]},"execution_count":137,"metadata":{},"output_type":"execute_result"}],"source":["# Table 2\n","pr_60 = pr_table(all_datasets_df, 60)\n","pr_60"]},{"cell_type":"code","execution_count":138,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":257},"executionInfo":{"elapsed":308,"status":"ok","timestamp":1716376750779,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"05a0Gv5piS2v","outputId":"0cf6555a-4cc8-4aa5-f9b6-c8286afa130a"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
StrategyFLANDERS
AttackGAUSSLIEOPTAGR-MM
P/RPRPRPRPR
MNIST1.01.01.01.01.01.01.01.0
FMNIST1.01.01.01.01.01.01.01.0
\n","
"],"text/plain":["Strategy FLANDERS \n","Attack GAUSS LIE OPT AGR-MM \n","P/R P R P R P R P R\n","MNIST 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0\n","FMNIST 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0"]},"execution_count":138,"metadata":{},"output_type":"execute_result"}],"source":["# Table 3\n","pr_80 = pr_table(all_datasets_df, 80)\n","pr_80"]},{"cell_type":"markdown","metadata":{"id":"bN7dTn2u0r6K"},"source":["# Plots"]},{"cell_type":"markdown","metadata":{"id":"xZ0wiadBsVUh"},"source":["## Accuracy over rounds"]},{"cell_type":"code","execution_count":139,"metadata":{"id":"LQ_uYJCtjdJS"},"outputs":[],"source":["df_mnist_acc_flanders = all_datasets_df[(all_datasets_df['strategy']=='FLANDERS') & (all_datasets_df['num_malicious']==80) & (all_datasets_df['dataset_name']=='MNIST') & (all_datasets_df['aggregate_fn']=='MultiKrum')]\n","df_mnist_acc_fedavg = all_datasets_df[(all_datasets_df['strategy']=='FedAvg') & (all_datasets_df['num_malicious']==80) & (all_datasets_df['dataset_name']=='MNIST')]\n","df_no_attack = all_datasets_df[(all_datasets_df['strategy']=='FedAvg') & (all_datasets_df['num_malicious']==0) & (all_datasets_df['dataset_name']=='MNIST')]"]},{"cell_type":"code","execution_count":140,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":408},"executionInfo":{"elapsed":651,"status":"ok","timestamp":1714668544212,"user":{"displayName":"Edoardo 
Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"NDkD_Qnd1iT7","outputId":"928cfa90-04a2-4624-9825-adb0d124aaf8"},"outputs":[{"data":{"image/png":"iVBORw0KGgoAAAANSUhEUgAAA68AAAGHCAYAAABf+GSbAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy80BEi2AAAACXBIWXMAAA9hAAAPYQGoP6dpAADc1klEQVR4nOzdd3wURRvA8d9eTe8NElIIvUvvTUABFREsYMWKvXfF3iv2jq8aQFEQKdJFgdB7C4SQhJBKer+67x8bgjEEQggkwPP1c5/kdmdn5y6RyXMz84yiqqqKEEIIIYQQQgjRiOkaugFCCCGEEEIIIcTJSPAqhBBCCCGEEKLRk+BVCCGEEEIIIUSjJ8GrEEIIIYQQQohGT4JXIYQQQgghhBCNngSvQgghhBBCCCEaPQlehRBCCCGEEEI0ehK8CiGEEEIIIYRo9CR4FUIIIYQQQgjR6DXa4HXfvn18/PHH3HLLLXTs2BGDwYCiKLz66qunVe+yZcsYNWoUAQEBuLq60qZNG5599lmKi4vrqeVCCCFEw5I+VAghxPnI0NANqMnnn3/O1KlT67XODz74gEceeQRFURgwYADBwcGsWrWK119/nd9++43Vq1cTEBBQr/cUQgghzjbpQ4UQQpyPGu3Ia4cOHXjssceIiYlh79693HjjjadV39atW3n00UfR6/UsWLCAv//+m19++YWEhAQuvvhi9u3bx+TJk+up9UIIIUTDkT5UCCHE+ajRjrzefvvtVZ7rdKcXZ7/xxhuoqsqkSZMYOXJk5XE3Nze+/fZbmjdvzm+//UZcXBxt2rQ5rXsJIYQQDUn6UCGEEOejRjvyWp+sVisLFiwAYOLEidXOR0RE0K9fPwDmzJlzVtsmhBBCNGbShwohhGgsLojgdf/+/ZSWlgLQvXv345Y5enzr1q1nrV1CCCFEYyd9qBBCiMbiggheExMTAfDx8cHT0/O4ZZo1a1alrBBCCCGkDxVCCNF4NNo1r/WpqKgIAHd39xrLeHh4AFBYWHjCuiwWCxaLpfK50+kkNzcXf39/FEWph9YKIYQ416iqSlFREU2bNj3t9aWNjfShQgghzqRT6UMviOC1Pr3xxhu89NJLDd0MIYQQjVBKSgphYWEN3YxGS/pQIYQQNalNH3pBBK9HpzmVlJTUWOboButeXl4nrOvpp5/mkUceqXxeUFBAeHg4KSkpJ71WCCHE+amwsJBmzZrVOK32XCZ9qBBCiDPpVPrQCyJ4jYyMBCA/P5+ioqLjvjEpKSlVytbEbDZjNpurHffy8pKOVwghLnDn49RX6UOFEEKcDbXpQ8+vhTk1aN26NW5ubgBs2rTpuGWOHu/atetZa5cQQgjR2EkfKoQQorG4IIJXk8nE6NGjAZg+fXq188nJycTGxgIwduzYs9o2IYQQojGTPlQIIURjcV4Fr5988glt2rThpptuqnbuqaeeQlEUpk2bxqJFiyqPl5aWctttt+FwOBg3bhxt2rQ5m00WQgghGgXpQ4UQQjR2jXbN65YtW7jnnnsqnyckJADw5ZdfMn/+/Mrjc+bMoUmTJgBkZ2ezb98+QkJCqtXXtWtX3nvvPR555BFGjRrFoEGDCAoKYtWqVaSnp9O6dWu++OKLM/yqhBBCiDNP+lAhhBDno0YbvBYWFrJ+/fpqxw8fPszhw4crn/97v7iTefjhh+nYsSPvvfceGzZsoKSkhPDwcJ5++mmefvrp8zJLpBBCiAuP9KFCiMZGVVUcDgd2u72hmyLOIqPRiF6vr7f6FFVV1Xqr7QJUWFiIt7c3BQUFkilRCCEuUNIX1I28b0Kc/1RVJT8/nyNHjuBwOBq6OaIB+Pj4
EBISUmM24VPpCxrtyKsQQgghhBDi3JaRkUF+fn7lllgGg+G83FZMVKeqKqWlpWRlZQFULlM5HRK8CiGEEEIIIeqdw+GgoKCAwMBAAgICGro5ogG4uroCkJWVRVBQ0GlPIT6vsg0LIYQQQgghGgebzYaqqri7uzd0U0QDOrpXuM1mO+26JHgVQgghhBBCnDEyTfjCVp8/fwlehRBCCCGEEEI0ehK8CiGEEEIIIYRo9CR4FUIIIYQQQgjR6EnwKoQQQgghhBCnITExkfvuu49WrVrh5uaGm5sb7dq1495772XHjh3HveaJJ55AURSuvfba455fuXIliqLw66+/Hvf8fffdV209qdVqZerUqVx00UV4eXnh4+ND+/btufPOO4mLi6tSdufOnYwfP56IiAhcXFwIDQ1l+PDhfPzxx3V4B84O2SpHCCGEEEIIIepo/vz5XHvttRgMBq6//no6d+6MTqcjLi6O2bNn8/nnn5OYmEhERETlNaqqMmPGDCIjI5k3bx5FRUV4enqedlvGjRvHn3/+yYQJE7jjjjuw2WzExcUxf/58+vbtS5s2bQCIjY1lyJAhhIeHc8cddxASEkJKSgrr1q1j6tSp3H///afdljNBglchhBBCCCGEqIOEhASuu+46IiIiWL58OU2aNKly/q233uKzzz5Dp6s64XXlypUcPnyYFStWcMkllzB79mxuvvnm02rLxo0bmT9/Pq+99hrPPPNMlXOffPIJ+fn5lc9fe+01vL292bhxIz4+PlXKZmVlnVY7ziSZNiyEEEIIIYQQdfD2229TUlLCtGnTqgWuAAaDgQceeIBmzZpVOR4TE0O7du0YMmQIw4YNIyYm5rTbkpCQAEC/fv2qndPr9fj7+1cp2759+2qBK0BQUNBpt+VMkeBVCCGEEEIIIepg/vz5tGjRgl69etX6GovFwm+//caECRMAmDBhAitWrCAjI+O02nJ0WnJMTAx2u/2kZTdv3syuXbtO655nmwSvQgghhBBCiLNGVVVKrfZG81BVtU6vo7CwkLS0NDp06FDtXH5+PtnZ2ZWPsrKyynPz588nPz+f6667DoArr7wSo9HIzJkz6/aGVujduzeDBg3i66+/JiwsjIkTJ/LZZ59x6NChamUfe+wxSktL6dKlC3379uXJJ59kyZIl2Gy202rDmSZrXoUQQgghhBBnTZnNQbspixu6GZX2vHwJbqZTD4sKCwsB8PDwqHZu8ODBbN++vfL5O++8w2OPPQZoI6Pdu3enRYsWAHh6ejJ69GhiYmJ46KGH6vAKNIqisHjxYt59911++uknZsyYwYwZM7j33nu55ppr+PLLLyunCQ8fPpy1a9fyxhtvsHjxYtauXcvbb79NYGAg33zzDVdccUWd23EmycirEEIIIYQQQpyio9mBi4uLq5378ssvWbp0KT/99FOV4/n5+SxcuJBBgwZx4MCByke/fv3YtGkT+/fvP602mc1mnn32Wfbu3UtaWhozZsygd+/e/PLLL9x3331Vyvbo0YPZs2eTl5fHhg0bePrppykqKmL8+PHs2bPntNpxpsjIqxBCCCGEEOKscTXq2fPyJQ3djEquRn2drvP29qZJkybHXTd6dA1sUlJSleOzZs3CYrHw3nvv8d5771W7LiYmhpdeegkAFxcXgCpTjv+ttLS0sszxNGnShOuuu45x48bRvn17fvnlF77//nsMhqohoMlkokePHvTo0YNWrVoxadIkZs2axQsvvFDzi28gErwKIYQQQgghzhpFUeo0TbcxGj16NN988w0bNmygZ8+eJy0fExNDhw4djhsYfvnll0yfPr0yeD2agGnfvn3HrWvfvn1V9o6tidFopFOnTsTHx5OdnU1ISEiNZbt37w5Aenr6SettCDJtWAghhBBCCCHq4IknnsDNzY1bb72VzMzMauf/nQwqJSWFf/75h2uuuYbx48dXe0yaNIkDBw6wfv16QBs57dKlCz/99FOVPVoBNm/ezLp16xg5cmTl
sfj4+OMmZ8rPz2ft2rX4+voSGBgIwF9//XXcRFULFy4EoHXr1qf+ZpwF58dHHkIIIYQQQghxlrVs2ZLp06czYcIEWrduzfXXX0/nzp1RVZXExESmT5+OTqcjLCyM6dOno6pqjcmQRo0ahcFgICYmpnLa8fvvv88ll1xCly5duOWWW2jatCl79+7lq6++okmTJjz99NOV12/fvp2JEycycuRIBgwYgJ+fH6mpqfzvf/8jLS2NDz/8EL1emyJ9//33U1paytixY2nTpg1Wq5XY2Fh+/vlnIiMjmTRp0pl/8+pAUeuaG1oAWpYxb29vCgoK8PLyaujmCCGEaADSF9SNvG9CnN/Ky8tJTEwkKirqhGszzwcJCQm89957LF26lMOHD6MoChEREQwePJjJkyfTuXNnOnXqREFBAcnJyTXWM2TIEPbs2UNqamrl2tT169fz6quvsmbNGoqKiggODmbkyJG8+OKLhIaGVl6blZXFtGnT+PPPP9m/fz9HjhzB09OTiy66iHvuuYdx48ZVll20aBGzZs0iNjaWw4cPY7VaCQ8PZ+TIkTz33HMEBQXV23tzst+DU+kLJHg9TdLxCiGEkL6gbuR9E+L8diEFr6Jm9Rm8yppXIYQQQgghhBCNngSvQgghhBBCCCEaPQlehRBCCCGEEEI0ehK8CiGEEEIIIYRo9CR4FUIIIYQQQgjR6EnwKoQQQgghhBCi0ZPgVQghhBBCCCFEoyfBqxBCCCGEEEKIRk+CVyGEEEIIIYQQjZ6hoRsghBDi7Ci3OXAx6hu6GXXmVJ3kW/LJLssmpyyH7LJscstzK58XWYtoF9COIc2G0Nq3NYqiNHSThRBCCFGPJHgVQojzXEpuKQ/9vI3NyXn4uZsI93Mjwt+NCD83mvm5EeHvjq+bkSPFFrIKLWQWlpNZaCGzqJyswnKKyu14uhjwcjFqX12NeLkY8XI14G42oFcUdIqCooBOUdDptK8ApVYHJRY7JRYHpVY7xRY7pVYHVoeTYE8XQn1dCfN1JdTHlWa+bij6MjZlbiK9JJ3M0kwySzLJKMkgszSTrNIsbE7bCV/rysMr+WzbZzRxb8LgZoMZHDaYcLf2JB8pJdjDiI+LAQ93N1zMJgCKS8soKS0jOMDvjP8chBBCCHF6JHgVQojz2KJd6Tz+6w6Kyu0A5JZYyS2xsi0l/xRrUlH0xSjGPHSmPHTGPBRjLoq+BJwuqA63iofrv76647QGgXqyrsaOwWMfBu9tGDz2oujsJyztY/LB2+SDt8EHd50n7ngQGRSJTudKbNo69uZrwe+MuBnMiJuB6jBjL2mN0xKAwelCuHcgXZpF0qlJUzytTrIO5XLb1Vec4vshhBBCiLNNglchhGgAdocTAIP+zKQesNgdvLEwju9jk1AMhQS3nIfONYkozzaEunTA3dmG8pKmHM61kJxbQkGpjSAvF4I8zQR6GjC6ZWDRJ5Ln2E9meSI55RnYVMspt0NBj4fSDF9Dc4LM0TR1aUWYRyRGnYldOdvYWfAX2c4NOHWlldc4LIE4LSGoNi+cdm/0Dh+CXP3RFzjxshuwq0ZKVQM5qoEy1UCpasBa2Z1dAcpI9O7xGDz2YvDci85QjNFrR2X9qUDqEVhw5Fg7Z8T8zLLrY+r4bgshhLgQff/990yaNImNGzfSvXv3aueTkpKIiorinXfe4bHHHgNg5cqVDBkypMY6Z8yYwXXXXXfG2nyuk+BVCCHOsr3phdz54yaKy+3cP7QlN/SOwGSovyA2OaeE+6ZvZWdqAXr3ffhG/EapWgg22Jm7kZ1sBMDN4EbXyK7c2rsnoR6h7MrZzPas7cTm7KE8r7xavQoKQW5BhHqEag/PUPxc/CixlVBgKaDQWkhuWR65pXkUWgvILj9Csb2YIjWJIlsSh2wroBiMuUa8TF7klOdoFevAZHcjqCQan6I2OC0hFDhdcQsOI7HYRl6pjUMFJ3/dLkYdUQEehPuaCTA3ITpwFC1DPFFd09hdsJWM
kiMcyj9CRnEOeeV5lDoKceqKURQndrup3t5/IYQQ4mQeeOABevToUe14nz59GqA15w4JXoUQoh7ZnDa2Zm5l5eGVrEldg4fRg4ltJ3JJ5CUYdAZWxGVy//StlFgdALw8fw/fxybx+CWtuaxTk9NOMrRwZzpP/rqDIks5Xk2XoXqvxKJCG782PNLtEQ4WHGRD+gY2ZW6i0FrI6tTVrE5dXa0eL5MXnQI70TmwM00N4ViyFQwWV8rLrBTllVGUUkp2SSnJpblMumY0bTtGArB+625mrl5GEBCNSrmhmEJTNkWmbArNRyj3KKDUUUJOeQ4uOhd8C5vR3NGe5ubW+AZ44hXlgZeHG26uLrRtGUmgnw8puWVsSsxiz86DFB9KxR7RnMAQf5p6u9DE25UmPi409XbFx81Yw/sXzkB6H/f9yiuxsPFQGiaDelrvuxBCCHEqBgwYwPjx4xu6GeccCV6FEOI0FVgKWJO6hpWHV7I6dTVF1qIq53es2sHHWz+mpfkyFsSG4XQa6Rvtz8gOIXy04gCHcku5f8ZWvll1kKdHtaV3c/9TbkNGQTnvLdnHrM2HUYw5BLSahUWfBMDENhN5pPsjmPVm+jTtw/Vtrycnv4BV8evYnLWJPYU7ybfl4eMIwqs0EHOhLw9dcyNtoiMBWLdlFz+vXF7tnmZLOV4lxVgsx6YTu5hNuLm6YDIaMBmNeLiH4unRFU93N7w83GjfujkOtzIySjJo69sWV6Mbel3VUWfV4cCalET52liOxO2FvXF0iIujbU7FSK3RiHuP7ngMGYrHkCGYmnqf8vt1lK+7mRFto+p8vRBCCCHOHglehRDnFJvTxvr09Zj1Zlr6tMTHxadB2mF32vk75W9+2f8L69PX41Adled8zb4MCBvAoLBBJBUm8dOen0gtTiW1+Etco91p6zaKj8c8gL+bD+O6hfHNqkS+/PsA29PTmPj9fro1N3BDr5aMbt0Dg+7E/0wXlNn48u8EvluTSLnNicFrOx6hc7BQjpvOnSu9ricstSXf7pvPZRf3Izw0BIB9CYf4e2Ec4EE4fQj/T73FJcemDYcE+tO1Y2s83FzxdDHhG78P19hVKBs3gN2O8cBWCu6+G6+Rl9K5XUs6t2t50vevmWezKs8dBQUUr1xJ4ZKllMTGopaVVb9Ip0Pv54cjO5uS2LWUxK4l87XXMLdsiceQIXgOHYJL586yRY4QQohGr6ioiOzs7GrH/f39pR87AQlehRDnBIfTwYLEBXy+7XMOFx+uPO7v4k8L3xa08Dn2aB/QHqPOeEbakVOWw+z42fyy/xcySjIqj0d7RzOo2SAGNxtMp4BO6HXafqoFpTb+2tCa9JylmPz+QWfKY591FqPmLOCioIvIs+SRXZaNqUUuOlXLshsHPLcBnl/nQphrR4ZE9Gd826FEekegKApl5RYOpWfzw9ok5iUcplSXhM7nMH5eKdhMCTgAn/JgOhwZQpbDSRb7AMg4klsZvAYH+BHZrAl+Pl74eXvi4+2Jl4e79vB0w9PDvfK1RTZrQkhxPvlz5lA4fwGO3NzKc4rRiC0hgbTHHiP7008JuHsyXqNGoRhO3r3Yc3MpWraMoiVLKVm3DuzHsgwrrq64tG6NS7u2mNu0waVtW8wtW6JzccFyMJHiv/6i+K+/KN2yBUt8PJb4eHK++gpTVBS+EybgPfZK9J6etfqZ2vPyUMvLMTZpUqvyQgghTpOqgq305OXOFqMbnOWA8dZbbz3u8fT0dEJCQs5qW84lErwKIRo1p+pkcdJiPtv2GUmFSYA2sulmdCO1OJWc8hxy0nNYn76+8poIrwhe7PMi3UOqZ/6rC1VV2Zm9kxlxM1ictLhyr1Efsw9XtbyKcS3HEe51bOzS6VQpKLORklvKAzO2cjC7BDdTf96/7B7srlv5dte3xOfFsyZtTbV7eRq9cNg8KHHkg76UFMtGfti/kR/2f4DB7oOvJRyHxY1sUy64pKELL8a14lobgAqd7H3p5zYc
7/aeeHq4V07ZjWh2LDiLjgjlwVuvqfH12lJTKdy1i/Lduyn+ZxWWffsqz+sDAvC+7DK8x16JsWlT8n76iZzv/4c1MZG0J54k+9PP8J88Ge/LLwO9Hkd+PrbUNGxpqdjS0rClpmGJi6N082ZwOivrNbdsgefwEXgOH4a5VSsUvf647TM3j8LcPAr/227FkZ9P8apVWjC78m+siYlkvv46WR9+iPcVl+M7cSIurVpVud5ZXk7Zli2UrF1LyZpYyvfuxfuqsTR97bUT/BYIIYSoN7ZSeL1pQ7fimGfSwOR+8nL1aMqUKQwYMKDacT8/2Xf8RCR4FUI0SqqqsvzQcj7d9ikH8g8A4G32ZlL7SUxoMwE3oxultlIS8hM4kH+g8rErexfJhclMWjyJa1tfy0NdH8LD5FGn+yfkJ7D80HKWHVpGXG5c5blWPu3p5jMaV1tXEvZZeGxTKoVlSRSV2ykst1FssaP+K/9PU28Xvrm5B+2aegFNuSTiUpYn/MXB7CSMdlcUixG1VI+tSKWooJxh/XvQPDqCGdvX8fuepaRYdqJzO4TdkM8RQz64w9FVogo6Wvq2pGNAB9r5t6NHSA+ivKuv4VRVFbW0VBtltFhQLRacFguqxYpqtWA/kk357t2VD0dB1fS+itGIx8UX433lGDz6968yshpw99343ngjeTHTyZ02DWtyMulPP03W229r9yit+dN1l/bt8Rw+HM8RIzA3P/W1p3ofH7wvvxzvyy/HUVxCwR9zyZs+HeuBBPJn/kz+zJ9x69EDn/HjsGVlUbp2LaWbt6Baqm77Y886UsMdhBBCiPrXsWNHhg0b1tDNOOdI8CqEaBRUVeVw0WF25+xmV/YuYtNjic+LB8DT6MlN7W/ihrY3VAlE3YxudAzsSMfAjpXHCq2FvL/pfX6L/42f9/3MypSVTOkzhYFhA0/aBqfqZMeRHaw4tILlh5ZzqOhQ5TkdRtxs3SjI7MnmvU3ZDEDSCevrkH+ISH8XhvVqj7uzFPAC4EDSYRbN2F1RqqTadTn74mlnVri7z2AmtO/OvOVrSLPr2Fh4gP0lO0FfTv/wztzSbQCdgtrhYnA57v1Vm43SzZspWr6C4hUrsKWmnvQ9qGQ04tKqFS7t2+PauROew4ah9645MZLew4OAu+7E9/rryZ85g5xvv8ORl3fsfGAAxqZNMYWGYmzaFGNYM9z798cUFlr7Np2E3sMdv4kT8Z0wgdL1G8ibPp2i5csp3biR0o0bq5Q1BAXh3rcv7n374Na7N8agoHprhxBCiJMwummjnY2F0a2hWyBqSYJXIcRpU1X1pMkFHE4HxbZiim3FFFmLKLIWkVeeR1xuHLtzdrM7ZzcFlqqjfe5Gd25oewM3tb8JL5NXrdriZfLixb4vMjJqJC/Gvsjh4sPcu/xeRkaN5KmeT+Hnok3HsTgspBSmkFyYTFJhEgcLDrImdc2xvUcBk86Ej9KOQynR2IvbUeDQphQZdApNPfW0DvGiW/MgwnxdsZSWsOyv1TisFkx2CwO2r6HT/p0AxG9uyb577iMiTFvD4u/rjUGvx8/HCx9vD3y8PPHx8sDXbMT7z/nw4tMk2u249eqF/x13cMOVIyre3+Enfa8dRUWUrFpF0Yq/KP7nH5yFhdULGY3oTCYUsxnFbEZnMqHz8sKlbVtc2rfHpX17zK1aojOd+t6neg93/G+/Hd+JEynfuxe9nx/Gpk3Rmc2nXFddKYqCe+9euPfuhS0jg7yff6Z4+QqMYWG49+mDe7++mKKitPdRVWHzNPAcC66+Z62NQghxQVOUsz5NV5wfJHgVQpwSp+okuTCZHUd2aI/sHcTnxaOiolf0GHQGDIoBvU77XoeOEnsJJbbqI4z/ZdQZae3bmvYB7Wnv354hzYbUOZtwrya9mD1mNp9t+4wf9vzAn4l/sjZtLe3825FcmExacRoq1ff2dDe4M6jZIPo1GUzMChPrE8tRUIk2FOJrzMBLZ8FDsaGzwojInowcFA1A
ZnYuqxcX4l2Yz8WxywjI0zIIqkDLpHjUl58lrzwfn/HjCfD15q1n70VXEYSqqkrxihVkPPca9rR0rSGKQun69ZSuX4+5XVsC7rgDzxEjqq0DVR0Oynfv1rLvrl1L6ZYtYLNVntf7+eExeDCeQ4fg1rMnOnf3GteS1iedmxtu3brVb6VxC2DXbxDSETpdB14nT7BkDAkh6MEHCXrwweonnU5Y8hys+xS2zYBJC0F/ZhJ9CSGEEOL0SfAqxDkoNi2WFYdW0K9pP/qF9sOkP/URstpyOB3szN7J2rS1bM/ezs4jOym0Hmc0Dy2wPZrMqCZmvRlPk2flo4VPC9r7t6d9QHta+bTCWI/Bg6vBlXs73s/gJkN4bfOrxOfFE5sWW3nehBlPpy9uNm/M5R54lgVyTc8x9LqoB7d+v5GdqQXocdLdlElTfQk6RcHL0x0f7wB8vTwJCTq2H6uftycPRPhimfo/1LIy9D4+NHnjdQz+/qS/8CKWvXvJmPICBb/PJeTFFyqTCFkPHybz1dcoXrkSAEPTJoQ8+ywubduS+7//kffLLCx79pL68CMYw8Pxv/VW3Hp0p3TDBi1gXb++2uiqKToaz6FD8BgyFNfOnc5KsHpG5R+ChU/A/j+157t+g+UvQ/RQ6DIRWo8G4/GnTtfIYYc/7oft07Xn7cdK4CqEEKJOvvvuOxYtWlTt+JgxY2q8ZtWqVZSXl1c73qlTJzp16lSv7TufNPrgddasWXz66ads374dq9VKixYtuP7663n44YcxGk/tD42SkhI++ugjfvvtN/bv309ZWRn+/v50796dO++8kyuuuOIMvQoh6s/ipMU89c9T2FU7P+/7GQ+jB0PDh3JJ5CX0adKnXoK/3PJc1qSuYVXqKmLTYqtN5zXrzbT3b0/HgI50CuxEO/92mPVmHKoDu9OO3Wmv/N6hOnAzuFUGq2ci0HY4HKRn5ZCacYTUjCPk5BeSX1BEXkERZeUWhg/oyc+jf2ZR0iJyiwr5a9Eu3GxemJyuKBybgqsAh3OtvPX5GlJyy/BzM/Jov0D6tOyGn4+2lYxOp6t+/+ISsl56ifJ58wBw69mTpu+8jTE4GICoWb+Q+9NPHPnoY8q2bCHxqnH433IzOnd3sr/4UkseZDTiP2kSAZPvQuemrb0Jfvpp/CdPJi9mOnk//ojt0CEyXnyx2v11np649+qFW7gJj+AyTF0GQkQ/8GlWrWy9sFshYwekrIeyPOh6E/j8d6fYeuCwwdpP4e+3tMyUOiN0vRGy9sKhtXBgmfZw8YYO46HL9RDa9eTbHdjK4ddbYd8CUPQw5lPoMqH+298ISB8qhBBn3ueff37c44MHD67xmo8++ui4x1944QUJXk9AUVW1+ry5RuKhhx5i6tSpGAwGhg4dioeHBytWrCA/P5/+/fuzZMkSXF1dT14RkJOTw8CBA9mzZw8eHh707dsXHx8fDhw4wJYtWwB44IEHmDp16im1sbCwEG9vbwoKCvDyqt2aPCHqal7CPJ5b8xzNDzsYlRHMvG5OEjm2wbWnyZOhzYYyqNkg3AxuKCgoSsWj4j8Aq9OKzWHD6rRidVixOW1YHVayy7KJTYtlV/auKlNqvUxe9G3al67BXekU2IlWvq3O2D6qAA6nyoGsYnQK+Lqb8HE1YtDrtOm1xSVkLFqMobAQL1cX8orKSMnMY/OuA9gcCjZVx94m0ZT6+eCnK8dLsdKveweuuexiACxWG8tWbcTD3RV3N1fc3Vy0r64uJORZufOnreSX2ojwd+P7ST2JCjjxmpyy7dtJffwJbIcOgV5P4P334X/HHccd7bSlp5Px2msUL1te5bhb796ETHkec/PmNd7HWVpK/q+/kjPte+zZ2bhddBHuffvg3qcPLi2jUP58BHbOqnqRTwREDoDI/tqjrsFsSTakbNCC1ZQNkLYF7P/6tNjoBoOegN73gqEWH05kx8PBleARDP4twK959ZHT5LUw/2E4sld7HtEPRr8PQW205zkJ
sH2GNt238Ni+v4R2h4GPQatLjx/ElhfCzImQtAr0Zrj6e2gz6lTejeNqjH2B9KFCiIZWXl5OYmIiUVFRuLic4gwZcd442e/BqfQFjTZ4/f333xk7diweHh78/fffdO3aFYDs7GyGDh3Kzp07efTRR3n33XdrVd+DDz7IRx99RLdu3ViyZEmVPZQWLlzImDFjsNvtrF27lt69e9e6ndLxipM5VHiIzNJMugR2Oa1R0Tnxc3gh9gXCM528HgNGiwNjeDOKX32ARexiSdISjpTV33Yfbf3a0j+0PwPCBtAxoCMG3ZmdqJFVWM4/8dn8vf8Iq+KPkF9adfqxSacSVJbHA5t+puORhBPWZVP0fNnxChZE9cXNbKBzmA/dInzpGuGDv7sZp6pWPLQ9WZ0qJOWU8OIfu7HYnXRu5sO3N3cnwOP4SYZsaWkULlxIwYKFWPZqwZWhaRNC330Xt4p/q06kaMUKMl99DdVuJ+jJJ/AaNeqkCa+OUlUVHI5jW9XkHoSfb4TMXdooYufr4Mg+SNsKqqPqxV5hENwOgtpCUDsIbAOBrcFYEcA4nZCXqNWVsevY14JDVOPqC816QVk+pKzTjgW2gdHvaYFy9YZD8hqI/eTY9N9KCng3g4AWWjBblg87f9FOufnDiFeh84TjB6NOJyT+Ddumw94/jgXVwR1gwCPQ7krQVXyQUJINP42D9G1g8oQJMyCq+h57ddHY+gLpQ4UQjYEErwIukOC1Z8+ebNy4kVdffZVnn322yrnVq1czYMAAzGYzmZmZeJ9g+4ajOnbsyK5du/jll1+4+uqrq50fMWIES5cu5f333+fhhx+udTul4xU1OZB3gC93fMnipMWoqHiaPBkeMZxLIy+lZ0hP9Lrar0P8Zd8vvLLuFXyLVD6IMeGWVwY6HTid6Dw8CH3/PdwG9Gdr1lYWJy1m+5HtOFUnqqriRPsKVH416o2YdCaMeiNGnRGT3oRJZ8LN6Eb34O70C+1HkNuxrUNUVcV+5Aj2rCPYs7KOPY5oz50lJXheeim+11yNUssMtU6nyvrEHBbvTOXv/UdIzK2676a7SY/VasWG9j51z9zLo5tn4mMtoVxvZHtAC+w6Papej6uLCVdXMx5uZvyKc/HYvRWAfyK6837HsVgMtc90e3GbID6eeBFupqrBuj03l8JFiyhcsJCyzZuPnTAY8Bo1kpBnnz3hVjL/pTqdoKrVR2hVFXb8DAWHod0YCGhZcyX7l8Ds26G8ANyDtFHEyH7aOUsRHFqvjTAmr4HULdWDWQBFB75RWjB6JA6sxce/V0BraNZTC1jDe2tB5tFsvdtnaomPSitmAXSeAMNfAY9AbervnrkQ+7EWNGo31QJcWylkH4D/TEuv1PVmGPYiuNVyw/biLFj7CWz89tjr8G8B/R+BiL4QczXkxGsB8Q2/QdOLaldvLTS2vkD6UCFEYyDBq4D6DV4b5ZrX1NRUNlbsyTdx4sRq5/v370+zZs1ISUlh4cKFTJhw8rVKtf0fJiAg4NQaKxpEobWQ3+N/p9ReSqRXJBFeEUR4ReB2nH26CiwF7M3dy56cPezO3k1cbhx+Ln7c3P5mhoYPRadUX8N4Ovbl7uPLHV+yNHlp5TEfsw/5lnxmx89mdvxs/F38GRE5glFRo+gU2OmEbfhxz4+8vfFtzFaVt+b74JaXgyk6mmaffUr6s89RumkTKZPvJuiJx+l68810Cz69DK/OkhIs++PJ27cSy759WPbvp3z//uNvufIvpZs2kfv99wQ++ABeo0ejHGdtqKqqrN60i4W7MliSWE6O9d9lVILNTq7p15qBrQLp0syH6XMWYUAlcsVS/NYuBsAa0Zz8m66jZ4CFprp8vOxH0BWlQ1EyFKajluSQ26wjWUtTGZi8iYH6fPbc9QxrrW5sT8mn1OpApwOdoqBXFBSl4nudwtA2QTwyvBUG/bF2WVNSyHzrLYr/WgmOiuBPUXDr0QOv0aPxHDEcg++pb7FyvPcHSzHMe0BL
SASw4hVo0hk6Xg3trwLvij1RnU74521Y+SagQlgPuOYH8Gp6rC6zJ7Qcpj1AC2YzdmrrRSsfe6AsF3L/NZKtN2sjsyEdILhjxdf2NW8joyjaetFWl2hJlDZ/r03n3bdQW4O6dx4UpGhlDS5agqXe92qjrKAFvyXZkHPg2KMsT7s2vNepvakeQTD8Zej3EGz4CtZ9rtU39x60Fc2qNvp84xwIbHVqdZ9DpA8VQghxvmqUI6/z58/n8ssvx8/Pj5ycnOOWueqqq5gzZw6PP/44b7/99knrnDJlCq+88soJpzwFBAQQFxdXq0+hj5JPjc+uQmshP+35iZ/2/ESRraja+SDXICK8Iwj3DKfEVsLunN2kFKXUWF8Lnxbc0fEOLom85KQjoaqqYlftGBTDcad47s3Zy5c7vmT5oWPrGYdHDOeuTnfRwqcFmzM382fSnyxNXlolAZK/iz8tfFvQ3Ls5Ud5RRHlH0dy7OYGugXy36zs+3PIhilPlo6VhBG9JRu/nR+QvP2MKC0O1Wkl/+WUKftWCHe/x42gyZUqNo5+qw4EtNbVy5NRWOXqqjaDa0tKwpdTwful0GAICMAQFVTwCMQQGYggKwllSQs633+I4UjHyFhlF4bhryAhvjtFoZNTQPpRa7czckMLHf2wmPOcwHbIP0qIglTJ3d8rCwgjo3Iaorh0Z2PvYaJg1OZnURx6lfPduAHyvn0hQ50J0W7464c8KoMTagtSVJhy5+eg8PGj61pt4XnzxSa/793uV++OPHJn6EWpZGQAu7dvjddlleI0aWZmMqd4c2adN/83ep03/De8Nh9b9a7RU0dZ9dhwH+xfD/oqshj1uh0veqN1a0/9SVW208sheLWAMbKuNVOpP43PNw5tg/kNaoHyUWwD0vBN63AbuZzG4sRTBpu+0qcolWeDfUgtcz0Aiq8bUF0gfKoRoLGTkVcAFMPKamJgIQHh4zdkrmzVrVqXsyTz55JNs2LCBxYsXExERQb9+/SqTTWzevJl+/frx7bffnrTTtVgsWCzHpjcWnmQ0StSPAksBP+3VgtZimzYd8OgWK4eKDpFcmExueS5ZZVlklWWxMWNjletDPUJp79+edv7taOvXls1Zm5m+dzoH8g/w5Kon+Wz7Z9zW4TYui76sMhFRXnkeO7N3ao8j2tejW8QYdAaMOuOxr4qBrLIsABQULom8hDs73UlL32NTPns26UnPJj15ptczrEtbx5+Jf7IiZQU55TnkpOewPn19lTa7GdwotZcC8M6ujgRv2YZiMhH26SeYwsK0e5lMNHnlFVxatiTzrbcp+PU3rElJhH30ETp3dyz74ynfu4fyvXsp37MHy779qMdJy/5fhsBAzK1aYW7dmryQcJZZPFlYYKYMPaqqpXJSVXAWqyiFDpqWZZLZ/25G7F/DpfvX4JqUiNd7b1EY2JQdXfuSt/0gqf/E0iIjgR/yUzCozqo3TNgIf4Pe25tDnTrh2rEjOk9Psj/+GGdpKXpvb5q8/Dye2f+DLdoILBH9wCtU2+vTs+Lh1RSsJTBnMu7FB4ga5kPqrg6U7TnI4Xu1REqBDz5wbL1oDcr37yf9+ecp375D+1n07EnI889hblnDFF6nA0qOQGEqFKZVPCq+dw/UMuGeKAvuzl/hjwfAVqK9jvHTIKKPNiK553ft/KG1kLxae4A2QnrZB3DR9Sf9edZIUcAzWHvUl7DucMdK2PStNvrafix0uvbYutqzyewJ/R7UAufEVdpIrkvtA6tzlfShQgghzleNMngtKtJG1Nzda87y6eHhAdS+43N3d2fevHk888wzvPfeeyxevLjynL+/P8OGDSM0NPSk9bzxxhu89NJLtbqnOH01Ba2TO09meMTwKtNtCywFHCo8RFJhEsmFybgYXGjn3452fu3wcfGpUm/f0L7c3P5mZuydwY97fyS5MJkpsVP4YvsXdArsdNIR26PbwfybTtFxaeSl3NnpTqJ9oiuPq6oKTmfl2kajzsiAsAEM
CBtAub2cuNw4kgqTOFhwkMSCRJIKkkgpSqkMXN/IHkz4gmUANH3zDdwuqrpOT1EU/G6+GVNUFKmPPErZps0kjLgEZ3k52Ku2EUAxmzGEBGMMPDp6GnRsNDU4CHOLFhSZPfhjexq/bj7Mzj0FgApUD3o7Kwd4zfgdHXRJ7DRG8kire5gRMYir9//FmIOrCTuSRtjiXwH4dxojfXAw7j164NqxA7a0NMq276B8zx4cBQWUrFpFyapVlWXdunen6ZRHMC6/T0sgZHCBsV9oQVFN7lwJP9+AMXUTER1jyQy9hLyl28n5+msKFszHvXcf3Hv1xK1XL4whIcd+VlYr2V9+RfZXX4HNhs7Dg6AnHsdn/Pjq03wtRdra1M3fa1NwndXf60rrPgO/aC2I63S1ll0XtC1nljyrTXEFiBoI477Vpr+CNkrZ43btkZ+iTSfe9Zt2rys/h6Zdar5nQ9IboNdd2qMxMLpCqxEN3YqzRvpQIYQQ56tGGbyeCenp6YwZM4YdO3bw6quvMmHCBIKCgtizZw/PPfccL730Er///jurVq3C09OzxnqefvppHnnkkcrnhYWFlZ9gi/r1Z+KfvLLuFYqs2h9iLX1bMrnTZIZFDDvuGlFvszcdAzvSMbBjrer3MnlxV+e7uLHdjfyy7xe+3/09aSVppJWkVZaJ9IqkU2AnOgR0oFNAJ8I8w7A77dictsqvR7/3c/EjxD2kyj3s2dkcfuBBynfvxr13bzyGDMFjyODKKacuBhe6BHWhS1CXKtdZHVZSilLQb9hB2dvPA2hrSUfVvKWHx8CBRP48k5S779G2bQH0Pj64tGuLuW1bXNq1w6VtO0wR4cfdxqWwzMq6pDx++zORZXuysDq00VEFlRBdCc0MRZhxoABulPJCqzhCDvyMUrGlTkddEn+6PMvi8NuY3uYGnjs8jIs3zGPwoc0UePlj7tqNVsP649GzJ8bQptWmXqtWK+X79lO2cwfl23dgSUrEc+jF+I+8COWXa6E4U0tKNGEmhJ1kXa9XE7hlASx4BGVbDCH+f+J63cVkzDuEPS2dgtmzKZg9GwBjRDjuPXvh0qEDeT/9iCX+gPZ+Dh1KyAtTqk8PztoLG7/RkhT9O7mRojs2+uvVVBsV9gzRps/una+tK135uvYI6wEdxmmB6OGKWQIDHoUhzx7LjPtfPs2g/0PaQ4izTPpQIYQQjUGjDF6PdnwlJSU1liku1v5orO0amZtvvpmNGzfy9ttv8/jjj1ce79GjB/Pnz6dbt25s376dd99994SfCpvNZszm2mcuFaeu2FrM6+tfZ97BeYA20npPl3u4OPziek+uBOBmdOOWDrdwXZvrmH9wPrnluXTw70D7gPZ4m+s+xdCSkEDKnXdhS00FoPjvvyn++294UVs7eTSQdWnXDux2HMXFOIuKcBQW4SwuIuDIETJeeh0cDrzHjMF/8uSqN3A6tYzD/2KOjqb573Mo370bY2gohiZNqgWJFquNuKQMNsZnsuNwPgeyy0gtdlBg1wHHyjZ1VwiwZhGmL8LTpCck0I+wkEAucm4lKu4z9Acq1tJ1ug763gfLX8EQv5jRmV8wOmwLPPA5Nt+x5JVY6expPulWMIrJhGvHDrh27ABHk8zsmQs/XA72MghqDxN/rv16RaMLjPkUQjrB4mfwVpfjOakLpS2epXRXAiXrN1C+eze25EPkJx+CWdoeqXo/P0Kefw7PSy891ma7FeLma1lsj07bBW0NZY/boe1l4BFS81pRSzHELdBGag/+pQWsR4NWF28Y+xW0vrR2r0uIk5A+VAghxPmqUQavkZGRAKTUlDjmX+eOlj2R1NRUli7VMr8eL6ui0Whk/Pjx7Ny5k2XLlsmUpga0NWsrT696mtTiVHSKjjs73cmdne6sXId6JrkYXBjfany91FWyfgOH778fZ2EhxvBwQp5/nvLduyn+6y/KduygfPduynfvJvuTT1CMRlSbrca63Lp3J+SVl48FUg4bzL0P4pfA
+G8hemiV8jo3N9x69AC0KcuqqlZeO+XHFfy8uwhLtf/1tdG+QHcjl3UJZXy3MLwVC4XFJYQE+uPj7YnuyD5Y8Oix4C2gtban59F9Mif+DNti4M+n4PAG+KI/xmEvEtTzzuprPZ1OKM3RRlMdFu256tCmwzod2veH1sPfb2rlW46A8d9paxhPhaJA78kQ1AZm3YIuexse+Xfj0fN2mPwpDtWF0k2bKF2/gbLt2zG3akXgQw8eyx5cXqBNC173ORSlV9SphzajtKA1alDN61j/zewBna/VHkWZFdN/fwWjG4z5BHwjT+11CXEC0ocKIYQ4XzXK4PWiijV9OTk5lZmp/mvTpk0AlRuvn8ihiimUUPOnzEeTTOTm5p5ye8XpszltfLH9C77Z+Q1O1UmoRyhvDHiDi4Lqbx/Gs6Vg7lzSnnsebDZcu3Qh7LNPMfj54TGgPwGT78KenU3x339T9NdflKyJrcxkC1rgqfP0RO/lic7TC1PzKIIefRTd0ezBDhv8eivs/UN7/vNNcNtibSsToKS0jLTMbFLSskg6nE5iShr33Tye4EA/NiTmErOnDAcGFFT8TCrNvAy0DvagczM/erUKoXkTv+ojpOUFsPR5LYBz2sHgCoOegD73Vc1wqyhw0Q1aQPfHfXBwJSx6UmtrYBstUC1Kh6IM7fsTrRH9t16TYcRrp5cBt/lguOMvmH2HNuIZ+zFs/BZ9j9vx7PcgnkOGVC1flKG93k3fgaViTaB7EHS7RXt4n3xtX408g6HPPdpDiDNA+lAhhBDnq0YZvIaFhdGjRw82btzI9OnTj7vBekpKCmazmVEnWAN41L+TSKxfv57hw4dXK7Nu3TqA43by4sxwOB3YnDZSi1N5fs3z7MzWtta4IvoKnu75NB4mjwZu4alRVZXszz4j++NPAPC89FKavvkGuv+kBDcEBOAzbhw+48bhtFiwHzmCzt0dvafnibPg/jtw1Zu0kc/Mndh+uIqfg5/mQLaDgqLiapclpqThNLlyT8xmHCqMaBvI1AndcDWdeGsgnA7Y8j9Y8RqUVmyB02okjHwLfCNqvs6nGdwwR8s2u3QKJK/RHtUo4OavJdNRdNpaT51BG9nU6cFghq43Q7ebT9zO2vKLgtuWQvxSbd1p2laI/UibCtzzDuj7gLbnaexH2npWh1W7LqA19HsAOl5Tt+1ohDjLpA8VQghxvmqUwSvAM888w9ixY3nzzTcZOXJk5afDOTk53HOPNmJx3333VUnLP2fOHJ5++mlCQ0NZvvzYXpvh4eGVHfmDDz7IwoULq0yV+umnn/j555+B42/oLuouuTCZBQcXsOzQMnLLcrE6rdiddqwOK47K/Ss1niZPpvSewqVRZ2btn6OoiNLNmynbtAmduzveY8dWyTR7MuX79mNLPYzOzR2duzs6D+2r3t0djEYyprxAwe+/A+B/x+0EPvxw9Qy1/6Ezmyu3vTlh220WLNNvxC1xMQ7FQMbgDwjtPhq+GY4xJ57BB99kJ7eCYsbPx4vQkEAiw5oQFd6EgAB/rv92I9nFVto28eLDCV1PHrge/BsWP6Nl9wUIaAWXvA4tq//RevwXptMCwuihsOUHLdj2DK7Y0iZEWx/qEQT6Mz8dvApF0bLOthyu7ZW68g1I3wZrPoT1X4K9HCoSUNGsF/R7CFpdWm1tsRCNnfShQgghzkeNNni98soreeCBB/joo4/o3bs3F198Me7u7ixfvpz8/Hz69evHK6+8UuWagoIC9u3bR/lx9rH87rvvGDJkCHv37qVt27b07t2bgIAA9u7dy+7duwG44YYbuP7609gzUQCQU5bDoqRFLDi4oHI09WT6NOnDy/1erpat93Q4S0oo3bKF0vXrK5Pz4Dy2v+iRTz7F8+KL8b3+etx69jhuQiF7bi6F8+eTP3sOlri4k99UrydkyhR8r73mtNputdlJSknjUFomiUnJ9Dr4IZ2cO7GjZ5p6LSGWSEJdfeH6WTi/uZiw0nSmNFuDfuIM
XFzdKutRVZVHZ21nx+ECfN2MfHVjN9xMJ/jfPidBGy2Nm689d/GBwU9Dj9vqFmj6R8PwRrj+TVG0BEmtLoH9i+Cv1yFD29OVViO1jL7hvRu0iUKcDulDhRBCnI8abfAKMHXqVPr168enn35KbGwsNpuN6OhonnrqKR5++GFMptpP4evQoQO7du3igw8+4M8//2Tjxo1YLBZ8fX255JJLuPXWW7nmmtMLOC5kqqqyJHkJcw/MJTYttnJUVa/o6d20N6OjRtPKtxUmvQmjzohJb8KkM2HUGyu/Hrdeux17Ti7OwgIcBRWP/GPfO0tKcJaW4iwrRS0tw1lW8SgpwZqcDI6qo7vatig9sSYlU7pxI0VLllC0ZAnmli3wnTgRr8uvQGc2UbxqFfmzZ1O88u/KvVIVoxFz69Y4y8twlpTiLC7GWVJSGRDrvLwIfe9dPAYMqPX7ZrPbScvMJj0zGx9vT9pEa9NxC4tL+PzHOehUBzfyK53YjR09y0IfIrrdaNq2rJi26xeFbsLP8L/LcE9ZCX+9ACPfrkwiNG1NErO3pKLXKXwysSvN/NyO35DUzbDxOy0brtOmTd3tcZsWuLr51fr1nHMUBVqP1EZXUzeDq68WcAtxHpA+VAghzp7du3fzxhtv8Ndff5GdnY2/vz9DhgzhmWeeoX379pXlvv/+eyZNmlT53Gw2Ex4ezogRI3j++ecJDg4mMjKS5OTkk95z2rRp3HLLLWfi5TRaiqqqakM34lxWWFiIt7c3BQUFtd5y4HyjqirvbnqXH/b8UHmsg38HLou+jEsiLyHANaBO9VoSE0m57XZsaWknL1wDY2gobr164d6rJ269elWZJly+bz95M6ZT8Mc81NJSAHTu7ihmM45/JR1x6dAB77FX4j16NHofnyr1q6qKWhEs67y80J1kCwiL1UbS4XQSklI5eCiV5MMZ2CsC7C7tW3LzeG39mVNV+eCLH7mq9Eeiijag6oyo1/yArk0N69N2/w6zKtaGXvIG9LmH2APZ3PjdBhxOlecva8dt/f+zFs1aqmW93fSttv7zqOih2hThoLYnfC1CiGOkL6gbed+EOL+Vl5dXJo5z+U8OkPPJ7NmzmTBhAn5+ftx2221ERUWRlJTEt99+S05ODjNnzmTs2LHAseD15ZdfJioqivLyclavXs2PP/5IREQEu3btYsmSJZVbmgEsXLiQGTNm8MEHHxAQcOzv6r59+9K8efOz/npP1cl+D06lL2jUI6/i3PDVjq8qA9db2t/CVS2vIsr79JJ2OMvLSX3oYS1w1enQe3mh9/ZG5+ON3tsbvbcPei8vdJ4e6Fzd0Lm6onNzRefmhuLqis7VDWNYGKawmrPCurRuRZMXXyTo0UcpmPM7edOnY01KgpIS9P7+eF9xBd5jr8SlVasa61AUBcXNjXSrwpGsMjqHmapMP3Y6neh0OrCWYi/J4ZPPvkFvK8aVcrwopyfl+BjtBLo5Cc5ZBT99B6U56EqzebQkB2wloDehXPMjyon2AW1/JeS/rE35XfwM2YZg7v3TE4dT5aquodzaL1Irp6qQc0DLorstRsskDNqa1HZXaqOtzXrVbvsXIYQQQogLXEJCAjfeeCPNmzfnn3/+ITAwsPLcgw8+yIABA7jxxhvZsWNHlUBz5MiRdO/eHYDbb78df39/3n//febOnVttW7KMjAxmzJjBlVdeWastzs5nEryK0xKzN4ZPtmnZdZ/s8SQ3tLuhXurNfO01LPv2offzI2rOHIzBQfVS7/HoPT3xu+lGfG+4nrLNm3Farbj37IlirN0azzUHsrnrx80UW+y0DfHksjZehOmLSTyUigE797kvhLj5GIBHj1eBDSioePyXyRPGfaOtzzyZvg9AbiJsnobPgjtYprphdnXiHg/Kq3Zta5r/JMnCJwK636ptceNetxFyIYQQQogL1TvvvENpaSlfffVVlcAVICAggC+//JJBgwbx9ttv88UXX9RYz9ChQ3n//fdJTEw8000+
p0nwKursj4Q/eHPDmwDc0/meegtc83//nfxZv4KiEPruO2c0cP03RafDrUePU7rm962pPDZrO3anNvt+b0YRezOKcMFOC0MJrxm+AvZW3ECPavYCV28UF28we4GLt5YUyd1f2zbmvw/PJmCqYZ1qtRegUHTxG8Tv3kvX8nX4K0Va4lxbtRcKLS/RRlmjL5ZMukIIIYQQdTRv3jwiIyMZUEPOk4EDBxIZGcmCBQtOWE9CQgIA/v7+9d7G84kEr6JOlicv5/k1zwNwQ9sbmNx5cr3Ua4mPJ+OllwEIuPde3Pv2rZd664vVZiP5cAbREaF8vSqRN/7UMhCH6ovoaMzmkN2LRKcPZU49NzKXzuzFppg4dOk0jNFDcDUbcDXpcTHoMOjrN2hMzS/jtu83Epd/P+2MV/PW2PZ0DA+o2D/VqGUL1hnA6Fb7gFgIIYQQop6pqkqZvayhm1HJ1eB63F0nTqagoIC0tDTGjBlzwnKdOnXijz/+oKioqMq12dnZlJeXs2bNGl5++WVcXV257LLLTrkdFxIJXsUpW5u2lsf/eRyn6mRM9Bge7/F4nf6H/y9nSQmHH3oYtawM9759CLi7fgLi02G320k6nEF8YgrxiSkcSs3E7nBibtefn7dkAHB5G296eOlpFdmBFpFheHh6kDzzcVonrMShKtxrvZclcxRgZZW6TXodLkYdob5uPHhxSy5pH1zn93HH4Xxu+98mjhRZCPR04a2br6ZjmPfJLxRCCCGEOMvK7GX0mt6roZtRaf3E9bgZT/2D/aPBqKen5wnLHT1fWFhYeWzYsGFVykRERBATE0NoaM35WoQEr+IUbcvaxoN/PYjNaWNY+DBe7PsiOuX0RxBVVSX9xZewJiRgCAyk6TvvoOj19dDiqtLyy3h2zk4UReGZUW1oEXT8f2ziE1NYtnojiYfSsNmPrRN1qAo71FCSKgLX50a35fYB/8nytvpDWid8B0Bi3zdxpHfDPyWfMpuDMpuDo/m9rQ4nVoeTwvRCJv+0mQEtA3jh8nY1tqkmi3Zl8NDPWym3OWkT4sm3t/Qg1Mf1lOoQQgghhBCn5mhQ+u8R1eM5XpD76aef0qpVKwwGA8HBwbRu3VpL8ilOSIJXUSvZZdnMPTCXb3d9S5m9jD5N+vDWwLcw6OrnVyh/1iwK580DnY7Q99/DcAbm+6/cl8XDP28jr1RbBLoq/gg39AxlRISJrMwjdGwbTfNw7dMum93O/oMpAHh6uNEyqhlNmoTw+eYCkg4XY9QrvHdNF67o3LTqTbb8AMte0L4f8Sot+k7m23+dVlUVi91JeUUgW2Jx8PvWVL765yCr4rO59MNV3Nw3kgeHtcTL5cQJo1RV5etVB3njzzhUFQa1CuSTiRfheZLrhBBCCCEakqvBlfUT1zd0Myq5Gur2ob+3tzdNmjRhx44dJyy3Y8cOQkNDq2wD07Nnz8psw6L2JHgVNXI4HcSmxTI7fjYrU1ZiV+0AdAnswodDPsSkr/0G9ydSvncvma++BkDgQw+dctKkk3E4VT5ctp9P/jqAqkKEpw5XtZy4YhPT1h7m53U2OhuPYDAYKoPX5uGhTBjSnpamLGzpOyk5OIPAHXvpozrYaW5F085DCfd0A4sXmD20G+35A+Y9qH3f/2Hoe3+1tiiKgotRj4tRj0/Fsccuac3V3cN4dcFelu7J5NvViczdlsoTl7RhfLcwdLqqU4kdTpUSq503/4xj+vpDANzYO4IXLm9X7+tohRBCCCHqm6IodZqm2xhddtllfP3116xevZr+/ftXO79q1SqSkpK46667GqB15x8JXkUVdqedjJIM5iXMY86BOaSXpFee6xTYiXEtxzG6+WjMenOd7+EoLsaakIDlwAEsBxIoXLQI1WrFfdBA/G+/rbJcuc3BO4v3EehpZlK/SMyGU59GnFVUzoMztrH2YA4AXfUpfGF7myByKDebyMWDfNWTfNUd40YfSvOicLPlYzi8iZ4lmdUrVKA3O2HHTtgx
FRQ9NOkMTbvA1p9AdULXm+DiF06pnRH+7nx9U3f+2X+El+btJuFICU/8toOP/4rHbNBTZnVQYrVTanVgtTuPNUeB50a349Z+kfWy7lgIIYQQQtTe448/zk8//cRdd93FP//8UyVbcG5uLpMnT8bNzY3HH3+8AVt5/pDg9QKUWZLJR1s/4kjpEYptxdrDqn39b+Y3L5MXl0dfzlUtr6KVb6tTvpfTaqVs82ZK1qyhPG4floQE7Onp1coZmjah6Ztvovxrrv+rC/bw0zptZPHXzYd546qO9Ij0O+k98wuL2ROfyJ9bEvn9kJ4iG7iZ9Lx5RUu6LnqGILsWyLooVpqSS1MlV7uwHNgVq7UHbX3rfrUZO2mBLeQimnceQLeoAEyp6+HQOkheC4WHIW2L9gBoezlc9qEWVdbBwFaBLHpoIP+LTWLqsnhScmvOxOfvbuKNqzoyon1Ine4lhBBCCCFOT8uWLfnf//7H9ddfT8eOHbntttuIiooiKSmJb7/9luzsbGbMmEF0dHRDN/W8IMHrBejLHV/yR8IfNZ5XUOge0p1xLccxLGLYKY+yWg8fpmTVKor/WUXJ+vWopaXVyhgCAzG1iMbcoiXm6Gg8LxmBwde38vyCHemVgauvm5EDWcVc/cVaJvYK58lL2+Dtemxdp1NVSUnLZM/+RHbvT2R/Wh7JDi/22bX6WgV58Nn1XWmx+hGwJ2n7qt78B5g9oSwfyvJITU9jwfo9FOVlUYoLO9XmeEV149KuLRjZPrjqOtKmHaHH7dr3+SlaIHsoFgyuMOwFbWua02DU67h9QHOu6hrG9sP5uBj0uJm0h6tJj5vJgJtJj9mgk9FWIYQQQogGdvXVV9OmTRveeOONyoDV39+fIUOG8Mwzz9ChQ4eGbuJ5Q1HVo7lPRV0UFhbi7e1NQUFBlUXYjVWZvYyhvwyl2FbMAxc9QAufFniYPPAwVjwqvjfqTy3pj6O4mJwvvqBo+QqsiYlVzukDA/DoPwDXLl0wt2yBuXlz9D4+NdZ1KKeU0R+toshi5+7B0dw1sDlv/hnHzI1aAqVATzMvXdGekR1CUBSFz374jS0JmaQ5PEhzuJOvulTWNbKtP+9e1w33DR/B8pe1ab43zoHmg6rd1+lUWbQ7g7xSK8PbBRPk6VKtjBBCHM+51hc0FvK+CXF+Ky8vJzExkaioKFxc5O+qC9XJfg9OpS+QkdcLzLLkZRTbign1COW2jrfVyzY31sOHSZk8GeuBBO2AXo/rRV3wGDAQj4EDMLduXWU68Anrsju5b8YWiix2ukX48sjwVhj1Ot4c14kxXUJ5evYOknJKuSdmC8PaBtOuqRezkt1It0RU1qFToEekHxN7hXNF56Yoe+dpgSvAqHeOG7gC6HQKozo2Oa33QgghhBBCCHFmSPB6gZkdPxuAsS3G1kvgWrp5M4fvux9HXh6GwECCn34K9/790dfxE/S3FsWx43AB3q5GPppwEcaK7Llpmdkc2rOdbpZ9mA2eHHD4sWxvJsv2akmVDDqFvi0CGNkhhOHtggnwqJjqnL4d5lRkd+t5F/S47Xi3FUIIIYQQQjRyErxeQA4VHmJT5iYUFMa0GHPa9RXMnUv6c8+j2myY27Wl2WefYQype/Kgo9vEALx3dWeaeLuwMy6Bf9Zv40DS4cpyw8MUXg3LhLh52NGjtBlJ2/5j8fb5TzKnogyYMQFspRA9FC55vc5tE0IIIYQQQjQsCV4vIL8f+B2AvqF9CXGve5CpOp0c+XAqOV99BYDn8OE0fetNdG51368rNb+Mx2ZtB+C2/lH0ifTi7c9/IvOIlglYpyh0bxnECM99+B34FmVT8rGLNy2FrU9B5ABoPRJajwI3P5h5PRSmgn9LGD8N9PLrLoQQQgghxLlK/pq/QNidduYemAtoU4bryllaStqTT1K0dBkA/nfdReCDD9R6Tevx2BxOHpixlYIyG53CvHny0jYY9Qpuri64mIxc3tpAN+sazPELwWHVLnLxhi7X
a5l94xZCbgIkLNceCx8Dj2AoztQyC0/8GVx96tw+IYQQQgghRMOT4PUCEZsWS1ZZFj5mH4Y0G1KraxxFRdjS0rRHejr29HSKV/6NJT4exWikyWuv4n3FFafVLodT5e1Fe9mcnIdJUXl7bDtMBi0QnjiiO34LbkO3Y/OxC5p21dattr8KTBUjvcNfgex42LcA9v0JKRu0wFVngGt/BH/ZV0sIIYQQQohznQSvF4g58XMAuKz5ZZj0phrLle3cRearr2JJSMBZXHzcMno/P8I++QS3rhedcjtsDie70wpZfzCH1fsz2ZScT5ld262pizGDlIQDtAntCUDAutcgbbO2f2rHcdD9NgjtWr1SRYHAVtqj/8NQfEQbgfWJgIg+p9xGIYQQQgghROMjwesFIKcsh5UpKwEY27LmKcNlu3dz6LbbcBYWVh7T+/pibNIEQ9MmGJs2xdi0KV4jR2IMDq71/W0OJz+sTWblviw2J+dRanVUOW/ASSf3Eu6/pAe9LmqvHdz9O+ycBYoObpkPYd1rfT88AqHzdbUvL4QQQgghhGj0JHi9AMw/OB+7aqeDfwda+baqdl5VVfau3oztwbsxlRaz2y+SXwbfyJBBnRjXpwVBXnXfVLrc5uDemC0sj8uqPGbEgb+ujABdOV3DvRnTtx2d20Zj0Ou1AsVZMP9h7fv+j5xa4CqEEEIIIYQ4L0nwep5TVbVyyvC/R11VVWX74QL+3JXOjn+28OCCD/C2lrDXN4IpfW6n1OnChr+SeffvQwxtE8SEns0Y1CoIvU6p9b2LLXZu+jqWLYeLMBt0PDqiFf1bBLJ3+xZcTP706tqBAF/v/zYY5j0IZbkQ3AEGPVkv74MQQgghhBDi3CbB63luR/YOEgoScNG7MDJqJABxGYXc8cMmUnLLCC/M4K3Vn+NtLSGjSXN0r7zPX50iWBWfzcwNh9iUnMfSPZks3ZNJiJcL13QP4/reEQSfYDRWVVW2xCVx3y+7SC/TYcDJO2Nac0UPLXFSu6aDa27w9pmwbyHojDD2SzDUvD5XCCGEEEIIceGQ4PU8d3TUdXjEcDxNngC8+MduUnLLaFmew5vrv8bNWoKxbVsGfj8Nvbc2Ejq+Wxjju4URn1nEzI0pzN5ymIzCcj5acYAv/jnIdT2acffgaJp4u1bey6mq7N53kLkrN/FLspFC1YwJB7e1N9E3OuDkjS04DH9WjLQOeRpCOtTvmyGEEEIIIYQ4Z9V9c07R6JXaSlmUtAg4NmV4/cEc1h3MJbw0m483fY1bSQHmNm2I/O7bysD131oGe/L8Ze1Y98zFfDzhInqHe2Cz2/lhbTKD3l7Js3N2kppfRuaRXN79IoaPZyxiRrKJQtWMl0kh5rYePHnjpQT4+Zy4saoKc+8DSwGEdoe+D9b32yGEEEIIIcQZ8dlnn6EoCr169aqxTFZWFk899RQdO3bEw8MDFxcXWrRowaRJk1i9enWVst9//z2KolQ+DAYDoaGh3HLLLaSmptaqTS+++CKKoqDT6UhJSal2vrCwEFdXVxRF4b777qs8npSUVHnfV1999bh1X3/99SiKgoeHR63aUl9k5PU8tjR5KSW2Epp5NqN7sJb06OMVBwgszeP99V+jFuRgbtmS8GnfYfD1PWFd5tx4Lt//FpdlzcESEMYfDGZqdndi1jv5ZVMKV3ZuSmEhrLKGUaYaCPNxYfodfQj3d6tdYzd9Cwf/0rbFGfsF6OVXUwghhBBCnBtiYmKIjIxkw4YNHDhwgBYtWlQ5v2HDBkaPHk1RURHXXXcdkydPxmw2k5iYyO+//87333/P33//zcCBA6tc9/LLLxMVFUV5eTnr1q3j+++/Z/Xq1ezatQsXl9olVTWbzcyYMYMnnniiyvHZs2ef8DoXFxdmzJjBc889V+V4SUkJc+fOrfX965OMvJ7HZsdrv5BXtrgSRVHYnJzL6gPZ3LZnAe4FOZiiown/ftqJA9cj++DXW+Gz3rB7NgoqLsUpXFP8I6td
HmKe9zuMVFfzx5ZEFuf5UqYaaBnkwW/39Kt94Jp7EJY8r30/7EUIaHl6L1wIIYQQQoizJDExkdjYWN5//30CAwOJiYmpcj4vL48rr7wSg8HAtm3b+P7777n33nu5/fbbee2119i1axfTp0/H1dW1Wt0jR47khhtu4Pbbb+ebb77hscceIyEhgT/++KPW7Rs1ahQzZsyodnz69OmMHj36hNft2bOH7du3Vzk+d+5crFYrw4cPr3Ub6osEr+eppIIktmRtQafoGBM9BoCPlh8gvDCDganaL2Doe+9i8Pc/fgVH9sNvt8OnvWDXb4AKbS+HO1bA2K8oDuqBgkpHy1Y+Mn3KFtd7edXwLWNCcvjlrj4nTOhURWkuzLkbbKUQOQB63lkPr14IIYQQQoizIyYmBl9fX0aPHs348eOrBa9ffPEF6enpfPjhh7Rp06ba9YqiMGHCBHr06HHSew0YMACAhISEWrdv4sSJbNu2jbi4uMpjGRkZrFixgokTJ9Z4XZ8+fYiKimL69OlVjsfExHDppZfi5+dX6zbUFwlez1NH17r2bdqXYPdgtqfk8/f+I0zcvwxFVfEcPhyX4/zPQ3kBzL4LPusFO2cBKrS5DO5aBdf+hDWoE7NSAnn+yOW8wsNs8L4C1TsMd7WEGwzLmZp/P76/XQMJK7R1rDXJT4E/n4IP2kPKOjB5wphPQSe/kkIIIYQQ4twRExPDVVddhclkYsKECcTHx7Nx48bK8/PmzcPV1ZWrrrrqtO+VlJQEgO9Jlvz928CBAwkLC6sShP788894eHiccOQVYMKECcycORO14u/67OxslixZcsKg90yShYXnqd05uwHoH9ofgI9XxFcZdQ24957jX7jwCdgxU/u+9WgY/CQ06QzAkZw8vv15PplHcgHo3G8YXYc+j6IokPQPbP4e9szV1q4e/Evbp7Xv/dBhHOiNWp1ZcbBmKuz8BZx27ViTznDJ6+AbUf9vhBBCCCGEaFRUVUUtK2voZlRSKpIW1cXmzZuJi4vj448/BqB///6EhYURExNTOZIaFxdH69atMRqNVa4tKirCYrFUPnd1dcXd3b1KmYKCArKzsykvL2f9+vW89NJLmM1mLrvsstq/PkXhuuuuY8aMGbz88svAsYDbbDaf8NqJEyfy+uuvs2bNGvr3788vv/yCi4sLV1xxBYsWLap1G+qLBK/nqX25+wBo7duaXakFLNubxVMnG3VNWFERuCpw01xoPqjyVHJqBl9P/4OS0jI8Pdy4fuwltG4efuza5oO1R14SrPsctvwImbtgzl2w/GXoPglSt8K+BceuiRoI/R+G5kOgjv9gCCGEEEKIc4taVsa+rt0auhmVWm/ZjOJWy1wt/xETE0NwcDBDhgwBtEDx2muv5aeffuK9995Dr9dTWFh43Ky8N954I3Pnzq18fu+99/LJJ59UKTNs2LAqzyMjI/npp58ICws7pXZOnDiRd999l40bN+Lr68vGjRt5/fXXT3pd+/bt6dSpEzNmzKB///5Mnz6dMWPG4FbH9+t0yRzN81CBpYD0knQAWvu1PjbqevgEo67WUpj/sPZ9r7uqBK5FxSV8/sNsSkrLCGsSxON3TawauP6bbySMfAse3gUXTwGPYChMhRWvVgSuCrS9Qls7e/M8iB4qgasQQgghhDjnOBwOZs6cyZAhQ0hMTOTAgQMcOHCAXr16kZmZyfLlywHw9PSkuLi42vUvv/wyS5cuZenSpTXe49NPP2Xp0qX8+uuvjBo1iuzs7CqjpVarlYyMjCoPh8NRrZ6LLrqINm3aMH36dGJiYggJCWHo0KG1ep0TJ05k1qxZHDhwgNjY2AabMgwy8npeOjrqGuoRyuEclcW7M3ly/zIUTjDq+vdb2qipVygMrZoO29PDnUsG9WJ/Ygq3XD0Ks8l08ka4+cGAR6HPfbDjF239rG+kNo1YsgkLIYQQQlywFFdXWm/Z3NDNqKQcJ8tvbaxYsYL09HRmzpzJzJkzq52PiYlh
xIgRtGnThu3bt2Oz2apMHe7UqdNJ79GzZ0+6d9e2vLzyyivp378/EydOZN++fXh4eBAbG1s56ntUYmIikZGR1eqaOHEin3/+OZ6enlx77bXoaplrZsKECTz99NPccccd+Pv7M2LEiFpddyZI8Hoe2pu7F4A2fm34ZIWWYXjQiUZdM3ZCrDZPn9HvgdkTVVWxWK24VHyyM7hPVwb1vqjWv+SVDGboeqP2EEIIIYQQFzxFUeo8TbcxiYmJISgoiE8//bTaudmzZzNnzhy++OILLrvsMtatW8ecOXO45ppr6nw/vV7PG2+8wZAhQ/jkk0946qmn6Ny5c7WR25CQkONeP3HiRKZMmUJ6ejo//vhjre8bHh5Ov379WLlyJXfffTcGQ8OFkHW6s9PpPPUgRpw1R0deA03N+WZXOk/sO8Goq9MB8x4E1QHtxkDrkTicTn5d8BeHUjO475bxuLqYtX9kZHqvEEKcNulDhRDi3FdWVsbs2bO5+uqrGT9+fLXzTZs2ZcaMGfzxxx/cfffdfPzxxzz88MN06dKFVq1aVSmrnmiHjv8YPHgwPXv25MMPP+Shhx7C19e32rrYmkRHR/Phhx9SVlZGz549a31PgFdffZW//vqLa6+99pSuq2916j0jIiJ47bXXyMrKqu/2iHoQl6ft4bQ7yYNmBRkMOlGG4Y3fQOpmMHvBpW9htdmY9vN81m3ZRXpWDgeSDp/NpgshxHlP+lAhhDj3/fHHHxQVFXHFFVcc93zv3r0JDAwkJiYGPz8/5syZg8VioXPnzkyaNInPPvuMr7/+milTptCuXTtAG+Gsjccff5zMzEy+//77U273gw8+yFNPPXXK1w0aNIgXX3yRtm3bnvK19alOwWtqaipTpkwhPDycG2+8kXXr1tV3u0QdWRwWEvMTAVgXZ2bCiUZdCw5rmYABhr2I0yOYH39bxO79iRgNeiZdM5qObaLP8isQQojzm/ShQghx7ouJicHFxYXhw4cf97xOp2P06NEsWrSInJwc+vTpw65du7j//vvZsGEDjz32GPfffz8xMTH07NmTf/75hyeeeKJW977qqquIjo7m3XffPW5ypvOZop7KOHWFjRs38sknn/DLL79gsVhQFIWuXbty3333cd111510v6DzSWFhId7e3hQUFODl5dXQzWFPzh6unX8tZsUDt3W388WK91BQifp9TtXgVVVhxgTY/yc064U66U9mL/qH1Rt3YNDrueuGK2kReWopuIUQ4kJ1Kn2B9KHHNLY+VAhRv8rLy0lMTCQqKgoXF5eGbo5oICf7PTiVvqBOI689evTgf//7H4cPH+b111+nWbNmbN68mVtvvZWwsDCefvppDh06VJeqxWk6ut7VlfATj7ru/UMLXHVGuHwqf63dxuqNO1CA66+6RAJXIYQ4Q6QPFUIIIermtDJG+Pv789RTT5GYmMjvv//OsGHDyM3N5a233iI6OpqxY8dW7m8kzo6jmYZD0rwYWNNa1/ICWFgxLaH/w5R7N+fvdVsBuGLEALq0k61shBDiTJM+VAghhDg19ZLuUFEUrrjiChYvXkxcXBx33nknDoeDP/74gxEjRtC+fXu+/fZbnE5nfdxOnMDRkdf+G46gQ0XtP6j6qOvGb6E4A/xbwIBHcTGbefC2a7h8WH8G9+naAK0WQogLl/ShQgghRO3Ua67+5ORkvvnmG3777TdAS/scHBzM3r17ufPOO+nWrRuHD0v22jPFqTrZl7cPRVXpflB7nwOuPc5eUvv+1Mr3vgeM2rxzPx8vhvbrdtbaKoQQoirpQ4UQQogTq5fgdcmSJVxxxRW0aNGCd955h5KSEm699Va2bdtGWloaS5YsoXfv3mzfvp2HH364Pm4pjiO1KJUSWwnt0gz4lZdQYnQlYFD/qoVKcuDwRgA+iy1mZ1xCA7RUCCHEUdKHCiGEELVjqOuFhYWFTJs2jc8//5z4+HhUVSU0NJS7776bu+66C39//8qyw4YNY+jQoXTp0oUV
K1bUS8NFdUf3dx0c7w5YiIvqRHeTqWqhA8sAlSxDKAn5ULw8lnYtI9Hr9We7uUIIccGSPlQIIYQ4dXUaeb377rsJCwvjkUceYf/+/fTu3ZsZM2aQlJTEM888U6XTrbyRTkf37t3Jz88/pXvNmjWLwYMH4+vri7u7O507d+btt9/GZrPVpekAzJ07lyuuuIKQkBBMJhNBQUH07duXl19+uc51NgZxuXGgqnTeWwpARuc+1QvFLwFgu705Hu6u3D7xCglchRDiLJI+VAhxoanDzpziPFKfP/86jbx++eWXmEwmJk6cyIMPPkj37t1rdd3AgQNPqfEPPfQQU6dOxWAwMHToUDw8PFixYgVPPvkk8+bNY8mSJbi6uta6PqvVyg033MCsWbNwdXWlT58+BAcHk5GRwe7du/noo4+YMmVKretrbOJy44hOB5/8csr0JhzdelUt4LDjjF+KDthLK24aN5IAX+8GaasQQlyopA8VQlwojEYjiqJQUlJySv/eiPNLaak2sGY0Gk+7rjoFr1OmTOHuu+8mODj4lK675ZZbuOWWW2pV9vfff2fq1Kl4eHjw999/07WrlgU3OzuboUOHsnr1ap5//nnefffdWt//jjvuYNasWVx55ZV8/fXXBAQEVJ5zOp1s2LDhlF5PYxOXG8ewfVo2yo3BbWga4lvlvHp4AzpLASW44tV2CC2jmjVEM4UQ4oImfagQ4kKh1+vx9vbmyJEjWCwWvLy8MBgMKIrS0E0TZ4GqqpSWlpKVlYWPj0+9zPZU1EY6jt+zZ082btzIq6++yrPPPlvl3OrVqxkwYABms5nMzEy8vU8+erh8+XKGDRtGhw4d2LJlS71E/qCtW/L29qagoAAvL696qbMucstzGTRzIB994SAkH17vcQO3PXcHg1sHVZbJ+vkhgvZOY4vSmYgH5uEvo65CCFEvGktfcJT0oUKIxkJVVQoKCsjKysLhcDR0c0QD8PHxISQkpMYPLU6lL6jTyGteXh47d+4kOjqa0NDQ45ZJTU0lISGBTp064ePjc0r1p6amsnGjlhF34sSJ1c7379+fZs2akZKSwsKFC5kwYcJJ6/z4448BbRpVfXW6jcm+3H1EZEFIPlj0RjYGt+UlP7cqZVwO/QWArvUlErgKIUQDkT5UCHEhURQFHx8fvL29cTgc2O32hm6SOIuMRmO95tepU/A6depUXnnlFdavX19jx5uens6QIUN4+eWXq33qezJbt24FwM/Pj6ioqOOW6d69OykpKWzduvWkHa/D4WD58uWAtmYoIyODmTNnsm/fPsxmMxdddBHjxo3Dw8PjlNrZmOzL3UeviinDmwNbYTGaCfX919qC/BS8SpJQFR1tR97eQK0UQgghfagQ4kKkKAoGgwGDoc6bnQhRt+B14cKFNG/e/IRJJrp3705UVBTz588/5Y43MTERgPDw8BrLNGvWrErZEzl48CDFxcUArFu3jnvuuafy+VGPP/44M2fOZOjQoafU1sYiLi+Oofu0GeBrmnYi2NMFs+Ffn3JUZBlWwnpi9j61dVZCCCHqj/ShQgghRN3UaaucpKQkWrdufdJybdq0qVXH+F9FRUUAuLu711jm6Ce8hYWFJ60vJyen8vvbbruNbt26sXHjRoqKiti2bRujRo3iyJEjjBkzhvj4+BPWZbFYKCwsrPJoDHL2bqdZNjj1etaHtKOZ37FR13/Wb8Oye4H2pOXwBmqhEEIIkD60MfahQgghzg11Cl6PLqo9GS8vr1Pek+5M+HdOqtDQUBYvXkz37t3x8PCgc+fO/PHHH3To0IHi4mLefPPNE9b1xhtv4O3tXfk4+ul1Qyq3l9N0cwoAR1q3p8TkSrOK9a7JqRnM/3MZStI/WuFWlzRUM4UQQiB9aGPrQ4UQQpw76hS8BgYGEhcXd9Jy+/btw8/P75Tr9/T0BKCkpKTGMkenLNUmO+HR+kDbasBsNlc5r9frueuuuwBYtmzZCet6+umnKSgoqHykpKSc9P5n
2oH8A/SK07K3HWjdG4Bmvm6oqsqcRX8TTSImbOAVCsEdGrKpQghxwZM+tHH1oUIIIc4ddQpee/fuzbZt2/jnn39qLLNq1Sq2bt1K7969T7n+yMhIgBN2akfPHS17svqOpmZu3rz5ccscPZ6enn7CusxmM15eXlUeDS1hTyxRmeDUwbomWnDazM+NLTv3kXw4g466A1rBlsNB9tUSQogGJX1o4+pDhRBCnDvqFLzefffdqKrK+PHjmTt3brXzc+fOZfz48SiKwuTJk0+5/osuugjQ1tnUtN5n06ZNAJUbr5+Ih4dH5fqi7Ozs45Y5evxczJZoWbYSgNw2TdlfruXgCvE0Mm/ZalBVLjIlaQVbypRhIYRoaNKHCiGEEHVTp+B16NCh3HfffWRnZ3PVVVcRHBzMwIEDGThwICEhIVx11VUcOXKEyZMnM2LEiFOuPywsjB49egAwffr0audXr15NSkoKZrOZUaNG1arOq6++Gqh5StPSpUsBbWP3c43fuv0AOAf1Ii2/DICMQ8kUFJXQyqsM1/IM0Juh+aCGbKYQQgikDxVCCCHqqk7BK8BHH33E1KlT8ff358iRI6xevZrVq1eTlZWFv78/H3zwAZ9++mmdG/bMM88A8Oabb7Jly5bK4zk5Odxzzz0A3HfffVWSXsyZM4c2bdpw8cUXV6vvgQcewNfXl4ULF/Lll19WOTdz5kxiYmIqy51LytNSCT1UghMwDhyJ3ali0utIqfi0/dLQikyOkf3BVHPmSSGEEGeP9KFCCCHEqVPUf6cRrAOHw8HmzZtJTk4GtH3lunfvjl6vP8mVJ/fggw/y0UcfYTQaufjii3F3d2f58uXk5+fTr18/li5diqvrsS1hvv/+eyZNmkRERARJSUnV6lu6dClXXHEF5eXltG/fnrZt25KQkFC5ofvzzz/Pyy+/fEptPJo1sqCgoEHW7hz4aiq2979gXzMd/p+v4MZvNhHp78ajFxnZte8gj7vMwJASCyPfhl53nfX2CSHEhaCufYH0oQ3Xh5bby9matZVuwd0w6U1n9d5CCCGOOZW+wHC6N9Pr9fTs2fOMTBWaOnUq/fr149NPPyU2NhabzUZ0dDRPPfUUDz/8MCbTqXU2w4cPZ/v27bz++ussW7aMuXPn4uXlxahRo3jwwQfrND2roRUtWYILkNS1CdY8K6Ala7p8WC8u798R3n5YK9jy3HttQghxvpM+tOFM2z2Nz7Z9xuPdH+em9jc1dHOEEELUwmmPvF7oGvJTY/uRI+wfOBBFhfnvXo7OdBsfrzjA9b3CeW1sR9g9B2bdAv4t4f5NZ7VtQghxIWnoWTjnqoZ83+5ccidr09cyvtV4Xujzwlm9txBCiGPO6shrXFwc+/bto7CwkJri4Jtukk80z4Si5StQVDjQBMJbdGPttlIAzE4Ldrsdw/4lWsFWkmVYCCEaI+lDG87+PC3ZYW5ZbgO3RAghRG3VOXhdt24dd955J7t3766xjKqqKIoiHe8ZUrZ9OwBboxWu8GvNz7nahvS7tm/jg6y9PF6sZX+UKcNCCNG4SB/asLLLsskpzwEgt1yCVyGEOFfUKXjdv38/w4cPp6SkhD59+pCZmUliYiLXXXcd8fHxbNu2DYfDwdixY2X61BlUEr8HgFR/hVbbZzMsO4uL9DrClRK6mzyh5AiYPCG8TwO3VAghxFHShza8+Lz4yu8leBVCiHNHnYLXt956i5KSEj777DMmT57MpEmTSExMrEyVv3v3bm666Sbi4+NZu3ZtvTZYHGNNPIAOUDwduK2Zyj0AxoqTKRVfo4eAQbIoCiFEYyF9aMM7OmUYJHgVQohzSZ32ef3rr7+Ijo5m8uTJxz3fvn175s+fT0JCAq+99tppNVAcnyMnE12JAwDf0Cbkd5zEDPsQ5jr6skvXAbXFMGh5CQx6ooFbKoQQ4t+kD214/w5ei23FWByWBmyNEEKI2qpT8Jqenk6HDh0qnx/dj85qtVYea9KkCYMGDWL27Nmn
2URxPNZVPwOQ7w7R3Saypf3TPG2/gyn2O9jW4XmUG36D63+BkI4N3FIhhBD/Jn1ow4vPi8doV+mQ5ETvUMkrz2voJgkhhKiFOgWvrq6uGAzHZhx7enoCkJmZWaWcl5cXKSkpiPpn3bAQgHRfaOXXmpTcMgDcFBvtWzdvyKYJIYQ4AelDG5bdaedA/gHGrFWZMsPJJZvVyuRNQgghGrc6Ba+hoaEcOnSo8nmLFi0AqqzNUVWVLVu24Ovre5pNFNWU5lJWkawpw0+hlW8r9qVpa3Y8dHbatohoyNYJIYQ4AelDG1ZyYTI2p40uyQoAQQWqbJcjhBDniDoFr7169WLPnj2UlWmjfZdeeikADz/8MAsWLGDnzp3cfffdJCQk0KNHj/prrdDsnUduiTbNLC/QlWC3YLJLnQAM7NICF7O5IVsnhBDiBKQPbVj78/ajd6hEpWv9pqsFGXkVQohzRJ2C11GjRlFeXs78+fMBiI6O5s477yQ9PZ0rrriCLl268NVXX2EymXj11VfrtcEC2DmL8iJtypk+PAxFUUjJ0/4I6tchsgEbJoQQ4mSkD21Y+/P2E5kJRrsKgKtVMg4LIcS5ok5b5Vx11VXYbLYqxz799FNatmzJrFmzyM3NpW3btjzzzDO0b9++XhoqKhSmQdJq9IUhAHhHt0FVVQ7nlgLQzNetIVsnhBDiJKQPbVj78/bTMk2tfO5ihVSZNiyEEOeEOgWvx6PT6XjkkUd45JFH6qtKcTy7ZuOwgMmirdVp0rorq7bGUWSxAxAmwasQQpxzpA89e/bn7eea1GPBq6tVlZFXIYQ4R9Rp2vCtt97KE0/I/qENYucsLMXaZw45ntCyaQf+2Z4AgJdZwdWkb8jWCSGEOAnpQxtOgaWAjJKMKiOvrhaZNiyEEOeKOgWvP/30E4mJifXdFnEy2QcgfRvZJVpCpgxfhXD3SPYcOgJAuJ+MugohRGMnfWjDic+Lx7tEJTj/2DFZ8yqEEOeOOgWvISEhKIpS320RJ7PrVwCO6JoBUBzixaFDWRTYtB9jdJB3gzVNCCFE7Ugf2nD25+2nZcWUYcXFBdCCV8k2LIQQ54Y6Ba/Dhw9nzZo11RJOiDNIVWHnLABKylwB0IU1Zfe+g5SqRgDC/WXkVQghGjvpQxvOv5M1uVVsQ+RihdyyHFRVPdGlQgghGoE6Ba8vvvgiFouFO+64g6Kiovpukzie9G2QcwAMrpCtbYvj3rwlu/YfpKQieJVMw0II0fhJH9pw4vPiaVUx8urery8ABicoNjtFNvlZCCFEY1enbMPTpk3j0ksv5YcffmDBggUMGzaMyMhIXF1dq5VVFIXnn3/+tBt6wdupTRlWW12Cx6+bADCFRFOUWkoZgQCE+VV//4UQQjQu0oc2DKfqJCF3P9Hp2nP3Pn0rz7laIKcsBy+TVwO1TgghRG3UKXh98cUXK9fr5OTk8PPPP1croygKqqpKx1sfnA7Y9RsAZc2G41q+AScQEN6Z0NSDlCbJyKsQQpwrpA9tGClFKQSmleFiA52HB+aWLVDc3FBLSyuTNkV5RzV0M4UQQpxAnYLXKVOmSLKJsyl5DRSlg4s3SaUmFCDPW0e/jj2JjOjEJ2+swKBTaOLt0tAtFUIIcRLShzaMf693de3UCUWnQ+fuhqO0VFv3KhmHhRCi0avzyKs4iyqmDNNuDOnxO2kKFAd7oigKh3JKAWjq44pBX6clzEIIIc4i6UMbxv68/ZXrXV27dAFA7+aOg2xt5LVMglchhGjsJNpp7OwW2DNX+77j1RQf3A+AMzQYu8NBSp6WvKmZrHcVQggharQ/99g2Oa5dOgPa9GEAV4sqI69CCHEOqNPIqziLDiyH8nzwCIGIfjhTngHAGdyUJ1//jEz3CEAv612FEEKIEzicupemedr3rp06AaBzd9eey16vQghxTqhT8Pryyy/XuqwkmzhNB5ZpX9tfiarocEvPB0D1a4Yzz0mBTRs8b+YnwasQQpwLpA89+0ps
JbjvTwVAHxmO3scHqBq8ysirEEI0fqeVbfh4G3r/OwmFZEqsB6UVnwT7NSezJJPAXAcAFtemkFdAqWoE7BK8CiHEOUL60LPv3/u7enTpWnn8aPDqYoUMCV6FEKLRq1Pw+sILLxz3uNPpJDk5mb/++ouUlBRuu+02wsLCTquBFzxLxabpZi8OJGzE3wpOBQ47te1x8q3a6Wa+suZVCCHOBdKHnn1apmHt+6PJmgB0Hlrw6iZrXoUQ4pxQr8HrUWVlZdxxxx0sXryYLVu21KlhooKlUPtq9iRtxyb8gRJ/N9Jz8nGoCrlldgDCZeRVCCHOCdKHnn37c+IYmVY1WRNUHXnNKZM1r0II0didkWzDrq6ufPXVV1gsFqZMmXImbnHhODry6uJFwYE47VCIP1abHavejAq4mfT4uZsaro1CCCHqjfSh9S9/707crOB0MWFu2bLy+L/XvBZaC7E5bA3VRCGEELVwxrbKcXNzo3v37syfP/9M3eLCUH5s5NV+KAUAR1AIACYvXwCa+brJhvdCCHEekT60/qiqin5vAgC69q1R9PrKc/qK4NXNqvWheZa8s99AIYQQtXZG93nV6XRkZWWdyVuc/ypGXi0GF1wz8gHwbt6Wizq0wt03CJA9XoUQ4nwkfWj9SC9JJ/xQOQC+XXtWOXd05NXLpuWRkHWvQgjRuJ2xfV7T0tJYvXo1wcHBZ+oW5z+nA6xa8JpgySUk1wlAi+59uGjwYF6YuwsoIUz2eBWizhwOBzabTBUUNTMajej/NVp3NkgfWn/25+2vzDTsflG3Kud0Hh4AeNj1gIPcMglehRCiMatT8PrPP//UeK6oqIi9e/fy6aefUlhYyE033VTnxl3wrMWV3+4rPkxUxWwmc2QkFruDeTvSAejfIqAhWifEOU1VVTIyMsjPz2/opohzgI+PDyEhIfWyREP60LMr4fAO+lbkYnLt3KnKOd1/pg3nlEvSJiGEaMzqFLwOHjz4pB24qqp0796dV155pU4NExxb76o3cSh5N23s4NQp5Lt5sH53JrklVoK9zAxuHdiw7RTiHHQ0cA0KCsLNTdaNi+NTVZXS0tLK6btNmjQ57TqlDz27CrduBqA8xAeDv3/l8bwSK/8kFdMKcLFqI7MybVgIIRq3OgWvAwcOrLHjNZlMhIaGMmzYMK655hoMhjM2M/n89689XvMO7NEOBXrzxucxbHCGAyau6d4Mg/6MLl0W4rzjcDgqA1f/f/0xK8TxuLpqeQWysrIICgo67SnE0oeeXfo9BwBQ2reucvzLfw6yKPYwnwPmcm1ZjgSvQgjRuNWpV1y5cmU9N0McV8Uer6rZA2tSMgDWoGBKnAZSLSYUBa7p3qwhWyjEOenoGlc3N1kvLmrn6O+KzWY77eBV+tCzp9xeTuBBbc2NX7feVc7FZRRSanABwFCu7ZkuwasQQjRuMmTXmFWMvGa5eOJ9pAQAq08wSQ4vQFvr2sxP/vgWoq5kqrCoLfldOTcl5MXTIk2bEhzYs3+Vc0nZJZQZzQDobXZ0TlWCVyGEaOQkeG3MygsA2Gc206SiPz1i8uCQXQteJ/QMb6iWCSGEEI1e0s5YPMrBZtTh0vrYtGGbw0lKXhllBnPlMVcL5JRJwiYhhGjM6hS8fvLJJ+j1eubNm1djmXnz5qHX6/nyyy/r3LgLXsXI636DQkie9snxNqcH5RjwdTUwrK1soSCEEOca6UPPnsKtm7SvzQNRjMbK44fzyminJvCt+V0cOm0auItVpg0LIURjV6fgde7cuQQGBjJ69Ogay4waNYqAgADmzJlT58Zd8CrWvO7HTnDFNjlbTE0BGN+9GSaDDJwLIURtrVy5EkVR+PXXXxu0HdKHnj3m+MMA2NpEVjmelF3CjfqlDNLvQDVqU8LdKoJXVVXPdjOFEELUUp2in7i4ODp06IBOV/Pler2ejh07snfv3jo37oJXsVVOVn4ZJgc4DXr2umjbNMiUYSHE8Xz//fcoioKLiwupqanV
zg8ePJgOHTqckXs/8cQTKIrCtddee9zzsbGxvPjii8fdW/f111/n999/PyPtamykDz17dIVavgglqOqWconZJUTr0gBQDVrw6mIBi8NCqb307DZSCCFErdUpeD1y5AghISEnLRcSElK5N56oA0sRFgXs2VrnW+QbgFPR0ybARPNAjwZunBCiMbNYLLz55ptn7X6qqjJjxgwiIyOZN28eRUVF1crExsby0ksvXfDBq/ShZ4+uzAKA2cunyvGk7GKiFS14VYzaSKuPwwRAbplMHRZCiMaqTsGrp6cnaWlpJy2XlpYmW1GcDkshCUYjQRXrXQ+atU+O7x7WriFbJYQ4B3Tp0oWvv/66Vv9W14eVK1dy+PBhvvvuO+x2O7Nnzz4r9z0XSR969hjKrAC4ePlVOZ6TlYqPon0wrDNoe7wGqO7auXJJ2iSEEI1VnYLXzp07ExsbS0pKSo1lUlJSiI2NpWPHjnVu3AXPUsQ+k4kmuUeDVz983Ixc0v7kn9gLIS5szzzzDA6Ho1ajr3a7nVdeeYXo6GjMZjORkZE888wzWCyWWt8vJiaGdu3aMWTIEIYNG0ZMTEyV8y+++CKPP/44AFFRUSiKgqIoJCUloSgKJSUl/O9//6s8fssttwCQnJzMPffcQ+vWrXF1dcXf35+rr76apKSkam3Iz8/n4YcfJjIyErPZTFhYGDfddBPZ2dk1tttisXDZZZfh7e1NbGxsrV/v6ZA+9Owxlmn7t7r5+Fc5rsvZX/m9weAAIMCpBa+StEkIIRqvOgWvEydOxGq1ctVVV5GRkVHtfEZGBuPGjcNmszFx4sTTbuQFq7yAeJORJhXJmtI8AhjdPgizJGoSQpxEVFQUN910U61GX2+//XamTJlC165d+eCDDxg0aBBvvPEG1113Xa3uZbFY+O2335gwYQIAEyZMYMWKFVX6h6uuuqry/AcffMCPP/7Ijz/+SGBgID/++CNms5kBAwZUHr/rrrsA2LhxI7GxsVx33XV89NFHTJ48meXLlzN48GBKS4+tTSwuLmbAgAF8/PHHjBgxgqlTpzJ58mTi4uI4fPjwcdtdVlbG5ZdfTmxsLMuWLaNv3761er2nS/rQs0NVVUzlWmDq7h1Qedxqd+JVnFT53Gy0AeDj0LbNkeBVCCEaL0NdLrr55puZNm0aa9asITo6mtGjR9OmTRtAS0SxcOFCSktL6dOnD7feemu9NviCYikiS6+na8XIa5pHIPr9W4DODdsuIc5zFqutxnM6nYLRYKhVWUVRMBnrVrY+PPvss/zwww+89dZbTJ069bhltm/fzv/+9z9uv/12vv76awDuuecegoKCePfdd/nrr78YMmTICe8zf/588vPzK4PdK6+8kjvvvJOZM2fy0EMPAdCpUye6du3KjBkzuPLKK4mMjKy8/oYbbmDy5Mk0b96cG264oUrdo0ePZvz48VWOXX755fTp04fffvuNG2+8EYB33nmHXbt2MXv2bMaOHVtZ9rnnnjtu9tji4mIuu+wydu/ezYoVK+jSpcsJX2N9kj707Cizl+Fq0X72nr7HtpY7lFtKc+XYBzomo40yTHgfXfMqwasQQjRadfpLSa/Xs2DBAiZNmsScOXP49ddfURQtW9/RPxLGjBnDtGnTMBhO74+xWbNm8emnn7J9+3asVistWrTg+uuv5+GHH8b4rz3b6mLhwoWVWxVcfPHFLFu27LTqq3eWQgoMeoLztaclnh60DfWpfK+FEGfGU298VuO5ti0juXPimMrnU979CqvNftyy0RGh3HfLscDrlanTKCktO27ZZk2DeOSOCXVs8fE1b96cG2+8ka+++oqnnnqKJk2aVCuzcOFCAB555JEqxx999FHeffddFixYcNLgNSYmhu7du9OiRQtAW9M5evRoYmJiKoPXunJ1da383mazUVhYSIsWLfDx8WHLli2Vwetvv/1G586dqwSuR/3338yCggJGjBjBwYMHWblyJe3btz+tNp4q6UPPjgJLAa7a
kldcvY9NG07KLqlM1gSgN1QEuHbtvc4pkzWvQgjRWNW5V/Ty8uK3335jx44dLFq0iOTkZADCw8O59NJL6dz59EcHH3roIaZOnYrBYGDo0KF4eHiwYsUKnnzySebNm8eSJUuq/GFzKvLy8rjjjjtQFKXx7ulmKUIp12NwgkWvx8tDoWlwwMmvE0KICs899xw//vgjb7755nFHX5OTk9HpdJWB51EhISH4+PhU/ttek/z8fBYuXMh9993HgQMHKo/369eP3377jf3799OqVas6t7+srIw33niDadOmkZqaWuXf64KCgsrvExISGDduXK3qfOihhygvL2fr1q1nPXA9SvrQM6+wKJuKXEzoPTwrjyfllHCp7tg2UrqKbMPuVm1Jjoy8CiFE43Xac9Q6depEp06d6qMtVfz+++9MnToVDw8P/v77b7p27QpAdnY2Q4cOZfXq1Tz//PO8++67dar//vvvJzMzk8mTJ/P555/XZ9PrT3khZov2aXGGhzdNDSUSvApxFrz59D01ntPpqo7ivfzYnTWW/e+I3/MPTqp12fpydCru0dHX+r7/rFmzsFgsvPfee7z33nvVzsfExPDSSy/VqW7Q/q2eNm0aDz30EH369MHb2xtFUbjuuutwOp11qnPMmDHMnDmTN998kx9++OGE+62eadKHnjlFeVm4A04FdG7HgvTDWTmEKVoSr3ydD7qKNa9HR2kleBVCiMar0Wb+ef311wF46qmnKjtdgICAAD77TJvS98knn1T55L225syZQ0xMDI888gg9e/asnwbXN7sFHBY887U/KPM8fTEoKk0keBXijDObjDU+jP+Zxnmisv9dw3oqZevTc889h91u56233qp2LiIiAqfTSXx8fJXjmZmZ5OfnExERccK6Y2Ji6NChA7Nmzar2GDZsGNOnT68se6IAuaZzv/76KzfffDPvvfce48ePZ/jw4fTv37/aXrHR0dHs2rXrhG096sorr+S7775j+vTp3HvvvbW65lxzwfehQGmBFqBazXqUf31AYc3UMg1bjD5kmsLRVUwbdrFqXyV4FUKIxqtOweuiRYsYOnQoK1asqLHM8uXLGTp0KEuXLj3l+lNTU9m4cSPAcTMt9u/fn2bNmmGxWCrXa9VWdnY2kydPpnXr1rz88sun3LazxlJEuaLgn689zffwQacohAT6nfAyIYT4r+joaG644Qa+/PLLatltR40aBcCHH35Y5fj7778PULmm8XhSUlL4559/uOaaaxg/fny1x6RJkzhw4ADr168HwN1d24rkv4Hn0XPHO67X66tNS/34449xOBxVjo0bN47t27czZ86canUcb1rrTTfdxEcffcQXX3zBk08+WeNrPBOkDz07jgavNteqHwwZ8rTp7TbfFpQbfSqDV2NFZmIJXoUQovGq00f906ZN+3979x0eRbk2cPg3W7K7yW46JEBIQkeRKkWlWLCCIioooEex93b0eET97McuiopiB4+hiAeUpqJYAFGaCDZUIMHQScKWZPvufH/MZiEmgSSEZBOe+7r22s3MOzPPhtE3z7yNVatW0a9fv2rL9O/fn5UrVzJ16lTOOOOMWp1/3bp1AKSmptKuXbsqy/Tt25fCwkLWrVsXXX6hJm688UaKioqYM2cOZrO5VnE1KJ8Tu05HsraGOmXmRFqkp1Rq9RFCiJq4//77+e9//8vvv/9eYZxnz549ueKKK3jjjTew2+2cfPLJrFq1imnTpjFy5MiDTtY0ffp0VFVlxIgRVe4fNmwYBoOBvLw8BgwYwPHHHx+NZcyYMRiNRs477zwSEhI4/vjj+eKLL5g4cSKtW7emXbt2DBgwgHPPPZf//ve/JCUlceyxx/Ldd9/xxRdfkJZWcd3Of/3rX3z44YeMHj2aq666iuOPP56SkhLmzZvHlClTqhxDesstt+B0Orn//vtJSkrivvvuq8uvttakDm0YXoeWhAYtcfu3BUKkegrAAIaMLvh3lKIzat3PDR6t3/A+7z5C
4RB6nb7BYxZCCHFwdcqE1qxZQ69evbDZbNWWsdls9O7dm1WrVtX6/Pn5+YA2cUV12rZtW6FsTcycOZMPP/yQ22+/nYEDB9Y6rgbldeLQ67BGJibteFxX2gzs27gxCSGarI4dO3LZZZcxbdq0Svveeust2rdvz9SpU5k7dy6ZmZlMmDCBhx566KDnzMvLIzs7u9rJhZKTkxk0aBCzZs1i4sSJ9OvXj8cee4wpU6bw6aefEg6Hyc/PJyEhgYkTJ3LdddfxwAMP4PF4uOKKKxgwYACTJk1Cr9eTl5eH1+tl4MCBfPHFF5x11lkVrmW1Wlm2bBkPPfQQc+fOZdq0abRs2ZKhQ4eSlZVV7Xe47777cDgc0QS2IboRSx3aMHxOOwDheFN0218lbjpGZho2ZXYlWLIlOmGT4vGioKCiYvfZSbOkVTqnEEKIxlWn5HXnzp0MGDDgkOXatm3Ljz/+WOvzu1wuYH8Xs6pYrVYAnE5njc65a9cubr75Zjp06BAdC1QXPp8Pn88X/bmm16/9hVzs0+lIdGuVaodunene85gjcy0hRLMxfvx4xo8fX+W+qVOnMnXq1ErbDQYDDz74IA8++GCtrrVhw4ZDlvnqq68q/PzAAw/wwAMPVCrXpUsXvvnmm0rbk5OTeeeddyptLygoqLQtNTWVl19+mZdffrnKWE455ZQquxA//fTTVY4HPlKkDm2AOhQIOLXxvOoBkzXlH7BMjpLembDFHu02rJa5STYls8+3jxJviSSvQggRg+o05jUuLi5aOR5MaWlpo87ieKDrrruOffv28dZbbxEfH1/n8zz55JMkJSVFX+VPr+udz4ldr8cWaXm1ZbQ4MtcRQgjRoKQObYA6FAiVar9jJWF/vFv3Ommn7NR+aNEZ4tPQR7oNh8vKSDVr80rIuFchhIhNdaoVO3XqxLfffovb7a62jNvt5ttvv6V9+/a1Pn95V6qysrJqy5SWlgLaWnmHMm3aNObPn88NN9zAKaecUut4DjRhwgQcDkf0VVhYeFjnq5bPhUPRRZNXRyhEOEbX0hNCCFFzUoc2QB0KhEu176+LtDID2HdtwawECCpGSM5Bl5AW7TYcLisj1ZQCQLGn+IjFJYQQou7q1G34vPPO4+GHH+aWW27h7bffrrTEgaqq3HrrrTgcDs4///xanz83NxfgoJVa+b7ysgdTPvvk6tWrK1W85TNvrl27Nrpv5syZZGZmVnkuk8mEyWSqcl+98jpxqjrigtqPH379PT0GVT+5hxBCiKZB6tAGqEMBNZK8Gw4YWxzeoy2TU2bNJUmnx2hrEe02DJChSwKk5VUIIWJVnZLX2267jTfeeINp06axYcMGrrrqKrp27QrAxo0beeedd1i3bh2ZmZncfvvttT5/7969ASguLiY/P7/K2RLXrFkDUGH9ukMpP6Yqdrs9Ot7K6/XWJtwjw+fA49dmOvTrdWRktTroGolCCCGaBqlDG4bi1uKIsyVFt1kcmwEIp3WK7GuBoldBUUFVaKFqrbSSvAohRGyqU7fh5ORkFi5cSJs2bfjhhx+49dZbOeOMMzjjjDO49dZb+eGHH2jTpg0LFy4kNbX265JmZWVFlxA4cIH7csuXL6ewsBCTyRRdo/BgPvroI1RVrfL17rvvAjB06NDotpo8iT7ifC58keS11GSklYx5FUKIZkHq0Iahc2sTQ8UlJgPg8Ydo4dsKgKWVNgGiOTkDRSHa+pqmapNcSfIqhBCxqc4zQfTs2ZONGzfywgsvcNZZZ9G1a1e6du3KmWeeyQsvvMBvv/1Gr1696hxY+Xp7Tz31FD/88EN0e3FxMTfddBOgrdGXlLT/iercuXPp2rUrQ4cOrfN1Y4bXSdCn/fO4TGbSU5MOcYAQQoimQurQI6983VZzovYAoKC4jA46bbImcyR5tVpteFVjdK3X1JC2dm2xV8a8CiFELKpTt+Fy8fHx3H777VV2ayouLuaNN97gnXfe4aeffqr1uUeOHMltt93GSy+9
xAknnMDQoUNJSEhgyZIl2O12Bg4cyGOPPVbhGIfDwe+//x4zXZYOi89FOJK8lsaZaWmt++yOQgghYo/UoUeOP+TH5AsBEJ+cDkBBURn9IsvkkK51G7ZZjJRgi7a8Joe08bjS8iqEELHpsJLXv1NVlU8//ZS3336bBQsWEAgEDut8kyZNYuDAgUyePJkVK1YQCATo0KED9957L3feeSdxcXH1FHkM8jnBp41xdZss2BIkeRVCiOZM6tD64/Q7sUSWk01I0tZr3bFrO+lKZF3ZSPKaEGfgL9VGvFGb3CkpqP1OSjySvAohRCyql+Q1Pz+fd955h6lTp7Jjx47oIvB9+vTh8ssvP6xzX3zxxVx88cU1Kjt+/HjGjx9fq/PX5ZgG4XNh8EaS17gErAmWQxwghBCiKZI6tP45fU7iI8mr3qotB+TdsVHbZ8okMU4b26rTKTh1idgM2tJB1qA214S0vAohRGyqc/Lq8/n48MMPefvtt1m6dGl0ogZFUbjnnnu4/PLLOfbYY+sz1qNKwOsgzqsAKrbMNqQmH3otPiGEEE2D1KFHltPvxKINeUVnjSSqxX8C4E3qwIE1aqkuCZ1xGwDx/shD46AbT9CDxSAPjoUQIpbUOnldu3Ytb7/9NjNnzsThcKCqKgaDgWHDhrFhwwa2bt3KU089dSRiPao4/KXYPNrn1KwcTM24e5cQQhwtpA5tGE6/k6Roy6u2/E2CawsAuhadK5R1G5KjY14NngBxljj8YT/7vPuwWCV5FUKIWFKj5HXfvn28//77vP3229GJI1RVpWvXrlx11VVcfvnltGzZksGDB7N169YjGvDRwhF0YfNo41xNaWmNHI0QQoi6kjq04TnLSmihzdeEzmrF5Q3QOlgIekhoc0yFst645Ohsw2F3Gampqewq20Wxp5jW1tYNHboQQoiDqNFSOa1ateKOO+5gw4YNJCQkcNVVV/Htt9/y66+/cvfdd9OyZcsjHefRRVWxBz3YPNqTYLehXufVEkIcJfLz87nlllvo3Lkz8fHxxMfHc+yxx3LzzTezYcOGKo+55557UBSFSy65pMr9X3/9NYqi8OGHH1a5/5ZbbkFRlArb/H4/kyZNonfv3iQmJpKcnEy3bt247rrr2LhxY4WyP/30E6NGjSInJwez2UybNm0444wzePnll+vwG4gNUoc2vDJ7UfSzLiGBrcVuOkRmGra0qtgd2xeXEm15DZe5STVrS+vIuFchhIg9NcqK/H4/iqKQlZXFf//7X04++eQjHdfRzV+GXadgc2s//rlH1psTQtTOggULuOSSSzAYDFx66aX07NkTnU7Hxo0bmTNnDq+99hr5+fnk5OREj1FVlRkzZpCbm8v8+fNxuVzYbLbDjuWiiy7ik08+YezYsVx77bUEAgE2btzIggULOOmkk+jatSsAK1as4NRTTyU7O5trr72WzMxMCgsL+f7775k0aRK33nrrYcfSGKQObXheh5Z4BkwGFL2erbtLOFvZo+1Mr9htOGRKRWcsT17LJHkVQogYVqPktXv37vz0009s27aN0047je7du3PVVVdx6aWXkiZdWuufz4Vdp6NDZMxrfFpK48YjhGhSNm/ezJgxY8jJyWHJkiW0atWqwv6nn36aV199FZ2uYuebr7/+mm3btvHll19y1llnMWfOHK644orDimX16tUsWLCA//znP9x3330V9r3yyivY7fboz//5z39ISkpi9erVJCcnVyi7Z8+ew4qjMUkd2vC8Ti3xDFm0+SLs2zaiV1Q8OisWa8WW7rAldX+34dLSaPJa7JUHx0IIEWtq1G14/fr1rFq1iuuuuw6bzcaGDRu48847adOmDZdccgmfffZZdGp/UQ98TpxhHaag9qOtZYvGjUcI0aQ888wzlJWV8e6771ZKXAEMBgO33XYbbdu2rbA9Ly+PY489llNPPZXTTz+dvLy8w45l8+bNAAwcOLDSPr1eXyF527x5M926dauUuAJNumut1KENz+e0
AxCONwEQ2PM7AI6EXPhbt3YS0g7oNlxGmlm7J6XlVQghYk+NkleAvn37MmXKFHbu3Mm7777LwIED8fv9zJ49m2HDhpGTk1Np7JKoI58LT0BrFA/oFNJaJjduPEKIJmXBggV07NiRAQMG1PgYn8/H//73P8aOHQvA2LFj+fLLL9m1a9dhxVLeLTkvL49gMHjIsmvXruXnn38+rGvGIqlDG1bA5dQ+JGgTH8bt26RtT+lUqawuIV26DQshRBNR4+S1nMVi4YorrmDp0qX8/vvv3HPPPWRkZLBt2zaKi7UuNgMHDuSNN97A4XDUe8BHBa8Dn1/7p3GZ40i0WRs5ICGODqqq4vYHY+ZVl9Y4p9PJjh07OO644yrts9vtFBUVRV8ejye6b8GCBdjtdsaMGQPAyJEjMRqNzJw5s+6/UOCEE07g5JNP5s033yQrK4tx48bx6quv8tdff1Uqe/fdd+N2u+nVqxcnnXQS//73v1m8eDGBQOCwYoglUoc2jFCpCwAlkrwmu/MBMGZ0qVQ2zpaO3lDebdhJmiXS8uqR5FUIIWLNYU1j26lTJ5566in+85//sHDhQt566y0++eQTvvvuO77//nvuuOMORowYcdh//Bx1fC78vkjyajLRJkHWmROiIXgCIY598LPGDiPq10fPIj6udv+bdjq1FiertfJDr1NOOYX169dHf3722We5++67Aa1ltG/fvnTs2BEAm83G8OHDycvL44477qjjNwBFUfjss8947rnneP/995kxYwYzZszg5ptv5uKLL+b111+PdhM+44wz+O6773jyySf57LPP+O6773jmmWdo0aIFb731FiNGjKhzHLFI6tAjRy0tA7Q1Xh2eAG1D20AHSdndKpVNiI/HY9DGxoZcLml5FUKIGFbrlteq6PV6RowYwbx58ygsLOQ///kPHTp0wOv1Mnv27Pq4xNHF5yQcaXktjTNjizw5FkKIQymfHbi0tLTSvtdff53PP/+c999/v8J2u93OokWLOPnkk9m0aVP0NXDgQNasWcMff/xxWDGZTCbuv/9+fvvtN3bs2MGMGTM44YQT+OCDD7jlllsqlO3Xrx9z5sxh3759rFq1igkTJuByuRg1ahS//vrrYcURq6QOrX+qW5uu32BNpGCviw7KTgAsrY6pVNZmNuAyaPVs2C1L5QghRCyr9wVEMzMzmTBhAhMmTOCbb77hnXfeqe9LNH9eJ3i15DVgS6Jlusw2LERDsBj1/ProWY0dRpTFqK/1MUlJSbRq1arKcaPlY2ALCgoqbJ89ezY+n4/nn3+e559/vtJxeXl5PPLIIwCYzWaACl2OD+R2u6NlqtKqVSvGjBnDRRddRLdu3fjggw+YOnUqhr+tZx0XF0e/fv3o168fnTt35sorr2T27Nk89NBD1X/5ZkDq0PqhK/MCEJeYxLZtW+ip+Aiix5CSW6mszWzEYbBiwkvY7SHFpNW5Jd4SwmoYnVIvz/mFEELUg3pPXg908skny3p2deFzofdpsyEqSanEW6r/Q1AIUX8URal1N91YNHz4cN566y1WrVpF//79D1k+Ly+P4447rsrE8PXXX2f69OnR5LV8Aqbff/+9ynP9/vvvFdaOrY7RaKRHjx78+eefFBUVkZmZWW3Zvn37ArBz585Dnrc5kTq0boLhIAaPHwBTYgpl238DoNiURYbeWKm8zWxgl8FKJl4IhUnRaV3uQ2oIp89Jsjm5wWIXQghxcPI4MQaFvQ6MnshU/omyBqAQonbuuece4uPjueqqq9i9e3el/QdOBFVYWMjSpUu5+OKLGTVqVKXXlVdeyaZNm1i5ciWgtZz26tWL999/v8IarQBr167l+++/55xzzolu+/PPP6ucnMlut/Pdd9+RkpJCixbacmBfffVVlZNULVq0CIAuXSpPtiPE35X6S7FouSvmpBTYqz1oKbO1r7K8zWyg2JAU/Vnv8WGL07rfS9dhIYSILU2/iaEZcnlLsEV65PlNiY0bjBCiyenU
qRPTp09n7NixdOnShUsvvZSePXuiqir5+flMnz4dnU5HVlYW06dPR1XVaidDGjZsGAaDgby8vGi344kTJ3LWWWfRq1cvxo8fT+vWrfntt9944403aNWqFRMmTIgev379esaNG8c555zD4MGDSU1NZfv27UybNo0dO3bw4osvotdr3aNvvfVW3G43F1xwAV27dsXv97NixQpmzZpFbm4uV1555ZH/5Ykmz+l3YvFpn422JMwF3wCgplVeJgfAZjJSQiKKIYwa1EXXenX5XRR7i2lP1UmvEEKIhifJawxyeO1YI8nrLk/zWSJCCNFwzj//fH766Seef/55Fi9ezDvvvIOiKOTk5DB8+HBuuOEGevbsSY8ePcjOzqZnz55Vnic5OZlBgwYxa9YsJk6ciMFg4NRTT2XZsmU8/vjjvPTSS7hcLjIyMhg3bhwPP/wwLVu2jB4/ZMgQHnvsMT755BMmTpzI3r17sdls9O7dm6effpqLLrooWva5555j9uzZLFq0iDfeeAO/3092djY33XQTDzzwQHRWYiEOxul3RltedQlW0r0FAFhaV56sCcBqNrBPtaEzqISC+9d6LXAWSMurEELEGEleY5Dd78Tm0brO6ZKSDlFaCCGq1qFDB1599dWDltmwYcMhz/PVV19V2jZgwADmz59/yGNbtmzJv//9b/79738fsuzZZ5/N2WeffchyQhyM0+ck3qfVoW6DiRx1OyiQmtO9yvJ6nUKZPhG9USXk3Z+8AhR7itlVtovN9s1ssm9is30zmx2b+cv5F6FwqMG+kxBCxLITW5/I86dUnvDxSJDkNQbZA65ot2FTsiSvQgghRE05A07iI92G9/h89FTsAJgzqx8z7Y1LQWcIAxWT16dXP82Tq548ovEKIURT5wlWvQLBkSDJawyyBz20jdwDlhapjRuMEEII0YQ4fU6yIsmr067NUF2iSyPVXP0cEv64FHRGrbU2VFpK5w6dAQirYQyKgezEbDokd4i+2iW2w2yQlQCEEALArG+4/x9K8hqDHD4PnSJDXZMyWjRuMEIIIUQTcuCYV59rGwAl8Tkc7FFw0JyKzqAlr+GyMi7qfBGdUjqRGJdITmIOxiqW2BFCCNHwJHmNQW63H4gjqFNokSlL5QghhBA15fLYMUceAOs82wHwJHY86DFhSxo6Y6TbsMuFQWegT0afIxqnEEKI2pN1XmNNKIjXo1WgLpORlinWRg5ICCGEaDrcjv0zBFsCuwBQU9sd9Bh9fBJKecuro+jIBSeEEOKwSPIaa/wuAn5tzcNSs4ns1i0PcYAQQgghyvmddgDCcQYSQ3sAiEvLOegxVouZgFHrGhy2Fx/R+IQQQtSdJK+xxusk7NP+WcosCVgT4hs5ICGEEKLp8LvsAITjzaSF9gJga3Hw5DXRbMCrj9OOiyS/QgghYo8kr7HG5wKfAoA33tbIwQghhBBNS8DlBEC1mGnBPgBSWh2827DNbMBtNAEQdjmObIBCCCHqTJLXWONzokRaXr3mhEYORgghhGhaQi4XAGGzCb2i4lcNxKe0OugxVpOBUoNFO66s9IjHKIQQom4keY0xqteJ0au1vNoxNXI0QgghRNOiut0AhI3a/BFFunTQHfzPHZvZiMugDdMJlZYd2QCFEELUmSSvMcbjLibBo30Oxic3aixCCCFEUxJWwyhlkUpUH3kQbDz0xIc2swGHQevtFPZ4jlh8QgghDo8krzHG7t6NNVJvqtbkRo1FCCGEaErKAmWYfdqSN4oupG2zZB7yOJvZyD69tjRd2OM/cgEKIYQ4LJK8xhi7t5hEj1bx6pOSGzcYIUSTNHXqVBRFYc2aNVXuLygoQFEUnnvuuei2r7/+GkVRqn3NnDmzocIXos6cfieWSPKq0wUACCS0PuRxNrOBYkMSAGFf4MgFKIQQ4rAYGjsAUZHdsy/a8mpKSW7UWIQQR5/bbruNfv36Vdp+4oknNkI0QtSO0+ckPtJwqle8AChJbQ55XKLZyF5D
MgBhb+hIhSeEEOIwSfIaYxx+B5naXBPEt0hr3GCEEEedwYMHM2rUqMYOQ4g6cfldWHza5zidNvGSMfXga7yC1vK6R58CgBoCNRhEMcifSEIIEWuk23CMsZfZMUd6LCW2atG4wQghhBBNiNPvxBJpeU3Qa0veJLTIPuRxVrOBncbU6M/hMplxWAghYpE8VowxbpdWYYYU6NTl4IuqCyFEfXO5XBQVFVXanpaWhqIojRCREDWnjXnVPlsN2hiclFaHrkuNeh2lcWkoOhU1rBC2F6FPSjqSoQohhKgDSV5jjMel9RkuM+vp3DKlkaMR4iijqhBwN3YU+xnjoYETxquuuqrK7Tt37iQz89CztgrRmJw+JynlEzYZVVyqhbS0mvVi0pusKEYV1acQKtqOMafDkQxVCCFEHUjyGmMCZdojY7fJSKLF2MjRCHGUCbjhiUPPTNpg7tsBcQkNeskHH3yQwYMHV9qemppaRWkhYovT76R1pNuwzhhmt9KS9gZ9jY61WYzaX0U+CJfsOnJBCiGEqDNJXmNM0B0EoMxsRq+TLnpCiIbVvXt3Tj/99MYOQ4g6ObDbsM6oss/YssbH2sxGQgYdChDet/vIBCiEEOKwSPIaazxhAErjzI0ciBBHIWO81toZK4zxjR2BEE2K03/AUjnGMKXmmnd1t5kNBA16jIQI24uPUIRCCCEOhySvsURV0WnL0lFqsjZuLEIcjRSlwbvpCiHqj9PrwHxAy2sgoebDAGxmA36jUUteHZK8CiFELJKlcmJJ0Icxkry6zcmNGooQQgjR1PhKHdE/bHSGMGpimxofazMZ8RriAAg79x2B6IQQQhwuaXmNIQFPMWavAqh4LTI5ihDi8Lzzzjt8+umnlbaff/751R6zbNkyvF5vpe09evSgR48e9RqfEPXN73QAoCqg6MGYcug1XsvZzAY8BhMAYZfziMQnhBDi8EjyGkPsrh3YIqt0qLb0xg1GCNHkvfbaa1VuP+WUU6o95qWXXqpy+0MPPSTJq4h5wdJI0mkMoygQ3yKnxsfazEZKDRYAwqWuIxGeEEKIwyTJawyxl+7E5tHWpzMkJTduMEKIJmv8+PGMHz/+oGVUVa3w8ymnnFJpmxBNiaqqhEpLAdAbtXs5KbN2La8ugzbmPVwWQ+s9CyGEiJIxrzHEXrYHm0f7HCdrKgohhBA15gl6iPNqy83pjWGKVRsZqSk1Pt5mNmCPJK8ht+eIxCiEEOLwxHzyOnv2bE455RRSUlJISEigZ8+ePPPMMwQCgVqdZ926dTz55JMMHTqUjIwMjEYjKSkpDB48mMmTJ9f6fEeC3bM3mry2bJfVuMEIIYRo8o6mOtTldxEfmWnYYAizk3SSLMYaH28zG7EbbACEPb4jEaIQQojDFNPdhu+44w4mTZqEwWDgtNNOw2q18uWXX/Lvf/+b+fPns3jxYiwWyyHPEwwG6dOnDwBWq5V+/fqRkZHBtm3b+O6771i+fDnvvfcen332GcnJyUf4W1XP4dxLVmR9upY5NZ8hUQghhPi7o60Odfqd0eRVZ1TZZ2iBoig1Pj7RbKBYnwhA2BcAVdWWzxJCCBEzYrbl9aOPPmLSpElYrVZWrlzJZ599xv/+9z/+/PNPunfvzvLly/m///u/Gp/v+OOP54MPPqCoqIgvv/ySGTNmsGzZMtatW0erVq1YtWoV//znP4/gNzo0974iAMIKJKbVvKuTEEIIcaCjsQ51+p1YIsmr3him1JRZq+NtZiNFxiQAwgEFvI76DlEIIcRhitnk9YknngDg3nvvjT7xBUhPT+fVV18F4JVXXsHhOHTlYjAYWLNmDaNHj8ZkMlXY1717d5555hkAZs6c2ahdn8r2aevKeU0KyVbTIUoLIYQQVTsa61Cnz4kl0ntJZ1Txxreq1fE2swFn+YRNAQXcxfUdohBCiMMUk8nr9u3bWb16NQDjxo2rtH/QoEG0bdsWn8/HokWLDvt6vXv3BsDj8VBUVHTY56srr12b
4t9j1pNci3E6QgghRLmjtQ51BVzE+7RZhnVGFTWxdsNvrGYD7vJ1XoM6SV6FEOIQCkvc3D17PZ/+vLPBrhmTyeu6desASE1NpV27dlWW6du3b4Wyh+PPP/8EIC4ujtRGnOU34NKm5veYDLWaZEIIIYQod7TWoRVaXg1hDMlta3W8zWzAU568SsurEEJUa4/Ly8PzfuG057/mw7XbePaz3wmHG2a5vZicsCk/Px+A7Ozq12dr27ZthbJ1papqtMvTueeeW6lLVENSyrTuVt64OAz6mHyuIIQQIsYdrXVoxTGvKuYWObU63mTQEzDHAxAOKqilRch0TUIIsZ/DHeD1pZt599sCPIEQAIM6pnP3WV3Q6Rrm/5gxmby6XC4AEhISqi1jtVoBcDqdh3WtRx55hO+++w6r1cpTTz11yPI+nw+fb/8U+od7/QPpPNr6dD7ToWd/FEIIIapytNahTr+TtuWnNqgkt6xdyyuA0Vr+O1MI79uFvt6iE0KIpsvtD/LutwW8/s1mnJH1tHu2TebfZ3XhpI7pDRpLTCavDeW9997j0UcfRafT8c4779CpU6dDHvPkk0/yyCOPHJF4jJ4wAD5z9X9wCCGEELEg1upQp89JvF/rtuYyxpORXPu61JQQjwooQNi+R5JXIcRRb+kfe/nnB+spKtWeDnbOsHL3mV0449iMWi1HVl9ism+qzaYtEl5WVlZtmdLSUgASExPrdI3Zs2dz1VVXAfDmm28yevToGh03YcIEHA5H9FVYWFin6/9dKBzC5NU++y1J9XJOIYQQR5+jsQ4FcPld0W7DxYZEMhLNtT6HzWIkaNRS1vC+vfUWmxBCNDWqqvLO8nzGv7uKolIf2anxvHhJLz65fQhndstslMQVYrTlNTc3F+CglVr5vvKytTFnzhzGjRtHOBzm9ddfj1bANWEymY7ImB6X34XNo30OJqTV+/mFEEIcHY7GOhQiY14jEzbZTSmYjbVvN7WZDfgMRoyBEGFHST1HKISoKV8wxIdrt/FXsZsEk4EEkwFb5N1qNmA16WmfbiUlIa6xQ22W/MEw//fRz8xao9UVo47P4j8XHIfJ0Pj9UWIyeS2fdr+4uJj8/PwqZ0tcs2YNQIX162rio48+YsyYMYRCIV577TWuvfbaww+4Huzz7cPm1ro7GVq0buRohBBCNFVHYx0KWvIaH2l5dcW3qNM5bCYjPkMcVryEnfvqMTohRE2oqsqin3bx9Kcb+avEfdCyFqOeu8/qwviTctE30GRBNbXL4eXjH7czd912CorLGNQxneE9WnH6MRnYzLG9okhxqY8b3/+BVQUl6BS4b9gxXD2oXaO1tP5dTHYbzsrKol+/fgBMnz690v7ly5dTWFiIyWRi2LBhNT7v/PnzufjiiwkGg7z22mtcf/319Rbz4XJ49mGLdBuOy6jd2nRCCFGVX375hcsuu4w2bdpgMplo3bo1l156Kb/88kuFclOnTkVRlOjLbDbTuXNnbrnlFnbv3g1oLXQHlqnuNXXq1Eb4puJAR2MdCuD0OqLdhj1JdXsIbDtwrVdX/U0mJYQ4tHV/7WPUlO+4efoP/FXipqXNxPiTchnbvy3n9WzNqV1a0D83lWNaJZKZaMYTCPHYgl8ZNWUFm/a4jlhce10+3li6mQlzNvDG0s18tXEP2/a5UdWKS8OU+YL8b+02LntrJSc+tYQnP9nIxl0uvIEwX/y2hztnref4x7/guvfW8PGP2yn1BY9YzHX1204nI175llUFJdhMBt4e349rBrePmcQVYrTlFeC+++7jggsu4KmnnuKcc86JPh0uLi7mpptuAuCWW24hKWn/+NC5c+cyYcIE2rRpw5IlSyqcb9GiRYwaNYpgMMiUKVO47rrrGu7L1IC9bBe2yAMmXQtJXoUQh2fOnDmMHTuW1NRUrr76atq1a0dBQQFvv/02H374ITNnzuSCCy6ocMyjjz5Ku3bt8Hq9LF++nNdee41Fixbx888/8+KLL0bHSYL2
/9QZM2bwwgsvkJ6+f6bBk046qcG+o6je0VaHAvg8LvSRvyX9KbWfaRjAZjZSZtBm/A+XHrk/hoUQ+xWWuHnms9+Zv34HoLWoXjekPdcNaU+CqepUJRxWmbm6kCcW/ca6v+wMm7Sc24Z25PqTO2Csh+Umg6EwX/++l1lrCvly4x5CVaxhmhCnp2NLKx1b2giGwyz+ZXd0+RiAfrkpXNA7i+PaJPLFb3tYsGEHW/aWsfjX3Sz+dTcmg45TurTg1C4tGdK5Ba2TG3e1kcW/7OKOWT/i9ofITYvnrSv60rGlrVFjqkrMJq8jR47ktttu46WXXuKEE05g6NChJCQksGTJEux2OwMHDuSxxx6rcIzD4eD333/H6/VW2L5nzx4uvPBC/H4/WVlZrFixghUrVlR53eeee67CH2INpcS+jYzIWB1Ty4wGv74QovnYvHkz//jHP2jfvj1Lly6lRYv9XShvv/12Bg8ezD/+8Q82bNhA+/bto/vOOecc+vbtC8A111xDWloaEydO5OOPP2bs2LEVrrFr1y5mzJjByJEj6zRuUhxZR1sdGggFoCwycQQqSsvKXaVrwmY24DRE1np1eyAUAH1sd/EToqkKhVVe/OIPXl+6BX8wjKLAqD5Z3HVmFzKTDj7hmk6nMG5ANqd0acH9c3/iq9/38tziP1j40y6eHdWD49rUbfLTzXtLmb1mG//7YRt7XfuX9eqdncyJ7dPYWuJm0+5SthSVUuYPsX6bg/XbHNFy7dITuKB3G0b2akN2Wnx0e4+sZO48vRO/73axYP1OFmzYQUGxm89+2c1nv2g9nDq0SGBI5xYM6dSCAe1TiY9ruDRt+sq/uG/uTwAM7JjG5HF9SI6PzfHEMZu8AkyaNImBAwcyefJkVqxYQSAQoEOHDtx7773ceeedxMXV7Jfqdruj68pt27aNadOmVVv24YcfbpSKd9/2AgDCioqtRWqDX18I0Xw8++yzuN1u3njjjQqJK0B6ejqvv/46J598Ms888wxTpkyp9jynnXYaEydOJD8//0iHLI6Ao6kOdfgd0fGuOqOKuUVunc6jJa/aEjvhoA48+8Dasp6iFEKU8wZC3DZjHYt/1RK3kzqkcf/wY+jWunZJZ+tkC++M78fHP+7gkfm/8NtOJ+dP/pYLerchNy2eFjYT6VZT9D3NGoeqaq29BcVuthaXsbXYTUHk/cBxtmkJcVzYpw2j+7alc0bFFshAKMzWYjeb9rj4Y3cpnkCIM4/NoFfb5Gq72CqKQtfMRLpmJnLXmZ35daeTz3/dzdI/9vJjoZ3Ne8vYvLeMd78tIE6v4/icFI7PSaFX22R6ZSeTbj0yk919tG4793+kJa6XnZDNQ+d1q5fW6yMlppNXgIsvvpiLL764RmXHjx/P+PHjK23Pzc2t1C891rj37ATAb1JIsdZ+en8hhCg3f/58cnNzGTx4cJX7hwwZQm5uLgsXLjzoeTZv3gxAWprMgN5UHS116IEzDStGleS0VnU6j81soNig1cGhgALuYklehahndrefa6atYc3WfcQZdDw7qgcjerau87hKRVEY2bsNgzql89C8X1i4YScfrt1Wp3PpFDi1S0tG923LaV1bEmeoOokz6nWRLsNWzj6ubjF3a51Et9ZJ3HF6ZxyeACs2FbH0zyKW/rGX7XYP320p5rstxdFj2iRb6JWdTO+2yfTJSaH3QRLlmlr8yy7umr0eVYXLT8zhkRHdYmp8a1ViPnk9Wvj2aVPy+82QFqPN9EI0d6qq4gl6Dl2wgVgMllpXIg6Hgx07dnD++ecftFyPHj2YN28eLtf+cX0Oh4OioiK8Xi/ffvstjz76KBaLhXPPPbdO8QvRUJw+JxaflmCHDToyk+o2dsxmNuI2RiZsCkaSVyFEvdlh93DFO6v4c08pNrOBty7vy4D29fOANN1qYvK4PlzSdy/fbymmqNTHXpePolJ/5N1HMDJ21WoykJMWT25aQvQ9Oy2ezhk2Uhth+Z0ki5Fzurfi
nO6tUFWV/KIyvttSzI9/2fmx0M6mvaVst3vYbvewcIPW4HVi+zQev+A4OrSw1umay/8s4pbp6wiFVS7s04aHz4v9xBUkeY0ZYYc2q2HApCM5XsbXCNEYPEEPA6YPaOwwolaOW0m8Mf7QBQ9QnozabAefZKF8v9O5f0bV008/vUKZnJwc8vLyaNNGJpETsc3ldxEfaXn1Gw1kJtate50227DW8hoO6CR5FaIe/b7LxRXvrGKX00tmoplpV/WnS2b9Twg0pHMLhnSuvFxWOKzi8AQIqyqpCXExm6gpikL7Flbat7By6YAcAJzeAD9tc/BjoZbMLvtzL99tKeacF5dxwykduOmUDrVa23rt1hKufW8N/lCYs7tl8sxFPdDF2HJD1ZHkNUaES7U+9gGTniSLJK9CiLopT0oPbFGtSlVJ7uTJk+ncuTMGg4GMjAy6dOmCThe7416EKOf0O6PL5PgMJtLqODbMZjbiMUjLqxD1beWWYq55bw0ub5BOLa1Mu6p/g8+uq9MppDRCq2p9SDQbGdgxnYEdtTkFCkvc/N/HP/P173t5acmfzF+/g8dHHhfdfzA/b3cw/t3VeAIhhnRuwaSxvTDE8BjXv5PkNUYYyrRaN2AyxvQgaSGaM4vBwspxKxs7jCiLofYVe1JSEq1atWLDhg0HLbdhwwbatGlDYmJidFv//v2jsw0L0ZRUSF5NFvR1bEGwmQ37k9eAAmWSvApxKHtcXhas34ndE8Bk0Gkvoz76ubjUz1OfbsQfDNMvN4U3L+8bszPZNhVtU+N5d3w/Fv20i0fm/0J+URmXvrWSkb1a88C5x1Y7udOmPS4uf2cVLm+Q/rmpvH7Z8ZgMNW+xjQWSvMaIOI+2UHGwhrM/CiHqn6Iote6mG4vOPfdc3nzzTZYvX86gQYMq7V+2bBkFBQVcf/31jRCdEPXP6XNGuw37zHXvhlgxedXBzx/C8ePBWrkLohBHs1BYZemfe5m56i+W/LYnOpb0YM7qlsGkMb1r1b1VVE9RFIb3aMXgzuk8/9nvvPf9Vj76cQdf/LaHtqnxmI06zAa99m7UYzbqWbG5iJIyP93bJPHW+L5Y4prev4UkrzFAVVXMnrD22ZLQyNEIIZq6f/3rX7z//vtcf/31LF26tMJswSUlJdxwww3Ex8fzr3/9qxGjFKL+uPyu6IRNAWtync+TaDbijiSvobAR9m6EqcPhinlgy6yPUIVo0rbbPXywupDZawrZ4di/JnSf7GS6tU7CHwzjC4bwBcORVwhfIMygTuncelqnOveKENVLNBt55PzjuOj4LO6b+xM/b3fy205nteXLu20nmpvmMEVJXmNAWaCMhMgEp0p8cqPGIoRo+jp16sS0adO49NJL6d69O1dffTXt2rWjoKCAt99+m6KiImbMmEGHDh0aO1Qh6oXT7yQ10m04ZKv7OrMmgw6/SeuuHzS2hEQdFP0O7w6DK+ZDkkxeJo4+qqry/ZYS3li6ma//2Ev5ylnJ8UYu6N2GMf2yj8jES6J2emQl8/HNg9iwzY7LG8QbCOENhvEGQvgCIbyBMAa9wshebZrs2F+Q5DUm2H12Ej2R/xMkJjdqLEKI5mH06NF07dqVJ598MpqwpqWlceqpp3Lfffdx3HF1WJhOiBjl9JaQFek2TGrdW0gVRUGXoPWACnt8MH4hTBsBJZthaiSBTc6uh4iFiH3hsMqSjXt49etNrPvLHt1+Yvs0xvRvy1ndMqULcIzR6xR6Z6c0dhhHlCSvMcDutWONtLyGElMbNxghRLPRvXt3pk+ffshy48ePZ/z48bU69913383dd99dx8iEqF9Od1F0wiZz2uGNT9VbteRVdZehpuSiXLkQpp0H+wr2t8CmtjvMiGOHqqoEwyr+YJhAKIw/GMYfCpORaJYJJI9SgVCY+et3MOWbzfyxuxSAOIOOi/tmcfWg9rRLlyFuovFI8hoD9pTuJSmSvKrJ9bNQsxBCCHG0cPnsWPxaDyZbavJhncsYSV6V
UAjV70dJzoYrP9ES2OJN+xPY9I6HG3aj+XLjbp759Hfyi8rwh8LRbqAHapNsYcplx9M9K6nhAxSNwukNMPeH7by5bAvb9ml/mFpNBi47IYerBuXS0mZu5AiFkOQ1Juwq2UmryBNjfYtWjRuMEEII0cQ4/a5oy2tyi+TDOpfRao1+DpeVoTOZILE1jF8E742ITOI0DLpdWPUJ9EbocQlkxl7X/K3FZTw6/1eWbNxTbRmDGsIW9OHbaefG5xdw31mdGdolHcJh1GAIwiHUUBgt41W198hLjWTBil4Pig50SvSzolNAibyqoIZCEAqhBoOowSAEg6ihEGogCGpYO4deBzpdxc+qiur3owYC2nvkc9jvh1Bofxw6Pegicej0kVgO/vtSdLpIvErF8uGwFu+Bv5NgCFQVxaBHMRrBYEAxGlEMRu3daKhwnui5y8//999lOPIZUOLi0JlNKGYzOpP2rpjM6Exx2vHlx8D+c0S/hHYN5YDPKJF/F7SW99UF+5i1upCFP+3AG9AmEE23xnHlwHZcdkIOSZamObGPaJ4keY0Bjt3bAFBRMbaQ2QyFEEKI2nAGPcRHktfUFoc33ssab8Kjj8MS8hMuLYXUyHAeWwZcsQDeOx/2/AIrX6v+JCtfh3Oe1pbZqSZZ+7uCojJ2OryU+oKU+YK4Iu+l3iCeMjdpcQptUyxkJZvJSraQYjFqOU84TMjpJFRcTLC4hGBxEaED3tVAgJAKBcVu8kvcnKTCSYpCTnoC2fF6FHcplJailroIu1yoHk/FwD6BLXX6TYqYptMRMhjxKAZ8ip7hOiNn6A3o4uJIizeQZAC+DrDH72d3+YOBQADFYECJj0dnsWiv+Hh08RYUi7bM3IEPEA78jKJoxxqN0XeMkXdFgbCqPQwIh/Z/VsMoegO6hATtOuXvkc8oCuGyMsJuN2F3GeEyd/QzYbXyMZGfFYOesNeH6vNq714vYZ8X1etDDYfQxZlQTCaUuDgUU5z2wCDOpD2AUHT7H0Ac+PCh/CECKmo4DCoHPIzQHnKoofIHM5GHNKEghMKg12kPV/Q6FL2h4s9/f+iiUyIPPnSR/Qc8zFHKH+6gPQgKBFD9Ae1agYD2CgW1339cnPZvEReHLi4u+rMaDO7/dzvgQZAaCGjlzBZ0FjM6iyX6WTGbMaSlYWqgSSAleY0B7r27AfCbwZIk3YaFEEKImgqFQ5SqgWjyak05vG6u5Wu9WkJ+wmVlFXdaW8CVC+GH98Bjr/oEO9bBlq9gwR3w13dw7gsQV/0YwV177Lw6czm//fAbLTx20jxO0rxO0rwOsr0O0j0ObIGKCWVR5FUbGZHX/jghdLADDAZCig6fqhBGQWc0YLXEodPrQa/fn5QroKBQoVU1HNZaYcPhSp+rFUluMBpQ9AbtD2yDHvQG0O1PblDDWqtkKISqaufTxcWhGPf/MR59GfTatUPatUOhMFv3uti+rwwlrGIy6miXbiW1qplXyxOPcBgVdX8yQqTVVK/XWi8PeEdhf2JSnixEP/srJjR/P295QoQSSUD2/z5Vvz+SYGmJlur3V463tsJh9H4fVnxY/76vGALVHKb6/eB2H/zeEUedhEGDyH7rzQa5liSvMcBfsheAoFklQSZsEkIIIWrM5XcBYIn8Pa+3VfpTvFbK13pN9bkqJ68AlhQYeHu1x6t+P+GvXyD8xbOEl/2P8LofCJ90D2FTOmGXi8COHfj/KsRfWIh9cz6mfcWMPayIocxgxm6yVnh5EhIJ2JLZ7VVRUEmxGBnePZNjW9m0Tqqqis5kQmezoU9M1N5ttui7YtD+RJy/fgf/+nA93kCY9ukJvHF5Xzq2PLzfcWP4fksxE+b8RH669m+aaDbg9AYBOPPYDB4e0Y3WyZYqjw2HVVZsLmb6qq38vN3JwI7pXNw3i15tk7XWtwamhsMs/2U7T368njJvkDiDjjijnjijAZNRj8mox2DQs9fhpXBfGb5ACEUFBe1eUFQwhIP0zIhnxDHp
nNwuCbMaRPX5Uf0+0OlR4oyVW+YMBtRgkLDHQ9jtIezRWjrVyM8oygEPEA441qh1O66YzAeiLYPAAa2LushnrSVRDQYPaFWNtLKWaT8D6BLi0cVXbplFUbT4Ii2zqttNqKwM1e1GDQS1LthmE4rJjGI2oYu8Kzo9asCvPSjw+VF9Pu2/ab8PAoHIg5gDungT6S6vEmmAPeDBQ7SFVlf5oYzBEG1l1R6uaF3P1XAIQuFoq6yqhrXrlT+sCasVu+6XP8yJPijSymit24aKXdiNRu37RcbzV9VCrh0TV6FlNvpvHwho//ZeD6rHS9jr1f7tvV6MbRpuGTFJXmNA2F4CgGpSsclSOUIIIUSNufwuDEEVY6QpSGc9vMTKZjbgNmoT01SVvAa2b2ffrA8I7tlDyOUi7HBo3XYjn8Nud6Rk+azHZTD7oSqvZYq8e41mTNltsWVnYchoiTEjA0PLDAwZGRgzWmJo2RLFYomOW/QGw2yzeyi0e9la4mHbPg/b93nYZnezfZ+Hfe797WZxeh3Xn9yem07piCWu9suanNezNe3SE7juvTVsKSrjgsnf8uzoHpzVLbNRErfacnoDPLloIzNW/QVAS5uJx0Yex+BO6by0ZBNvLdvC4l938+2mIu48ozPjT8rFEJlluajUx+w125i5+i+2Fruj5/xr1V/MWPUXHVtaubhvFiN7t2nQyYxWb7Vz7Qc/4w0ogBGCgBe0D8G/lY5Db1LISrGQk5ZAblo82anxDO7UQtZmFU2SJK8xILtMq2jj4sIkWat+6ieEEEKIypw+R7TVFYiu01pX5d2GAUKlpdHtajBIyX/fZ+9LL1UeF1oFxWjUxgKqbvSKB8WgQnImvyR2Y7k3gV3xqbhSM7jk/BO46NTj0NdiWRqLETpZTHSqZo7HMl+Q7XYPOx1eOrW0VtuiWFPHtUli3q2DuOn9H1hVUMIN7/9At9aJXH9yB4YdlxlN9mLNZ7/s4sGPf2a3U+tTPm5ANv8+u2t0AqJ7z+nKyN6tuX/uz6zduo/HF/7G3HXbuXZwez7/bTeLf9lFIBSZxdpkYGTvNgzsmMZnv+zmk593smlPKU8s2sjTn/7OqV1acEHvLLJT40myGEmyGLGZDeh09Zvgb9hm56qpq/EGwpzWtSX3nN0FXyCMNxDCG4y8B0L4g2Fa2EzkpiXQJsUiyx6JZkOS1xhwYkIOAdZjNSgkW6oYdyGEEEKIKjlc26PjXYNxpugsqnVlMxujyWt5y6vn51/Y9eCDeH/9FQDL8cdjPflk9ImJ6JMS0SUmok9MJJxg5Yu/3OR7wB4AhyeAo8zD2UXTGOWeiY4iysKlPBO4nROP78vEc7pWPd7yMCWYDHTOsNE5o/5a1tKtJt6/ZgATP/+DaSsK+GWHk9tmrOPZVAvXDm7P6OPb1qllt755/CE++Xkns1YXsjJf69nWLj2BJy/szgntK88r0jUzkdnXn8isNYU89clGftnh5I5ZP0b392ybzKX9szm3Zyvi47Q/m88+rhWPnN+NBet3MnttIev+svPFb3v44reKszgripb0JlqMpFtNjOzVmjH9szEb6/Z7+mO3iyveWUWpL8gJ7VN59dI+dT6XEE2VJK8xQFeqjdcJmgzEGeTJmBBCCFFTTkdBdJmcsOXwWl1Ba3ndF0leg0VF7H7yKUr++18Ih9ElJtLyX3eTfNFF2pi8A4TDKrfOWMfCn3ZWOueXnMcCXTYvGF/lOF0BX1kfRH/sZEjocdjxNqQ4g457z+nK9UPa8953W5n2XQGFJR4e/PgXXvziT644MZdze7bCqNPtn88pMiurAqRZ4zAZ6j/ZUlWVH/6y8+HaQuav30mpT+s6q9cpXD+kPbcN7XTQJE+nUxjbP5szjs3giUW/sfSPvZzVLZNxA7Lp1rrqCcASzUbGDchm3IBsNu1xMXvNNr75Yy92dwCHJ4AnEEJVwekN4vQG2bbPw4+Fdl75ajPXDWnHpQNySDDV/M/wrcVlXPbWSva5
A/Rsm8xbV/STxFUclSR5jQGBkn0ABE2yjpYQQghRGy7X9v3dhg+zyzBoLa9ugzZ+seill6PbE4cPJ2PCvRjS06s87qlPN7Lwp50Y9QoX9ckiNSGO5HgjyZY4kuKNJFtOwKleROrXt6Ev/B4+uBwG3AhnPAqGptXrKiUhjttP78R1Q9oze20hby7bQmGJhxe++IMXvvij2uOSLEbuH34Mo4/PqvF4WZc3wJ97SgmHVW2uGlUlrKrahL2qyi87nMxeU8jmvfvHJ2enxjP6+CwuPD6LNrXoMp1uNTHx4l41Ll+uY0sbE4Ydw4Rhx0S3+YNhnF4tkXV4Avy83cHr32xhu93DE4s28urXm7l6YDuuGJhLovngf//tdHi49K2V7HH56JJhY9qV/bDWIvEVojmROz8GhPUqBksId0JiY4cihBBCNCnO0t3E+7RxifrDnKwJIhM2GUzRn41ZWWQ+9CDWwYOrPea97wp4Y6m2Guqzo3oysnd1M2+mQe4C+PIx+HaStlbstlUw6l1IyTns2BuaJU7P5SfmMq5/Not+3sXby7awaU/p/hVhUCmfiDUcVnF4Atzz4Qbmr9/BExd0p21qfLXnLvMFmbqigCnfbMbl/fskRFXEYtQzrHsrRvfNon9uar2PNa2tOIOOdKuJdKt2L/XJTmFs/2w+WredV7/eTH5RGc9//gdvLNvC5SfmcEL7NNqmxNMq2Vyhdbqo1Mdlb61k2z4PuWnx/Pea/iTHN62HHULUJ0leY0Bg2PEck/UJK82dGjsUIYQQoklxuvdGuw3HJR7+GM9Es4HVGV05eecGjhl7Aek334zOUn3r3Re/7ubheb8AcPeZnQ+SuEbojVpra/ZJMPd62L4WXh8MI6dA12Fa1ucvA68DfE7t3esEcxIkZYEtE3T13F1U1ZbfIOTXXuEgqAdZkxWgfG1XFAyKwohOZkZ07qadKxSAcCByvgCEAgQDPub9uI3pK//CuWkTd7+4mn+ckM2w7q3RKQoYLZCcjU9vYfrKv5j81SaKSrUm9XSrCatJj05RUBTQKQp6nYKiKKQmGBnRszXDe7SO+dZIo17H6L5tubBPFgs27GDyV5v4Y3cpk7/azOSvNgParzTDZqZtqoWslHh+3eFk894yWieZef+aAQ06q7EQsSi2/ys/SgTcTgBCcTJluRCi/rz66qvcfPPN9O/fn5UrV1ZZZs+ePUycOJGFCxeSn59PMBgkKyuLwYMHc/XVVzNo0KBo2alTp3LllVdGf9br9WRkZHDGGWfwn//8hzY1WOft4Ycf5pFHHkFRFLZu3Urbtm0r7Hc6nWRkZOD1ern55pt55ZVXACgoKKBdu3YAPPbYYzzwwAOVzn3ppZcyffp0EhISKD1glljRvO3z7It2GzYnHX4PJpvZyE8tOnLVWfez6a5zDtq9dX2hnVtnrCOswph+bbn51I41v1CXs+GGZTB7vJbAzhyrrSHrdYIaqv44nQESW0NS28irDehNEPJB0Lc/AQ3692/zl0HAAwF35D3yubxsyF/99eqJAbgQuNDA/r8+V0VeB/Bgo0+4BY+o6ThtrTn2mO5079IRnXKwOUH2wOYfj0DUR4YeOD8OzjtTZf02B99tLmavy0txqR9fKAylQCm4/4JcoEe8gbtO7ULmzi+g8pBqIRqfNQOyBzTIpSR5jQGq1wFAWJJXIUQ9ysvLIzc3l1WrVrFp0yY6dqz4h/WqVasYPnw4LpeLMWPGcMMNN2AymcjPz+ejjz5i6tSpfPPNNwwZMqTCcY8++ijt2rXD6/Xy/fffM3XqVJYvX87PP/+M2VyzVgGTycSMGTO45557KmyfM2fOQY8zm83MmDGjUvJaVlbGxx9/XOPri+ajyJRIqk+b5dVgO/xuw+Wtd6GwitsfqnZSncISN1dPW40nEOLkzi14bORxtV/3NDkbrvwUvngIvn8VPPv279MZtNZWcxLEWcFrB+cOrVXU/pf2imX6uMjLCDqj9jkyyZWK1i3Y7g6g
qoACSYqHREpJxkWyzkVPtkAA2PARbGjE73EE6YDekRegZbVVNaqHgU8bKCgh6qLTmXDp7Aa5lCSvMUD1ai2viknGvAoh6kd+fj4rVqxgzpw5XH/99eTl5fHQQw9F9+/bt4+RI0diMBj48ccf6dq1a4XjH3/8cWbOnImliu6S55xzDn379gXgmmuuIT09naeffpp58+Zx8cUX1yi+YcOGVZm8Tp8+neHDh/O///2v2uPmzJnD+vXr6dmzZ3T7xx9/jN/v5+yzz+bLL7+sUQyieeiecz17Sl4H/kBXD2Ne4+P06HUKobCKyxusMnm1u/2Mf3cVRaV+jm2VyORL+9R9HU1DHJz9JJxwk9YaakrUElajBf6eDIdDULob7IXgKATHNu2lhvYniwbTAYljnHYeY/zf3iOvA8vqDBWTTd0hvo+qai/+9q4o2rkOksgrgBVw2D3cP+cnvvljLwBtLX7u6GfhvOwAcaXbYN9WLUl3F9ftdyuEaBgtujTYpSR5jQE6v7ZUjs4iyasQon7k5eWRkpLC8OHDGTVqVKXkdcqUKezcuZOZM2dWSlxBW9pi7NixNbrW4MGDefrpp9m8eXON4xs3bhyjRo1i48aN0evv2rWLL7/8kg8++KDa5PXEE09k3bp1TJ8+vULympeXx9lnn01qamqNYxDNQ7JyHE5XC7Tk9fBnG1YUBavJgMMTwOUNkJlUsTXfFwxx3X/XRschvltfM78mtz10GZ1e6zKc2BpomC561VKUgyaoNdEm2cLUK/ux6Kdd7HR4uKRfW2yHmHlXCHF0k0VFY4ArsQPr9N3Rp7Vv7FCEEM1EXl4eF154IXFxcYwdO5Y///yT1atXR/fPnz8fi8XChRdeeNjXKigoACAlJaXGxwwZMoSsrCymT58e3TZr1iysVivDhw8/6LFjx45l5syZqKo2w2xRURGLFy9m3LhxtQ9eNHm7nV7ig16gfmYbBm3GYdDW6DxQcWTm11X5JdhMBt69sj8ZidJV/XAoisLwHq24ZnB7SVyFEIckLa8x4JRrnmnsEIQQaAvdqx5PY4cRpVgstR9DB6xdu5aNGzfy8svaGpWDBg0iKyuLvLw8+vXrB8DGjRvp0qULRmPFPxZdLhc+ny/6s8ViIeFva2c6HA6Kiorwer2sXLmSRx55BJPJxLnnnlvz76YojBkzhhkzZvDoo48C+xNuk8l00GPHjRvHE088wbfffsugQYP44IMPMJvNjBgxgk8/lYFhR5vdTi8dg9o9Wx/dhoFIEuXB5Q1Et23c5eTqqWvYbvdgMxt44x996ZIpc1UIIURDkuRVCCEiVI+H3/sc39hhRHX5YS1KfPXrIFYnLy+PjIwMTj31VEBLFC+55BLef/99nn/+efR6PU6nE2sVf+j/4x//4OOPP47+fOCMv+VOP/30Cj/n5uby/vvvk5WVVas4x40bx3PPPcfq1atJSUlh9erVPPHEE4c8rlu3bvTo0YMZM2YwaNAgpk+fzvnnn098HX5Xounb5fDSI6C1vOoS6rfltdSntbx+/utu7pi5jjJ/iNy0eN66oh8dW9bPtYQQQtScdBsWQohmJBQKMXPmTE499VTy8/PZtGkTmzZtYsCAAezevZslS5YAYLPZqlxO5tFHH+Xzzz/n888/r/YakydP5vPPP+fDDz9k2LBhFBUVVWgt9fv97Nq1q8IrFKq89Efv3r3p2rUr06dPJy8vj8zMTE477bQafc9x48Yxe/ZsNm3axIoVK6TL8FFsyj+Op096HAC6ephtGLS1XgFc3iCvfb2Z6/67hjJ/iJM6pPHRzQMlcRVCiEYiLa9CCBGhWCx0+WFtY4cRpVQx0++hfPnll9GJmGbOnFlpf15eHmeeeSZdu3Zl/fr1BAKBCl2He/Tocchr9O/fPzrb8MiRIxk0aBDjxo3j999/x2q1smLFimirb7n8/Hxyc3MrnWvcuHG89tpr2Gw2LrnkEnSHmuE0YuzYsUyYMIFrr72WtLQ0zjzzzBod
J5ofk0GPwevGT32OedX+m3jxiz/Y7dS6JP/jhBwePO/Yus8qLIQQ4rBJ8iqEEBGKotSpm24sycvLo2XLlkyePLnSvjlz5jB37lymTJnCueeey/fff8/cuXNrvLxNVfR6PU8++SSnnnoqr7zyCvfeey89e/as1HKbmZlZ5fHjxo3jwQcfZOfOnfz3v/+t8XWzs7MZOHAgX3/9NTfeeCMGg1RnR7NQmdaLoP7GvGr3026nD71O4eHzjuUfJ+bWy7mFEELUndT2QgjRTHg8HubMmcPo0aMZNWpUpf2tW7dmxowZzJs3jxtvvJGXX36ZO++8k169etG5c+cKZctn8q2JU045hf79+/Piiy9yxx13kJKSUmlcbHU6dOjAiy++iMfjoX///jW+Jmhr0X711VdccskltTpOND/h0jKg/pLXdKvWDT7JYuTVS/swsGN6vZxXCCHE4ZHkVQghmol58+bhcrkYMWJElftPOOEEWrRoQV5eHpdccglz587lvPPOo2fPnowZM4Z+/fphNBopLCxk9uzZgNbCWRP/+te/GD16NFOnTuWGG26oVdy33357rcqXO/nkkzn55JPrdKxoPtRgMDpLuC7h8Nd5Bbh0QDZ6ncK5PVqRk1Y/5xRCCHH4JHkVQohmIi8vD7PZzBlnnFHlfp1Ox/Dhw8nLy6O4uJgTTzyRn3/+mYkTJ7Jw4UJmzZpFOBymTZs2DBo0iDfeeIPBgwfX6NoXXnghHTp04LnnnuPaa69Fr9fX51cTolrhsrLoZ309Ja9pVhM3n9qxXs4lhBCi/ihqbfqGiUqcTidJSUk4HA4SExMbOxwhRA14vV7y8/Np164dZrO5scMRTcCh7hmpC+qmPn5vge3b2TT0dBSTia7rf6zfAIUQQhxxtakLZMo8IYQQQjRZoXoe7yqEECJ2SfIqhBBCiCYrHJ1pWMamCiFEcyfJqxBCCCGarHCplrzqE6TlVQghmjtJXoUQQgjRZJUnr9JtWAghmj9JXoUQQgjRZIUkeRVCiKOGJK9CCCGEaLLCLhnzKoQQRwtJXoUQRy1ZKUzUlNwrsat8wia9tLwKIUSzJ8mrEOKoYzQaAXC73Y0ciWgqyu+V8ntHxI5ot2GZsEkIIZo9Q2MHIIQQDU2v15OcnMyePXsAiI+PR1GURo5KxCJVVXG73ezZs4fk5GT0en1jhyT+JizrvAohxFFDklchxFEpMzMTIJrACnEwycnJ0XtGxJb9sw3LmFchhGjuYj55nT17NpMnT2b9+vX4/X46duzIpZdeyp133lmn7ltr167lqaeeYunSpTgcDlq1asW5557L//3f/9GyZcsj8A2EELFIURRatWpFy5YtCQQCjR2OiGFGo7HJtrgeDXVoi9tvI/mSizHl5jbK9YUQQjQcRY3hWSjuuOMOJk2ahMFg4LTTTsNqtfLll19it9sZNGgQixcvxmKx1Ph8H374IWPHjiUYDNKvXz/atWvHmjVr2LJlCxkZGSxfvpyOHTvWKkan00lSUhIOh4PExMTafkUhhBDNQCzWBVKHCiGEaApqVReoMWru3LkqoFqtVnXt2rXR7Xv37lW7d++uAupdd91V4/Nt375djY+PVwH19ddfj24PBoPqZZddpgJqv3791HA4XKs4HQ6HCqgOh6NWxwkhhGg+Yq0ukDpUCCFEU1GbuiBmZxt+4oknALj33nvp06dPdHt6ejqvvvoqAK+88goOh6NG53vxxRdxu92cfvrpXHfdddHter2e1157jaSkJFavXs3ixYvr8VsIIYQQDU/qUCGEEM1RTCav27dvZ/Xq1QCMGzeu0v5BgwbRtm1bfD4fixYtqtE5586dW+35rFYrI0aMAGDOnDl1DVsIIYRodFKHCiGEaK5iMnldt24dAKmpqbRr167KMn379q1Q9mBcLhebNm2qcNzhnE8IIYSIVVKHCiGEaK5iMnnNz88HIDs7u9oybdu2rVD2YAoKCqKfqztnbc4nhBBCxCqpQ4UQQjRXMblU
jsvlAiAhofo126yRxcidTmeNz3ewc9b0fD6fD5/PF/25fLxQTeIQQgjRPJXXAWoMTOAvdagQQoimpDZ1aEwmr7HsySef5JFHHqm0vfypsxBCiKOXy+UiKSmpscOIWVKHCiGEqE5N6tCYTF5tNhsAZWVl1ZYpLS0FqNG6cOXnKz9nVb+Ump5vwoQJ/POf/4z+HA6HKSkpIS0tDUVRDhlLVZxOJ23btqWwsFDWuRM1IveMqC25Z44sVVVxuVy0bt26sUOROlSIQ5B7RtSW3DNHVm3q0JhMXnNzcwEoLCystkz5vvKyB5OTkxP9/Ndff9G9e/c6n89kMmEymSpsS05OPmQMNZGYmCj/QYhakXtG1JbcM0dOrLS4Sh0qRM3IPSNqS+6ZI6emdWhMTtjUu3dvAIqLi6ud/GHNmjUAFdavq05iYiIdO3ascNzhnE8IIYSIVVKHCiGEaK5iMnnNysqiX79+AEyfPr3S/uXLl1NYWIjJZGLYsGE1OucFF1xQ7flKS0uZP38+ABdeeGFdwxZCCCEandShQgghmquYTF4B7rvvPgCeeuopfvjhh+j24uJibrrpJgBuueWWCk3Mc+fOpWvXrgwdOrTS+e644w7i4+P54osvePPNN6PbQ6EQN910E3a7nX79+nHmmWceqa9ULZPJxEMPPVSpK5UQ1ZF7RtSW3DNHF6lDhaie3DOituSeiR2KGgvz+lfj9ttv56WXXsJoNDJ06FASEhJYsmQJdrudgQMH8vnnn2OxWKLlp06dypVXXklOTk6FdenKzZ49m7FjxxIKhRgwYAC5ubmsXr2aLVu2kJGRwfLly6Ndo4QQQoimTOpQIYQQzU3MtrwCTJo0iVmzZnHiiSeyYsUKFi1aRFZWFk899RRffvllhUq3JkaPHs3KlSu58MIL2bJlC3PnziUUCnHzzTezfv16qXSFEEI0G1KHCiGEaG5iuuVVCCGEEEIIIYSAGG95FUIIIYQQQgghQJLXRjd79mxOOeUUUlJSSEhIoGfPnjzzzDMEAoHGDk00sEAgwJIlS/jXv/5Fv379SE5Oxmg0kpmZyYgRI1i4cOFBj//iiy8YNmwY6enpWCwWunbtyv33309paWkDfQMRC+655x4URUFRFB5//PFqy8n9IpoDqUNFOalDRX2QOrQJUEWjuf3221VANRgM6plnnqleeOGFanJysgqogwYNUt1ud2OHKBrQ559/rgIqoGZmZqrDhw9XL774YvW4446Lbr/uuuvUcDhc6diJEyeqgKooijpkyBB19OjRamZmpgqoXbp0Uffu3dsI30g0tG+//VbV6XSqoigqoD722GNVlpP7RTQHUoeKA0kdKg6X1KFNgySvjWTu3LkqoFqtVnXt2rXR7Xv37lW7d++uAupdd93ViBGKhrZkyRL1oosuUpcuXVpp38yZM1W9Xq8C6rRp0yrs++GHH1RFUVS9Xq8uWrQour2srEwdOnSoCqgXXXTREY9fNK6ysjK1U6dOaps2bdSRI0dWW/HK/SKaA6lDxd9JHSoOh9ShTYckr42kX79+KqA+/vjjlfYtW7ZMBVSTyaTa7fZGiE7EoquvvloF1KFDh1bYPnr0aBVQr7nmmkrHFBQUqDqdTgXU3377raFCFY3gtttuUwF14cKF6hVXXFFtxSv3i2gOpA4VtSV1qDgYqUObDhnz2gi2b9/O6tWrARg3blyl/YMGDaJt27b4fD4WLVrU0OGJGNW7d28ACgsLo9v8fn90HE9V91JOTg4DBw4EYO7cuQ0QpWgMX3/9NS+//DKXX345w4YNq7ac3C+iOZA6VNSF1KGiOlKHNi2SvDaCdevWAZCamkq7du2qLNO3b98KZYX4888/AWjVqlV02x9//IHb7Qb23zN/J/dS81ZaWspVV11FRkYGL7744kHLyv0imgOpQ0VdSB0qqiJ1aNNjaOwAjkb5+fkAZGdnV1umbdu2FcqKo9uuXbuYOnUqABdddFF0e/n9kZycjM1mq/JYuZea
t7vvvpv8/Hzmzp1LSkrKQcvK/SKaA6lDRW1JHSqqI3Vo0yMtr43A5XIBkJCQUG0Zq9UKgNPpbJCYROwKBoNcdtllOBwOunfvzvXXXx/dJ/fS0W3x4sW8/vrrjBkzhpEjRx6yvNwvojmQ+1jUhtShojpShzZNkrwKEeNuuOEGlixZQlpaGh9++CFxcXGNHZKIAQ6Hg6uvvpoWLVrw8ssvN3Y4QggRk6QOFVWROrTpkm7DjaC8q0FZWVm1ZcoXOU5MTGyQmERsuv3223n77bdJSUnh888/p3PnzhX2y7109LrjjjvYtm0bs2bNIj09vUbHyP0imgO5j0VNSR0qqiN1aNMlyWsjyM3NBSrOePd35fvKy4qjz1133cVLL71EcnIyixcvjs6UeKDy+8Nut+NyuaocgyH3UvM0d+5cDAYDr776Kq+++mqFfRs3bgTg7bff5osvviAzM5OZM2fK/SKaBalDRU1IHSoORurQpkuS10ZQ/j/Q4uJi8vPzq5wtcc2aNQD06dOnQWMTseGee+5h4sSJJCUlsXjx4mpntevSpQvx8fG43W7WrFnDqaeeWqmM3EvNVzAY5Jtvvql2f0FBAQUFBeTk5AByv4jmQepQcShSh4qakDq0aZIxr40gKyuLfv36ATB9+vRK+5cvX05hYSEmk+mg602J5unee+/l2WefJSkpic8//zx6r1QlLi6O4cOHA1XfS1u3bmXFihUAXHDBBUcmYNEo7HY7qqpW+briiisAeOyxx1BVlYKCAkDuF9E8SB0qDkbqUFETUoc2YapoFHPnzlUB1Wq1qmvXro1uLyoqUrt3764C6l133dWIEYrGcP/996uAmpycrK5atapGx6xdu1ZVFEXV6/XqJ598Et1eVlamDh06VAXUiy666EiFLGLQFVdcoQLqY489Vmmf3C+iOZA6VFRF6lBRH6QOjW2Kqqpq46TN4vbbb+ell17CaDQydOhQEhISWLJkCXa7nYEDB/L5559jsVgaO0zRQObNm8f5558PaItcd+vWrcpy6enpPPfccxW2vfDCC/zzn/9EURROPvlkWrZsybJly9i5cyddunRh+fLlNZ6QQDR948ePZ9q0aTz22GM88MADlfbL/SKaA6lDxYGkDhX1RerQGNfY2fPRbtasWeqQIUPUxMRE1WKxqMcdd5z61FNPqT6fr7FDEw3s3XffVYFDvnJycqo8/vPPP1fPPvtsNTU1VTWZTGqnTp3UCRMmqE6ns2G/iGh0B3tqXE7uF9EcSB0qykkdKuqL1KGxTVpehRBCCCGEEELEPJmwSQghhBBCCCFEzJPkVQghhBBCCCFEzJPkVQghhBBCCCFEzJPkVQghhBBCCCFEzJPkVQghhBBCCCFEzJPkVQghhBBCCCFEzJPkVQghhBBCCCFEzJPkVQghhBBCCCFEzJPkVYgmLjc3F0VRKrxMJhNZWVmcf/75LFiwoLFDPGLGjx+PoihMnTq1sUMRQgjRBEkdKnWoaFoMjR2AEKJ+DBw4kI4dOwLgcDhYt24d8+bNY968edx5551MnDixkSMUQgghYpPUoUI0DZK8CtFMXHPNNYwfPz76czAY5M477+SVV17hhRdeYOzYsfTr16/xAhRCCCFilNShQjQN0m1YiGbKYDDw7LPPkpiYCMD8+fMbOSIhhBCiaZA6VIjYJMmrEM2Y2WymU6dOAOzevbvCvmAwyJQpUzjppJNISkqKlr3tttvYvn17pXMVFBSgKAq5ubnVXq987FBBQUG127/66ivOPPNMUlJSsFgs9OnTh/fee6/ac5aUlHDHHXeQk5ODyWQiOzubW265hZKSkpr/IoQQQohakjpUiNgjyasQzZzT6QQgIyMjus3n83HOOedw4403sm7dOgYOHMjIkSPx+Xy8/PLL9OrVix9++KHeY3nnnXcYOnQoJSUlnH322fTq1Yt169ZxxRVX8OKLL1Yqv3v3bk444QQmTZqEy+Xi3HPP5fjjjycvL4/+/fuzb9++eo9R
CCGEKCd1qBAxRhVCNGk5OTkqoL777ruV9v3666+qXq9XAXX16tXR7f/+979VQO3QoYOan58f3e73+9Wrr75aBdR27dqpPp8vui8/P18F1JycnEPGcuA5D9xuNBrV+fPnV9j37rvvqoCalJSkut3uCvtGjRqlAurgwYNVu90e3V5cXKwOGDBABar97kIIIcShSB0qdahoWqTlVYhmyOFwsHjxYi688EJCoRAPPPAAffv2BcDr9TJ58mQAXnjhhQpdmIxGIy+99BIZGRnk5+fz4Ycf1mtct956K+eee26FbePHj6dr1644HA7WrFkT3V5YWMicOXNQFIUpU6aQlJQU3ZeamsqUKVPqNTYhhBACpA4VIpZJ8ipEM3HllVdG16hLTk7mrLPO4s8//+T999/nsccei5Zbs2YNpaWlpKamct5551U6T3x8PGPGjAHgq6++qtcYq7oewDHHHANQYZzQ0qVLCYfD9OnTh2OPPbbSMb169aJHjx71Gp8QQoijk9ShQjQNslSOEM3EgWvU7d27l2XLluFyubjxxhvp1KkT/fv3B/ZXbu3atav2XB06dKhQtr5kZ2dXub18Nkev1xvdtm3bNuDgcbZr144NGzbUY4RCCCGORlKHCtE0SPIqRDPx9zXqHA4HF1xwAV999RUXX3wxv/76K/Hx8Uc0hnA4fND9Op109hBCCBF7pA4VommQ/wqEaKaSkpKYNWsWqampbN26lYkTJwLQpk0bAPLz86s9dsuWLRXKAsTFxQHgcrmqPCYQCLBz5856if3Aa/99yYADHWyfEEIIUVdShwoRmyR5FaIZa9GiBQ888AAAzz33HHa7nb59+2K1WikpKWHevHmVjvF4PMycOROAU089tcK54uLiKCkpYc+ePZWO++yzzwgGg/UW+5AhQ1AUhR9++IGNGzdW2r9+/Xrp7iSEEOKIkTpUiNgjyasQzdxNN91EdnY2DoeD559/HrPZzM033wzAXXfdxdatW6NlA4EAt99+O7t27aJdu3aMGjUqus9oNDJkyBAAHnjggQrdm9avX88tt9xSr3FnZ2dzwQUXEA6HufHGG6Nr7QHs27ePm266CVVV6/WaQgghxIGkDhUitkjyKkQzZzKZePjhhwGYNGkSJSUlPPLIIwwdOpRNmzZxzDHHMHz4cMaMGUPHjh158803SUtLY/bs2dFuTuUef/xx4uLiePPNNznmmGMYPXo0J510Ev369eOUU04hJyenXmOfPHkyHTp04Ouvv6Zdu3ZcdNFFXHjhhbRv357du3czYsSIer2eEEIIcSCpQ4WILZK8CnEUuPzyyzn22GNxuVw8++yzmEwmPv30U1599VV69uzJsmXLmDt3LkajkVtvvZX169dz/PHHVzrPgAED+OabbzjzzDPZtWsXCxcuxO12M2nSJN599916jzszM5OVK1dy6623Eh8fz4IFC1i9ejVjxozh+++/JyUlpd6vKYQQQhxI6lAhYoeiSp8BIYQQQgghhBAxTlpehRBCCCGEEELEPElehRBCCCGEEELEPElehRBCCCGEEELEPElehRBCCCGEEELEPElehRBCCCGEEELEPElehRBCCCGEEELEPElehRBCCCGEEELEPElehRBCCCGEEELEPElehRBCCCGEEELEPElehRBCCCGEEELEPElehRBCCCGEEELEPElehRBCCCGEEELEPElehRBCCCGEEELEvP8HHigTh2m+CKAAAAAASUVORK5CYII=","text/plain":["
"]},"metadata":{},"output_type":"display_data"}],"source":["# Figure 3\n","num_plots = 2\n","plt.style.use('default')\n","fig, axs = plt.subplots(1, num_plots, figsize=(11, 4))\n","\n","data = [df_mnist_acc_flanders, df_mnist_acc_fedavg]\n","\n","for i in range(num_plots):\n"," if i == 0:\n"," acc = df_no_attack[df_no_attack[\"attack_fn\"]=='GAUSS']['accuracy'].to_list()\n"," axs[i].plot(acc, label=\"No Attack\", linestyle='--', color='slategray')\n"," for attack in ['GAUSS', 'LIE', 'OPT', 'AGR-MM']:\n"," acc = data[i][data[i]['attack_fn']==attack]['accuracy'].to_list()\n"," x = [i for i in range(len(data))]\n"," axs[i].plot(acc, label=attack)\n"," axs[i].set_ylim((0,1.0))\n"," axs[i].set_xlabel('Round', fontsize=16)\n"," axs[i].set_ylabel('Accuracy', fontsize=16)\n"," axs[i].legend(prop={'size': 12})\n"," axs[i].tick_params(axis='both', which='major', labelsize=16)\n"," axs[i].tick_params(axis='both', which='minor', labelsize=16)\n","\n","plt.show()"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":[]}],"metadata":{"colab":{"authorship_tag":"ABX9TyODCHCYl18UhHkKwq6LlRvG","collapsed_sections":["P_3Z05w0wvNB","dE_uqUeuyl6M","9vVX6wsxT-rc","RctMDJMZyPq2","R9VNz7Cv9RHn","6V4padUiYeac","pZ863s6JJbph","EJDtdXqLJX0H"],"provenance":[]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.9.18"}},"nbformat":4,"nbformat_minor":0} diff --git a/baselines/flanders/pyproject.toml b/baselines/flanders/pyproject.toml new file mode 100644 index 000000000000..416247f9c7bb --- /dev/null +++ b/baselines/flanders/pyproject.toml @@ -0,0 +1,151 @@ +[build-system] +requires = ["poetry-core>=1.4.0"] +build-backend = "poetry.masonry.api" + +[tool.poetry] +name = "flanders" +version = "1.0.0" +description = "FLANDERS" +license = "Apache-2.0" +authors = 
["Edoardo Gabrielli "] +readme = "README.md" +homepage = "https://flower.dev" +repository = "https://github.com/adap/flower" +documentation = "https://flower.dev" +classifiers = [ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Operating System :: MacOS :: MacOS X", + "Operating System :: POSIX :: Linux", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: Implementation :: CPython", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Scientific/Engineering :: Mathematics", + "Topic :: Software Development", + "Topic :: Software Development :: Libraries", + "Topic :: Software Development :: Libraries :: Python Modules", + "Typing :: Typed", +] + +[tool.poetry.dependencies] +python = ">=3.10, <3.12.0" +hydra-core = "1.3.2" # don't change this +flwr = {extras = ["simulation"], version = "1.6.0" } +torch = [ + { platform = "darwin", version = "2.1.1" }, + { platform = "linux", url = "https://download.pytorch.org/whl/cu118/torch-2.1.1%2Bcu118-cp310-cp310-linux_x86_64.whl" } + ] +torchvision = [ + { platform = "darwin", version = "0.16.1"}, + { platform = "linux", url = "https://download.pytorch.org/whl/cu118/torchvision-0.16.1%2Bcu118-cp310-cp310-linux_x86_64.whl" } + ] +pandas = "^2.1.3" +scikit-learn = "1.3.2" +ipykernel = "^6.27.1" +natsort = "^8.4.0" +seaborn = "^0.13.0" + +[tool.poetry.dev-dependencies] +isort = "==5.11.5" +black = "==23.1.0" +docformatter = "==1.5.1" +mypy = "==1.4.1" +pylint = "==2.8.2" +flake8 = "==3.9.2" +pytest = "==6.2.4" +pytest-watch = "==4.2.0" +ruff = "==0.0.272" 
+types-requests = "==2.27.7" +virtualenv = "20.21.0" + +[tool.isort] +line_length = 88 +indent = " " +multi_line_output = 3 +include_trailing_comma = true +force_grid_wrap = 0 +use_parentheses = true + +[tool.black] +line-length = 88 +target-version = ["py38", "py39", "py310", "py311"] + +[tool.pytest.ini_options] +minversion = "6.2" +addopts = "-qq" +testpaths = [ + "flwr_baselines", +] + +[tool.mypy] +ignore_missing_imports = true +strict = false +plugins = "numpy.typing.mypy_plugin" + +[tool.pylint."MESSAGES CONTROL"] +disable = "bad-continuation,duplicate-code,too-few-public-methods,useless-import-alias" +good-names = "i,j,k,_,x,y,X,Y" +signature-mutators="hydra.main.main" + +[tool.pylint.typecheck] +generated-members="numpy.*, torch.*, tensorflow.*" + +[[tool.mypy.overrides]] +module = [ + "importlib.metadata.*", + "importlib_metadata.*", +] +follow_imports = "skip" +follow_imports_for_stubs = true +disallow_untyped_calls = false + +[[tool.mypy.overrides]] +module = "torch.*" +follow_imports = "skip" +follow_imports_for_stubs = true + +[tool.docformatter] +wrap-summaries = 88 +wrap-descriptions = 88 + +[tool.ruff] +target-version = "py38" +line-length = 88 +select = ["D", "E", "F", "W", "B", "ISC", "C4"] +fixable = ["D", "E", "F", "W", "B", "ISC", "C4"] +ignore = ["B024", "B027"] +exclude = [ + ".bzr", + ".direnv", + ".eggs", + ".git", + ".hg", + ".mypy_cache", + ".nox", + ".pants.d", + ".pytype", + ".ruff_cache", + ".svn", + ".tox", + ".venv", + "__pypackages__", + "_build", + "buck-out", + "build", + "dist", + "node_modules", + "venv", + "proto", +] + +[tool.ruff.pydocstyle] +convention = "numpy" diff --git a/baselines/flanders/run.sh b/baselines/flanders/run.sh new file mode 100644 index 000000000000..435c358c4ee7 --- /dev/null +++ b/baselines/flanders/run.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +python -m flanders.main --multirun server.num_rounds=50 dataset=mnist strategy=flanders aggregate_fn=fedavg,trimmedmean,fedmedian,krum,bulyan server.pool_size=100 
server.num_malicious=0,20,60,80 server.attack_fn=gaussian,lie,fang,minmax server.warmup_rounds=2 client_resources.num_cpus=0.1 client_resources.num_gpus=0.1 + +python -m flanders.main --multirun server.num_rounds=50 dataset=mnist strategy=fedavg,trimmedmean,fedmedian,krum,bulyan server.pool_size=100 server.num_malicious=0,20,60,80 server.attack_fn=gaussian,lie,fang,minmax server.warmup_rounds=2 client_resources.num_cpus=0.1 client_resources.num_gpus=0.1 + +python -m flanders.main --multirun server.num_rounds=50 dataset=fmnist strategy=flanders aggregate_fn=fedavg,trimmedmean,fedmedian,krum,bulyan server.pool_size=100 server.num_malicious=0,20,60,80 server.attack_fn=gaussian,lie,fang,minmax server.warmup_rounds=2 client_resources.num_cpus=0.1 client_resources.num_gpus=0.1 + +python -m flanders.main --multirun server.num_rounds=50 dataset=fmnist strategy=fedavg,trimmedmean,fedmedian,krum,bulyan server.pool_size=100 server.num_malicious=0,20,60,80 server.attack_fn=gaussian,lie,fang,minmax server.warmup_rounds=2 client_resources.num_cpus=0.1 client_resources.num_gpus=0.1 \ No newline at end of file