From 67ad7adef80116c2f3e23eedff0d1931bfc11b8f Mon Sep 17 00:00:00 2001
From: porteratzo
Date: Thu, 5 Oct 2023 08:46:33 -0700
Subject: [PATCH 1/7] llm tutorial

---
 openfl-tutorials/Federated_PyTorch_LLM.ipynb | 474 +++++++++++++++++++
 1 file changed, 474 insertions(+)
 create mode 100644 openfl-tutorials/Federated_PyTorch_LLM.ipynb

diff --git a/openfl-tutorials/Federated_PyTorch_LLM.ipynb b/openfl-tutorials/Federated_PyTorch_LLM.ipynb
new file mode 100644
index 0000000000..4ace9f3d30
--- /dev/null
+++ b/openfl-tutorials/Federated_PyTorch_LLM.ipynb
@@ -0,0 +1,474 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Federated PyTorch LLM Tutorial"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "This notebook is an example of transfer learning: a pre-trained large language model is fine-tuned on a downstream task.\n",
+    "\n",
+    "A custom DataLoader is used with the OpenFL Python API"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#Install dependencies if not already installed\n",
+    "!pip install torch torchvision peft transformers sentencepiece huggingface_hub accelerate datasets evaluate seqeval\n",
+    "%load_ext autoreload\n",
+    "%autoreload 2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from typing import Any, Mapping\n",
+    "import numpy as np\n",
+    "import openfl.native as fx\n",
+    "import torch\n",
+    "import torch as pt\n",
+    "from accelerate import Accelerator\n",
+    "from datasets import Dataset, load_dataset, load_metric\n",
+    "from openfl.federated import PyTorchTaskRunner, TaskRunner\n",
+    "from openfl.federated.task.runner_pt import change_tags\n",
+    "from openfl.utilities import Metric, TensorKey\n",
+    "from openfl.utilities.data_splitters import EqualNumPyDataSplitter\n",
+    "from peft import LoraConfig, TaskType, get_peft_model\n",
+    "from peft.utils import get_peft_model_state_dict, set_peft_model_state_dict\n",
+    "from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n",
+    "from torch.optim import AdamW\n",
+    "from torch.utils.data import DataLoader\n",
+    "from tqdm import tqdm\n",
+    "import torch.nn as nn\n",
+    "\n",
+    "from transformers import (AutoConfig, AutoModelForSequenceClassification,\n",
+    "                          AutoTokenizer, DataCollatorWithPadding)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "After importing the required packages, the next step is setting up our OpenFL workspace. To do this, simply run the `fx.init()` command as follows:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#Setup default workspace, logging, etc.\n",
+    "fx.init('torch_cnn_mnist')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now we are ready to define our dataset and model to perform federated learning on. We will fine-tune a `roberta-large` model with a LoRA adapter on the GLUE MRPC dataset, wrapped in a custom federated data loader. 
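As an aside (not one of the notebook cells), the following standalone sketch illustrates the parameter-efficient fine-tuning setup that this tutorial federates; it mirrors the `LoraConfig` used in the model-definition cell further below, and the packages are the ones installed in the pip cell above:

    # Standalone sketch: wrap roberta-large with the same LoRA adapter the task runner uses.
    from peft import LoraConfig, TaskType, get_peft_model
    from transformers import AutoModelForSequenceClassification

    base = AutoModelForSequenceClassification.from_pretrained("roberta-large", return_dict=True)
    lora_cfg = LoraConfig(task_type=TaskType.SEQ_CLS, inference_mode=False,
                          r=16, lora_alpha=16, lora_dropout=0.1, bias="all")
    peft_model = get_peft_model(base, lora_cfg)
    peft_model.print_trainable_parameters()  # only the adapter (plus bias/classifier) weights stay trainable

Because only these adapter tensors are extracted by `get_peft_model_state_dict` in the task runner below, each federated round exchanges a small fraction of the full roberta-large weights.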
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Download the data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_glue_mrpc_dataset(tokenizer):\n", + " dataset = load_dataset(\"glue\", \"mrpc\")\n", + "\n", + " def tokenize_function(examples):\n", + " # max_length=None => use the model max length (it's actually the default)\n", + " outputs = tokenizer(\n", + " examples[\"sentence1\"],\n", + " examples[\"sentence2\"],\n", + " truncation=True,\n", + " max_length=None,\n", + " )\n", + " return outputs\n", + "\n", + " tokenized_datasets = dataset.map(\n", + " tokenize_function,\n", + " batched=True,\n", + " remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n", + " )\n", + " tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n", + " tokenized_datasets.set_format(\"torch\")\n", + " data_collator = DataCollatorWithPadding(tokenizer=tokenizer, padding=\"longest\")\n", + " return data_collator, tokenized_datasets\n", + "\n", + "base_model_name = \"roberta-large\"\n", + "padding_side = \"right\"\n", + "tokenizer = AutoTokenizer.from_pretrained(base_model_name, padding_side=padding_side)\n", + "if getattr(tokenizer, \"pad_token_id\") is None:\n", + " tokenizer.pad_token_id = tokenizer.eos_token_id\n", + "data_collator, tokenized_datasets = get_glue_mrpc_dataset(tokenizer)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Describe the dataset" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class GlueMrpc(Dataset):\n", + " \"\"\"\n", + " Has 5.8k pairs of sentences with annotations if the two sentences are equivalent\n", + " \"\"\" \n", + " def get_shape(self):\n", + " \n", + " if not hasattr(self, 'saved_shape'):\n", + " self.saved_shape = max([len(i) for i in self.data['input_ids']])\n", + " return self.saved_shape\n", + "\n", + "train_set = GlueMrpc.from_dict(tokenized_datasets['train'].to_dict())\n", + "valid_set = GlueMrpc.from_dict(tokenized_datasets['test'].to_dict())\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Implement Federated dataset\n", + "We have to implement `split` method" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class GlueMrpcFederatedDataset(DataLoader):\n", + " def __init__(self, train_set, valid_set, batch_size, data_collator=None):\n", + " self.data_splitter = EqualNumPyDataSplitter()\n", + " if isinstance(train_set,Dataset):\n", + " self.train_set = GlueMrpc.from_dict(train_set.to_dict())\n", + " else:\n", + " self.train_set = train_set\n", + " \n", + " if isinstance(valid_set,Dataset):\n", + " self.valid_set = GlueMrpc.from_dict(valid_set.to_dict())\n", + " else:\n", + " self.valid_set = valid_set \n", + " \n", + " self.batch_size = batch_size\n", + " self.data_collator = data_collator\n", + " \n", + " def split(self, num_collaborators):\n", + " train_split = self.data_splitter.split(self.train_set, num_collaborators)\n", + " valid_split = self.data_splitter.split(self.valid_set, num_collaborators)\n", + " return [\n", + " GlueMrpcFederatedDataset(\n", + " self.train_set.select(train_split[i]),\n", + " self.valid_set.select(valid_split[i]),\n", + " self.batch_size\n", + " )\n", + " for i in range(num_collaborators)\n", + " ]\n", + " \n", + " def get_feature_shape(self):\n", + " return self.train_set.get_shape()\n", + " \n", + " def 
get_train_loader(self, num_batches=None):\n", + " return DataLoader(self.train_set, batch_size=self.batch_size, collate_fn=data_collator)\n", + " \n", + " def get_valid_loader(self):\n", + " return DataLoader(self.valid_set, collate_fn=data_collator)\n", + " \n", + " def get_train_data_size(self):\n", + " return len(self.train_set)\n", + " \n", + " def get_valid_data_size(self):\n", + " return len(self.valid_set)\n", + " \n", + "fl_data = GlueMrpcFederatedDataset(train_set, valid_set, batch_size=32)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Define model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class LLMTaskRunner(PyTorchTaskRunner):\n", + " def __init__(self, base_model_name, data_loader, device=None, metric=None, **kwargs):\n", + " kwargs['data_loader'] = data_loader\n", + " super().__init__(device, **kwargs)\n", + " self.base_model_name = base_model_name\n", + " self.metric = metric\n", + " self._init_model()\n", + " self._init_optimizer()\n", + " \n", + " def _init_model(self):\n", + " model = AutoModelForSequenceClassification.from_pretrained(\n", + " self.base_model_name, return_dict=True)\n", + " peft_config = LoraConfig(task_type=TaskType.SEQ_CLS, inference_mode=False, r=16, lora_alpha=16, lora_dropout=0.1, bias=\"all\")\n", + " self.model = get_peft_model(model, peft_config)\n", + " \n", + " def _init_optimizer(self):\n", + " no_decay = [\"bias\", \"LayerNorm.weight\"]\n", + " optimizer_grouped_parameters = [\n", + " {\n", + " \"params\": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],\n", + " \"weight_decay\": 0.01,\n", + " },\n", + " {\n", + " \"params\": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],\n", + " \"weight_decay\": 0.0,\n", + " },\n", + " ]\n", + " self.optimizer = AdamW(optimizer_grouped_parameters, lr=0.01)\n", + " \n", + " self.training_round_completed = False\n", + " self.initialize_tensorkeys_for_functions()\n", + " \n", + " def state_dict(self):\n", + " return get_peft_model_state_dict(self.model)\n", + " \n", + " def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True):\n", + " return set_peft_model_state_dict(\n", + " self.model, state_dict\n", + " )\n", + " \n", + " def validate(self, col_name, round_num, input_tensor_dict,\n", + " use_tqdm=False, **kwargs):\n", + " \"\"\"Validate.\n", + "\n", + " Run validation of the model on the local data.\n", + "\n", + " Args:\n", + " col_name: Name of the collaborator\n", + " round_num: What round is it\n", + " input_tensor_dict: Required input tensors (for model)\n", + " use_tqdm (bool): Use tqdm to print a progress bar (Default=True)\n", + "\n", + " Returns:\n", + " global_output_dict: Tensors to send back to the aggregator\n", + " local_output_dict: Tensors to maintain in the local TensorDB\n", + "\n", + " \"\"\"\n", + " self.rebuild_model(round_num, input_tensor_dict, validation=True)\n", + " self.model.eval()\n", + " self.model.to(self.device)\n", + " val_score = 0\n", + " total_samples = 0\n", + "\n", + " loader = self.data_loader.get_valid_loader()\n", + " if use_tqdm:\n", + " loader = tqdm(loader, desc='validate')\n", + "\n", + " with pt.no_grad():\n", + " for sample in loader:\n", + " samples = sample['input_ids'].shape[0]\n", + " total_samples += samples\n", + " output = self.model(**sample)\n", + " # get the index of the max log-probability\n", + " logits = output.logits\n", + " predictions = 
torch.argmax(logits, dim=-1)\n",
    "                metric.add_batch(predictions=predictions, references=sample['labels'])\n",
    "            val_score = metric.compute()['accuracy']\n",
    "\n",
    "        origin = col_name\n",
    "        suffix = 'validate'\n",
    "        if kwargs['apply'] == 'local':\n",
    "            suffix += '_local'\n",
    "        else:\n",
    "            suffix += '_agg'\n",
    "        tags = ('metric',)\n",
    "        tags = change_tags(tags, add_field=suffix)\n",
    "        # TODO figure out a better way to pass in metric for this pytorch\n",
    "        #  validate function\n",
    "        output_tensor_dict = {\n",
    "            TensorKey('acc', origin, round_num, True, tags):\n",
    "                np.array(val_score)\n",
    "        }\n",
    "\n",
    "        # Empty list represents metrics that should only be stored locally\n",
    "        return output_tensor_dict, {}\n",
    "\n",
    "    def train_epoch(self, batch_generator) -> Metric:\n",
    "        \"\"\"Train single epoch.\n",
    "\n",
    "        Override this function in order to use custom training.\n",
    "\n",
    "        Args:\n",
    "            batch_generator: Train dataset batch generator. Yields (samples, targets) tuples of\n",
    "            size = `self.data_loader.batch_size`.\n",
    "        Returns:\n",
    "            Metric: An object containing name and np.ndarray value.\n",
    "        \"\"\"\n",
    "        losses = []\n",
    "        for sample in batch_generator:\n",
    "            self.optimizer.zero_grad()\n",
    "            output = self.model(**sample)\n",
    "            loss = output.loss\n",
    "            loss.backward()\n",
    "            torch.nn.utils.clip_grad_norm_(self.model.parameters(),1.0)\n",
    "            self.model.step()\n",
    "            losses.append(loss.detach().cpu().numpy())\n",
    "        loss = np.mean(losses)\n",
    "        if self.model.config.problem_type == \"regression\":\n",
    "            loss_fct = MSELoss()\n",
    "        elif self.model.config.problem_type == \"single_label_classification\":\n",
    "            loss_fct = CrossEntropyLoss()\n",
    "        elif self.model.config.problem_type == \"multi_label_classification\":\n",
    "            loss_fct = BCEWithLogitsLoss()\n",
    "        return Metric(name=loss_fct._get_name(), value=np.array(loss))\n",
    "    \n",
    "    \n",
    "    def save_native(self, filepath, model_state_dict_key='model_state_dict',\n",
    "                    optimizer_state_dict_key='optimizer_state_dict', **kwargs):\n",
    "        \"\"\"\n",
    "        Save model and optimizer states in a pickled file specified by the \\\n",
    "        filepath. model_/optimizer_state_dicts are stored in the keys provided. \\\n",
    "        Uses pt.save().\n",
    "\n",
    "        Args:\n",
    "            filepath (string)                 : Path to pickle file to be\n",
    "                                                created by pt.save().\n",
    "            model_state_dict_key (string)     : key for model state dict\n",
    "                                                in pickled file.\n",
    "            optimizer_state_dict_key (string) : key for optimizer state\n",
    "                                                dict in pickled file.\n",
    "            kwargs                            : unused\n",
    "\n",
    "        Returns:\n",
    "            None\n",
    "        \"\"\"\n",
    "        pickle_dict = {\n",
    "            model_state_dict_key: get_peft_model_state_dict(self.model),\n",
    "            optimizer_state_dict_key: self.optimizer.state_dict()\n",
    "        }\n",
    "        pt.save(pickle_dict, filepath)"
   ]
  },
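A note on `train_epoch` above: it calls `self.model.step()`, which neither the PEFT wrapper nor the underlying `RobertaForSequenceClassification` defines, so the first training round fails with an `AttributeError` (captured in the execution output of the follow-up commit below, which then swaps the call for `self.model.zero_grad()`). In a conventional PyTorch loop it is the optimizer that is stepped after gradient clipping. The sketch below is illustrative only; it assumes `self.optimizer.step()` is the intended behaviour and is not necessarily the change adopted later in this patch series:

    # Illustrative sketch of a conventional training step for LLMTaskRunner.train_epoch.
    # Assumption: the optimizer update is what was intended where self.model.step() is called.
    def train_epoch(self, batch_generator) -> Metric:
        losses = []
        for sample in batch_generator:
            self.optimizer.zero_grad()                                     # clear old gradients
            output = self.model(**sample)                                  # forward pass; HF models return .loss
            loss = output.loss
            loss.backward()                                                # backpropagate
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)   # gradient clipping
            self.optimizer.step()                                          # apply the parameter update
            losses.append(loss.detach().cpu().numpy())
        # MRPC is single-label classification, so the reported value is a cross-entropy loss.
        return Metric(name=CrossEntropyLoss()._get_name(), value=np.array(np.mean(losses)))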
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "num_collaborators = 2\n",
    "metric = load_metric('glue', \"mrpc\")\n",
    "collaborator_models = [\n",
    "    LLMTaskRunner(\n",
    "        base_model_name,\n",
    "        data_loader=data_slice,\n",
    "        metric=metric\n",
    "    )\n",
    "    for data_slice in fl_data.split(num_collaborators)]\n",
    "collaborators = {'one':collaborator_models[0],'two':collaborator_models[1]}#, 'three':collaborator_models[2]}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Original TinyImageNet dataset\n",
    "print(f'Original training data size: {len(fl_data.train_set)}')\n",
    "print(f'Original validation data size: {len(fl_data.valid_set)}\\n')\n",
    "\n",
    "#Each collaborator's data\n",
    "for i, model in enumerate(collaborator_models):\n",
    "    print(f'Collaborator {i}\\'s training data size: {len(model.data_loader.train_set)}')\n",
    "    print(f'Collaborator {i}\\'s validation data size: {len(model.data_loader.valid_set)}\\n')\n",
    "\n",
    "#Collaborator three's data\n",
    "#print(f'Collaborator three\\'s training data size: {len(collaborator_models[2].data_loader.X_train)}')\n",
    "#print(f'Collaborator three\\'s validation data size: {len(collaborator_models[2].data_loader.X_valid)}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Run experiment, return trained FederatedModel\n",
    "final_fl_model = fx.run_experiment(collaborators,{'aggregator.settings.rounds_to_train':3})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Save final model\n",
    "final_fl_model.save_native('final_model.pth')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "llama-env",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}

From 600bfc97ce0bfd43121148efc7f485ba5052a8c2 Mon Sep 17 00:00:00 2001
From: porteratzo
Date: Thu, 5 Oct 2023 08:50:59 -0700
Subject: [PATCH 2/7] small fix

---
 openfl-tutorials/Federated_PyTorch_LLM.ipynb | 534 ++++++++++++++++++-
 1 file changed, 520 insertions(+), 14 deletions(-)

diff --git a/openfl-tutorials/Federated_PyTorch_LLM.ipynb b/openfl-tutorials/Federated_PyTorch_LLM.ipynb
index 4ace9f3d30..ca41b3c13d 100644
--- a/openfl-tutorials/Federated_PyTorch_LLM.ipynb
+++ 
b/openfl-tutorials/Federated_PyTorch_LLM.ipynb @@ -30,7 +30,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -66,9 +66,134 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Creating Workspace Directories\n", + "Creating Workspace Templates\n", + "Collecting torch==1.13.1 (from -r /home/oamontoy/.local/workspace/requirements.txt (line 1))\n", + " Using cached torch-1.13.1-cp38-cp38-manylinux1_x86_64.whl (887.4 MB)\n", + "Requirement already satisfied: torchvision==0.14.1 in ./llama-env/lib/python3.8/site-packages (from -r /home/oamontoy/.local/workspace/requirements.txt (line 2)) (0.14.1)\n", + "Requirement already satisfied: tensorboard in ./llama-env/lib/python3.8/site-packages (from -r /home/oamontoy/.local/workspace/requirements.txt (line 3)) (2.14.0)\n", + "Requirement already satisfied: wheel>=0.38.0 in ./llama-env/lib/python3.8/site-packages (from -r /home/oamontoy/.local/workspace/requirements.txt (line 4)) (0.41.2)\n", + "Requirement already satisfied: typing-extensions in ./llama-env/lib/python3.8/site-packages (from torch==1.13.1->-r /home/oamontoy/.local/workspace/requirements.txt (line 1)) (4.8.0)\n", + "Requirement already satisfied: nvidia-cuda-runtime-cu11==11.7.99 in ./llama-env/lib/python3.8/site-packages (from torch==1.13.1->-r /home/oamontoy/.local/workspace/requirements.txt (line 1)) (11.7.99)\n", + "Requirement already satisfied: nvidia-cudnn-cu11==8.5.0.96 in ./llama-env/lib/python3.8/site-packages (from torch==1.13.1->-r /home/oamontoy/.local/workspace/requirements.txt (line 1)) (8.5.0.96)\n", + "Requirement already satisfied: nvidia-cublas-cu11==11.10.3.66 in ./llama-env/lib/python3.8/site-packages (from torch==1.13.1->-r /home/oamontoy/.local/workspace/requirements.txt (line 1)) (11.10.3.66)\n", + "Requirement already satisfied: nvidia-cuda-nvrtc-cu11==11.7.99 in ./llama-env/lib/python3.8/site-packages (from torch==1.13.1->-r /home/oamontoy/.local/workspace/requirements.txt (line 1)) (11.7.99)\n", + "Requirement already satisfied: numpy in ./llama-env/lib/python3.8/site-packages (from torchvision==0.14.1->-r /home/oamontoy/.local/workspace/requirements.txt (line 2)) (1.24.4)\n", + "Requirement already satisfied: requests in ./llama-env/lib/python3.8/site-packages (from torchvision==0.14.1->-r /home/oamontoy/.local/workspace/requirements.txt (line 2)) (2.31.0)\n", + "Requirement already satisfied: pillow!=8.3.*,>=5.3.0 in ./llama-env/lib/python3.8/site-packages (from torchvision==0.14.1->-r /home/oamontoy/.local/workspace/requirements.txt (line 2)) (10.0.1)\n", + "Requirement already satisfied: setuptools in ./llama-env/lib/python3.8/site-packages (from nvidia-cublas-cu11==11.10.3.66->torch==1.13.1->-r /home/oamontoy/.local/workspace/requirements.txt (line 1)) (68.2.2)\n", + "Requirement already satisfied: absl-py>=0.4 in ./llama-env/lib/python3.8/site-packages (from tensorboard->-r /home/oamontoy/.local/workspace/requirements.txt (line 3)) (2.0.0)\n", + "Requirement already satisfied: grpcio>=1.48.2 in ./llama-env/lib/python3.8/site-packages (from tensorboard->-r /home/oamontoy/.local/workspace/requirements.txt (line 3)) (1.48.2)\n", + "Requirement already satisfied: google-auth<3,>=1.6.3 in ./llama-env/lib/python3.8/site-packages (from tensorboard->-r /home/oamontoy/.local/workspace/requirements.txt (line 3)) (2.23.0)\n", + "Requirement already 
satisfied: google-auth-oauthlib<1.1,>=0.5 in ./llama-env/lib/python3.8/site-packages (from tensorboard->-r /home/oamontoy/.local/workspace/requirements.txt (line 3)) (1.0.0)\n", + "Requirement already satisfied: markdown>=2.6.8 in ./llama-env/lib/python3.8/site-packages (from tensorboard->-r /home/oamontoy/.local/workspace/requirements.txt (line 3)) (3.4.4)\n", + "Requirement already satisfied: protobuf>=3.19.6 in ./llama-env/lib/python3.8/site-packages (from tensorboard->-r /home/oamontoy/.local/workspace/requirements.txt (line 3)) (3.19.6)\n", + "Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in ./llama-env/lib/python3.8/site-packages (from tensorboard->-r /home/oamontoy/.local/workspace/requirements.txt (line 3)) (0.7.1)\n", + "Requirement already satisfied: werkzeug>=1.0.1 in ./llama-env/lib/python3.8/site-packages (from tensorboard->-r /home/oamontoy/.local/workspace/requirements.txt (line 3)) (2.3.7)\n", + "Requirement already satisfied: cachetools<6.0,>=2.0.0 in ./llama-env/lib/python3.8/site-packages (from google-auth<3,>=1.6.3->tensorboard->-r /home/oamontoy/.local/workspace/requirements.txt (line 3)) (5.3.1)\n", + "Requirement already satisfied: pyasn1-modules>=0.2.1 in ./llama-env/lib/python3.8/site-packages (from google-auth<3,>=1.6.3->tensorboard->-r /home/oamontoy/.local/workspace/requirements.txt (line 3)) (0.3.0)\n", + "Requirement already satisfied: rsa<5,>=3.1.4 in ./llama-env/lib/python3.8/site-packages (from google-auth<3,>=1.6.3->tensorboard->-r /home/oamontoy/.local/workspace/requirements.txt (line 3)) (4.9)\n", + "Requirement already satisfied: urllib3<2.0 in ./llama-env/lib/python3.8/site-packages (from google-auth<3,>=1.6.3->tensorboard->-r /home/oamontoy/.local/workspace/requirements.txt (line 3)) (1.26.16)\n", + "Requirement already satisfied: requests-oauthlib>=0.7.0 in ./llama-env/lib/python3.8/site-packages (from google-auth-oauthlib<1.1,>=0.5->tensorboard->-r /home/oamontoy/.local/workspace/requirements.txt (line 3)) (1.3.1)\n", + "Requirement already satisfied: six>=1.5.2 in ./llama-env/lib/python3.8/site-packages (from grpcio>=1.48.2->tensorboard->-r /home/oamontoy/.local/workspace/requirements.txt (line 3)) (1.16.0)\n", + "Requirement already satisfied: importlib-metadata>=4.4 in ./llama-env/lib/python3.8/site-packages (from markdown>=2.6.8->tensorboard->-r /home/oamontoy/.local/workspace/requirements.txt (line 3)) (6.8.0)\n", + "Requirement already satisfied: charset-normalizer<4,>=2 in ./llama-env/lib/python3.8/site-packages (from requests->torchvision==0.14.1->-r /home/oamontoy/.local/workspace/requirements.txt (line 2)) (3.2.0)\n", + "Requirement already satisfied: idna<4,>=2.5 in ./llama-env/lib/python3.8/site-packages (from requests->torchvision==0.14.1->-r /home/oamontoy/.local/workspace/requirements.txt (line 2)) (3.4)\n", + "Requirement already satisfied: certifi>=2017.4.17 in ./llama-env/lib/python3.8/site-packages (from requests->torchvision==0.14.1->-r /home/oamontoy/.local/workspace/requirements.txt (line 2)) (2023.7.22)\n", + "Requirement already satisfied: MarkupSafe>=2.1.1 in ./llama-env/lib/python3.8/site-packages (from werkzeug>=1.0.1->tensorboard->-r /home/oamontoy/.local/workspace/requirements.txt (line 3)) (2.1.3)\n", + "Requirement already satisfied: zipp>=0.5 in ./llama-env/lib/python3.8/site-packages (from importlib-metadata>=4.4->markdown>=2.6.8->tensorboard->-r /home/oamontoy/.local/workspace/requirements.txt (line 3)) (3.17.0)\n", + "Requirement already satisfied: pyasn1<0.6.0,>=0.4.6 in 
./llama-env/lib/python3.8/site-packages (from pyasn1-modules>=0.2.1->google-auth<3,>=1.6.3->tensorboard->-r /home/oamontoy/.local/workspace/requirements.txt (line 3)) (0.5.0)\n", + "Requirement already satisfied: oauthlib>=3.0.0 in ./llama-env/lib/python3.8/site-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<1.1,>=0.5->tensorboard->-r /home/oamontoy/.local/workspace/requirements.txt (line 3)) (3.2.2)\n", + "Installing collected packages: torch\n", + " Attempting uninstall: torch\n", + " Found existing installation: torch 2.1.0\n", + " Uninstalling torch-2.1.0:\n", + " Successfully uninstalled torch-2.1.0\n", + "Successfully installed torch-1.13.1\n", + "Successfully installed packages from /home/oamontoy/.local/workspace/requirements.txt.\n", + "\n", + "New workspace directory structure:\n", + "workspace\n", + "├── logs\n", + "│ └── cnn_mnist\n", + "│ ├── events.out.tfevents.1695850586.M50CYP2SBSTD.111429.0\n", + "│ ├── events.out.tfevents.1695942084.M50CYP2SBSTD.4924.0\n", + "│ ├── events.out.tfevents.1695849809.M50CYP2SBSTD.107313.0\n", + "│ ├── events.out.tfevents.1695850472.M50CYP2SBSTD.110437.0\n", + "│ ├── events.out.tfevents.1695942744.M50CYP2SBSTD.15635.0\n", + "│ ├── events.out.tfevents.1696008244.M50CYP2SBSTD.98097.0\n", + "│ ├── events.out.tfevents.1695850981.M50CYP2SBSTD.114740.0\n", + "│ ├── events.out.tfevents.1695939101.M50CYP2SBSTD.143673.0\n", + "│ ├── events.out.tfevents.1695850850.M50CYP2SBSTD.113094.0\n", + "│ ├── events.out.tfevents.1695850404.M50CYP2SBSTD.109391.0\n", + "│ ├── events.out.tfevents.1695942232.M50CYP2SBSTD.7126.0\n", + "│ └── events.out.tfevents.1695849986.M50CYP2SBSTD.107937.0\n", + "├── .workspace\n", + "├── final_model.pth\n", + "├── plan\n", + "│ ├── plan.yaml\n", + "│ ├── defaults\n", + "│ ├── data.yaml\n", + "│ └── cols.yaml\n", + "├── agg_to_col_two_signed_cert.zip\n", + "├── requirements.txt\n", + "├── data\n", + "├── save\n", + "│ ├── torch_cnn_mnist_best.pbuf\n", + "│ ├── torch_cnn_mnist_last.pbuf\n", + "│ └── torch_cnn_mnist_init.pbuf\n", + "├── agg_to_col_one_signed_cert.zip\n", + "├── src\n", + "│ ├── pt_cnn.py\n", + "│ ├── mnist_utils.py\n", + "│ ├── __pycache__\n", + "│ │ ├── __init__.cpython-38.pyc\n", + "│ │ └── mnist_utils.cpython-38.pyc\n", + "│ ├── ptmnist_inmemory.py\n", + "│ └── __init__.py\n", + "└── cert\n", + "\n", + "8 directories, 30 files\n", + "Setting Up Certificate Authority...\n", + "\n", + "1. Create Root CA\n", + "1.1 Create Directories\n", + "1.2 Create Database\n", + "1.3 Create CA Request and Certificate\n", + "2. 
Create Signing Certificate\n", + "2.1 Create Directories\n", + "2.2 Create Database\n", + "2.3 Create Signing Certificate CSR\n", + "2.4 Sign Signing Certificate CSR\n", + "3 Create Certificate Chain\n", + "\n", + "Done.\n", + "Creating AGGREGATOR certificate key pair with following settings: CN=\u001b[31mm50cyp2sbstd\u001b[0m, SAN=\u001b[31mDNS:m50cyp2sbstd\u001b[0m\n", + " Writing AGGREGATOR certificate key pair to: \u001b[32m/home/oamontoy/workspace/cert/server\u001b[0m\n", + "The CSR Hash for file \u001b[32mserver/agg_m50cyp2sbstd.csr\u001b[0m = \u001b[31md49a1328c9e8ccfb65a4d583018704fd9d24b3301bb800ceb9f50b591937e1a5f8f419238b5e4c24af732693d37ce088\u001b[0m\n", + " Signing AGGREGATOR certificate\n", + "Creating COLLABORATOR certificate key pair with following settings: CN=\u001b[31mone\u001b[0m, SAN=\u001b[31mDNS:one\u001b[0m\n", + " Moving COLLABORATOR certificate to: \u001b[32m/home/oamontoy/workspace/cert/col_one\u001b[0m\n", + "The CSR Hash for file \u001b[32mcol_one.csr\u001b[0m = \u001b[31m0caea6371d4b13f51be51507794c4c18e0a9cb408f286f2f81a4b179380b15b3215e94d739ec952065fbc7eb3b2edbba\u001b[0m\n", + " Signing COLLABORATOR certificate\n", + "\n", + "Registering \u001b[32mone\u001b[0m in \u001b[32m/home/oamontoy/.local/workspace/plan/cols.yaml\u001b[0m\n", + "Creating COLLABORATOR certificate key pair with following settings: CN=\u001b[31mtwo\u001b[0m, SAN=\u001b[31mDNS:two\u001b[0m\n", + " Moving COLLABORATOR certificate to: \u001b[32m/home/oamontoy/workspace/cert/col_two\u001b[0m\n", + "The CSR Hash for file \u001b[32mcol_two.csr\u001b[0m = \u001b[31m3e6ffe3d25d39bb6f3f1fb851eb8da60d4cbf4e0bee78ad0f7731cc0e6bb47433830523f2c39dc0ca7f0ce79b69cc6c3\u001b[0m\n", + " Signing COLLABORATOR certificate\n", + "\n", + "Registering \u001b[32mtwo\u001b[0m in \u001b[32m/home/oamontoy/.local/workspace/plan/cols.yaml\u001b[0m\n" + ] + } + ], "source": [ "#Setup default workspace, logging, etc.\n", "fx.init('torch_cnn_mnist')" @@ -90,7 +215,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -134,7 +259,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -162,7 +287,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -221,7 +346,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ @@ -339,7 +464,7 @@ " loss = output.loss\n", " loss.backward()\n", " torch.nn.utils.clip_grad_norm_(self.model.parameters(),1.0)\n", - " self.model.step()\n", + " self.model.zero_grad()\n", " losses.append(loss.detach().cpu().numpy())\n", " loss = np.mean(losses)\n", " if self.model.config.problem_type == \"regression\":\n", @@ -379,9 +504,54 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/tmp/ipykernel_159004/1723172838.py:2: FutureWarning: load_metric is deprecated and will be removed in the next major version of datasets. 
Use 'evaluate.load' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate\n", + " metric = load_metric('glue', \"mrpc\")\n", + "Some weights of RobertaForSequenceClassification were not initialized from the model checkpoint at roberta-large and are newly initialized: ['classifier.dense.bias', 'classifier.out_proj.bias', 'classifier.out_proj.weight', 'classifier.dense.weight']\n", + "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" + ] + }, + { + "data": { + "text/html": [ + "
[08:48:31] WARNING  tried to remove tensor: __opt_state_needed not present in the tensor dict                                                       utils.py:172\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m[08:48:31]\u001b[0m\u001b[2;36m \u001b[0m\u001b[31mWARNING \u001b[0m tried to remove tensor: __opt_state_needed not present in the tensor dict \u001b]8;id=932122;file:///home/oamontoy/workspace/sec-openfl/openfl/utilities/utils.py\u001b\\\u001b[2mutils.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=685149;file:///home/oamontoy/workspace/sec-openfl/openfl/utilities/utils.py#172\u001b\\\u001b[2m172\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of RobertaForSequenceClassification were not initialized from the model checkpoint at roberta-large and are newly initialized: ['classifier.dense.bias', 'classifier.out_proj.bias', 'classifier.out_proj.weight', 'classifier.dense.weight']\n", + "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" + ] + }, + { + "data": { + "text/html": [ + "
[08:48:34] WARNING  tried to remove tensor: __opt_state_needed not present in the tensor dict                                                       utils.py:172\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m[08:48:34]\u001b[0m\u001b[2;36m \u001b[0m\u001b[31mWARNING \u001b[0m tried to remove tensor: __opt_state_needed not present in the tensor dict \u001b]8;id=38894;file:///home/oamontoy/workspace/sec-openfl/openfl/utilities/utils.py\u001b\\\u001b[2mutils.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=682120;file:///home/oamontoy/workspace/sec-openfl/openfl/utilities/utils.py#172\u001b\\\u001b[2m172\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "num_collaborators = 2\n", "metric = load_metric('glue', \"mrpc\")\n", @@ -397,9 +567,25 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Original training data size: 3668\n", + "Original validation data size: 1725\n", + "\n", + "Collaborator 0's training data size: 1834\n", + "Collaborator 0's validation data size: 863\n", + "\n", + "Collaborator 1's training data size: 1834\n", + "Collaborator 1's validation data size: 862\n", + "\n" + ] + } + ], "source": [ "#Original TinyImageNet dataset\n", "print(f'Original training data size: {len(fl_data.train_set)}')\n", @@ -417,9 +603,329 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 9, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "
           INFO     Updating aggregator.settings.rounds_to_train to 3...                                                                           native.py:102\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m \u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Updating aggregator.settings.rounds_to_train to \u001b[1;36m3\u001b[0m\u001b[33m...\u001b[0m \u001b]8;id=22181;file:///home/oamontoy/workspace/sec-openfl/openfl/native/native.py\u001b\\\u001b[2mnative.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=265323;file:///home/oamontoy/workspace/sec-openfl/openfl/native/native.py#102\u001b\\\u001b[2m102\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
           INFO     FL-Plan hash is 86b08340e96ba9e485169da1f860ea968811d1bf2e6867774fae4398426dd33c6ae56ca202002d393e3a4d91f946c1bc                 plan.py:235\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m \u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m FL-Plan hash is \u001b[34m86b08340e96ba9e485169da1f860ea968811d1bf2e6867774fae4398426dd33c6ae56ca202002d393e3a4d91f946c1bc\u001b[0m \u001b]8;id=965130;file:///home/oamontoy/workspace/sec-openfl/openfl/federated/plan/plan.py\u001b\\\u001b[2mplan.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=969631;file:///home/oamontoy/workspace/sec-openfl/openfl/federated/plan/plan.py#235\u001b\\\u001b[2m235\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
           INFO     Building 🡆 Object NoCompressionPipeline from openfl.pipelines Module.                                                            plan.py:171\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m \u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Building \u001b[31m🡆\u001b[0m Object \u001b[31mNoCompressionPipeline\u001b[0m from \u001b[31mopenfl.pipelines\u001b[0m Module. \u001b]8;id=418157;file:///home/oamontoy/workspace/sec-openfl/openfl/federated/plan/plan.py\u001b\\\u001b[2mplan.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=469081;file:///home/oamontoy/workspace/sec-openfl/openfl/federated/plan/plan.py#171\u001b\\\u001b[2m171\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
           INFO     Creating Initial Weights File    🠆 save/torch_cnn_mnist_init.pbuf                                                              native.py:277\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m \u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Creating Initial Weights File 🠆 save/torch_cnn_mnist_init.pbuf \u001b]8;id=949808;file:///home/oamontoy/workspace/sec-openfl/openfl/native/native.py\u001b\\\u001b[2mnative.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=806002;file:///home/oamontoy/workspace/sec-openfl/openfl/native/native.py#277\u001b\\\u001b[2m277\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
           INFO     Starting Experiment...                                                                                                         native.py:281\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m \u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Starting Experiment\u001b[33m...\u001b[0m \u001b]8;id=768304;file:///home/oamontoy/workspace/sec-openfl/openfl/native/native.py\u001b\\\u001b[2mnative.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=820559;file:///home/oamontoy/workspace/sec-openfl/openfl/native/native.py#281\u001b\\\u001b[2m281\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
           INFO     Building 🡆 Object RandomGroupedAssigner from openfl.component Module.                                                            plan.py:171\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m \u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Building \u001b[31m🡆\u001b[0m Object \u001b[31mRandomGroupedAssigner\u001b[0m from \u001b[31mopenfl.component\u001b[0m Module. \u001b]8;id=959697;file:///home/oamontoy/workspace/sec-openfl/openfl/federated/plan/plan.py\u001b\\\u001b[2mplan.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=588178;file:///home/oamontoy/workspace/sec-openfl/openfl/federated/plan/plan.py#171\u001b\\\u001b[2m171\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
           INFO     Building 🡆 Object CutoffTimeBasedStragglerHandling from openfl.component.straggler_handling_functions Module.                    plan.py:171\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m \u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Building \u001b[31m🡆\u001b[0m Object \u001b[31mCutoffTimeBasedStragglerHandling\u001b[0m from \u001b[31mopenfl.component.straggler_handling_functions\u001b[0m Module. \u001b]8;id=199471;file:///home/oamontoy/workspace/sec-openfl/openfl/federated/plan/plan.py\u001b\\\u001b[2mplan.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=897448;file:///home/oamontoy/workspace/sec-openfl/openfl/federated/plan/plan.py#171\u001b\\\u001b[2m171\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
           INFO     Importing 🡆 Object write_metric from src.mnist_utils Module.                                                                     plan.py:199\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m \u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Importing \u001b[31m🡆\u001b[0m Object \u001b[31mwrite_metric\u001b[0m from \u001b[31msrc.mnist_utils\u001b[0m Module. \u001b]8;id=125418;file:///home/oamontoy/workspace/sec-openfl/openfl/federated/plan/plan.py\u001b\\\u001b[2mplan.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=640295;file:///home/oamontoy/workspace/sec-openfl/openfl/federated/plan/plan.py#199\u001b\\\u001b[2m199\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/oamontoy/workspace/llama-env/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: libtorch_cuda_cu.so: cannot open shared object file: No such file or directory\n", + " warn(f\"Failed to load image Python extension: {e}\")\n" + ] + }, + { + "data": { + "text/html": [ + "
[08:48:35] INFO     Building 🡆 Object Aggregator from openfl.component Module.                                                                       plan.py:171\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m[08:48:35]\u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Building \u001b[31m🡆\u001b[0m Object \u001b[31mAggregator\u001b[0m from \u001b[31mopenfl.component\u001b[0m Module. \u001b]8;id=299428;file:///home/oamontoy/workspace/sec-openfl/openfl/federated/plan/plan.py\u001b\\\u001b[2mplan.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=109248;file:///home/oamontoy/workspace/sec-openfl/openfl/federated/plan/plan.py#171\u001b\\\u001b[2m171\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
           INFO     Using custom log metric: <function write_metric at 0x7f5c68ad28b0>                                                          aggregator.py:97\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m \u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Using custom log metric: \u001b[1m<\u001b[0m\u001b[1;95mfunction\u001b[0m\u001b[39m write_metric at \u001b[0m\u001b[1;36m0x7f5c68ad28b0\u001b[0m\u001b[1m>\u001b[0m \u001b]8;id=655419;file:///home/oamontoy/workspace/sec-openfl/openfl/component/aggregator/aggregator.py\u001b\\\u001b[2maggregator.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=161984;file:///home/oamontoy/workspace/sec-openfl/openfl/component/aggregator/aggregator.py#97\u001b\\\u001b[2m97\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
           INFO     Building 🡆 Object Collaborator from openfl.component Module.                                                                     plan.py:171\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m \u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Building \u001b[31m🡆\u001b[0m Object \u001b[31mCollaborator\u001b[0m from \u001b[31mopenfl.component\u001b[0m Module. \u001b]8;id=390053;file:///home/oamontoy/workspace/sec-openfl/openfl/federated/plan/plan.py\u001b\\\u001b[2mplan.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=853120;file:///home/oamontoy/workspace/sec-openfl/openfl/federated/plan/plan.py#171\u001b\\\u001b[2m171\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
           INFO     Building 🡆 Object Collaborator from openfl.component Module.                                                                     plan.py:171\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m \u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Building \u001b[31m🡆\u001b[0m Object \u001b[31mCollaborator\u001b[0m from \u001b[31mopenfl.component\u001b[0m Module. \u001b]8;id=612439;file:///home/oamontoy/workspace/sec-openfl/openfl/federated/plan/plan.py\u001b\\\u001b[2mplan.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=601521;file:///home/oamontoy/workspace/sec-openfl/openfl/federated/plan/plan.py#171\u001b\\\u001b[2m171\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
           INFO     Waiting for tasks...                                                                                                     collaborator.py:178\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m \u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Waiting for tasks\u001b[33m...\u001b[0m \u001b]8;id=806029;file:///home/oamontoy/workspace/sec-openfl/openfl/component/collaborator/collaborator.py\u001b\\\u001b[2mcollaborator.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=928412;file:///home/oamontoy/workspace/sec-openfl/openfl/component/collaborator/collaborator.py#178\u001b\\\u001b[2m178\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
           INFO     Sending tasks to collaborator one for round 0                                                                              aggregator.py:329\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m \u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Sending tasks to collaborator one for round \u001b[1;36m0\u001b[0m \u001b]8;id=515202;file:///home/oamontoy/workspace/sec-openfl/openfl/component/aggregator/aggregator.py\u001b\\\u001b[2maggregator.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=103453;file:///home/oamontoy/workspace/sec-openfl/openfl/component/aggregator/aggregator.py#329\u001b\\\u001b[2m329\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
           INFO     Received the following tasks: ['aggregated_model_validation', 'train', 'locally_tuned_model_validation']                 collaborator.py:168\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m \u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Received the following tasks: \u001b[1m[\u001b[0m\u001b[32m'aggregated_model_validation'\u001b[0m, \u001b[32m'train'\u001b[0m, \u001b[32m'locally_tuned_model_validation'\u001b[0m\u001b[1m]\u001b[0m \u001b]8;id=266582;file:///home/oamontoy/workspace/sec-openfl/openfl/component/collaborator/collaborator.py\u001b\\\u001b[2mcollaborator.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=901781;file:///home/oamontoy/workspace/sec-openfl/openfl/component/collaborator/collaborator.py#168\u001b\\\u001b[2m168\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
[08:48:36] INFO     Using TaskRunner subclassing API                                                                                         collaborator.py:253\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m[08:48:36]\u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Using TaskRunner subclassing API \u001b]8;id=457102;file:///home/oamontoy/workspace/sec-openfl/openfl/component/collaborator/collaborator.py\u001b\\\u001b[2mcollaborator.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=223379;file:///home/oamontoy/workspace/sec-openfl/openfl/component/collaborator/collaborator.py#253\u001b\\\u001b[2m253\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/oamontoy/workspace/sec-openfl/openfl/federated/task/runner_pt.py:284: UserWarning: The given NumPy array is not writable, and PyTorch does not support non-writable tensors. This means writing to this tensor will result in undefined behavior. You may want to copy the array to protect its data or make it writable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at ../torch/csrc/utils/tensor_numpy.cpp:206.)\n", + " new_state[k] = pt.from_numpy(tensor_dict.pop(k)).to(device)\n", + "You're using a RobertaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.\n" + ] + }, + { + "data": { + "text/html": [ + "
[08:49:31] METRIC   Round 0, collaborator one is sending metric for task aggregated_model_validation: acc   0.673233                         collaborator.py:415\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m[08:49:31]\u001b[0m\u001b[2;36m \u001b[0mMETRIC Round \u001b[1;36m0\u001b[0m, collaborator one is sending metric for task aggregated_model_validation: acc \u001b[1;36m0.673233\u001b[0m \u001b]8;id=512008;file:///home/oamontoy/workspace/sec-openfl/openfl/component/collaborator/collaborator.py\u001b\\\u001b[2mcollaborator.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=179476;file:///home/oamontoy/workspace/sec-openfl/openfl/component/collaborator/collaborator.py#415\u001b\\\u001b[2m415\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
           INFO     Collaborator one is sending task results for aggregated_model_validation, round 0                                          aggregator.py:520\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m \u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Collaborator one is sending task results for aggregated_model_validation, round \u001b[1;36m0\u001b[0m \u001b]8;id=921737;file:///home/oamontoy/workspace/sec-openfl/openfl/component/aggregator/aggregator.py\u001b\\\u001b[2maggregator.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=943670;file:///home/oamontoy/workspace/sec-openfl/openfl/component/aggregator/aggregator.py#520\u001b\\\u001b[2m520\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
           METRIC   Round 0, collaborator validate_agg aggregated_model_validation result acc:      0.673233                                   aggregator.py:559\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m \u001b[0m\u001b[2;36m \u001b[0mMETRIC Round \u001b[1;36m0\u001b[0m, collaborator validate_agg aggregated_model_validation result acc: \u001b[1;36m0.673233\u001b[0m \u001b]8;id=778930;file:///home/oamontoy/workspace/sec-openfl/openfl/component/aggregator/aggregator.py\u001b\\\u001b[2maggregator.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=605180;file:///home/oamontoy/workspace/sec-openfl/openfl/component/aggregator/aggregator.py#559\u001b\\\u001b[2m559\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
           INFO     Using TaskRunner subclassing API                                                                                         collaborator.py:253\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m \u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Using TaskRunner subclassing API \u001b]8;id=239383;file:///home/oamontoy/workspace/sec-openfl/openfl/component/collaborator/collaborator.py\u001b\\\u001b[2mcollaborator.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=814426;file:///home/oamontoy/workspace/sec-openfl/openfl/component/collaborator/collaborator.py#253\u001b\\\u001b[2m253\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
           INFO     Run 0 epoch of 0 round                                                                                                      runner_pt.py:155\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m \u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Run \u001b[1;36m0\u001b[0m epoch of \u001b[1;36m0\u001b[0m round \u001b]8;id=118270;file:///home/oamontoy/workspace/sec-openfl/openfl/federated/task/runner_pt.py\u001b\\\u001b[2mrunner_pt.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=848215;file:///home/oamontoy/workspace/sec-openfl/openfl/federated/task/runner_pt.py#155\u001b\\\u001b[2m155\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "ename": "AttributeError", + "evalue": "'RobertaForSequenceClassification' object has no attribute 'step'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", + "File \u001b[0;32m~/workspace/llama-env/lib/python3.8/site-packages/peft/peft_model.py:434\u001b[0m, in \u001b[0;36mPeftModel.__getattr__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 433\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m--> 434\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39msuper\u001b[39;49m()\u001b[39m.\u001b[39;49m\u001b[39m__getattr__\u001b[39;49m(name) \u001b[39m# defer to nn.Module's logic\u001b[39;00m\n\u001b[1;32m 435\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mAttributeError\u001b[39;00m:\n", + "File \u001b[0;32m~/workspace/llama-env/lib/python3.8/site-packages/torch/nn/modules/module.py:1695\u001b[0m, in \u001b[0;36m__getattr__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 1688\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mparameters\u001b[39m(\u001b[39mself\u001b[39m, recurse: \u001b[39mbool\u001b[39m \u001b[39m=\u001b[39m \u001b[39mTrue\u001b[39;00m) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m Iterator[Parameter]:\n\u001b[1;32m 1689\u001b[0m \u001b[39m \u001b[39m\u001b[39mr\u001b[39m\u001b[39m\"\"\"Returns an iterator over module parameters.\u001b[39;00m\n\u001b[1;32m 1690\u001b[0m \n\u001b[1;32m 1691\u001b[0m \u001b[39m This is typically passed to an optimizer.\u001b[39;00m\n\u001b[1;32m 1692\u001b[0m \n\u001b[1;32m 1693\u001b[0m \u001b[39m Args:\u001b[39;00m\n\u001b[1;32m 1694\u001b[0m \u001b[39m recurse (bool): if True, then yields parameters of this module\u001b[39;00m\n\u001b[0;32m-> 1695\u001b[0m \u001b[39m and all submodules. 
Otherwise, yields only parameters that\u001b[39;00m\n\u001b[1;32m 1696\u001b[0m \u001b[39m are direct members of this module.\u001b[39;00m\n\u001b[1;32m 1697\u001b[0m \n\u001b[1;32m 1698\u001b[0m \u001b[39m Yields:\u001b[39;00m\n\u001b[1;32m 1699\u001b[0m \u001b[39m Parameter: module parameter\u001b[39;00m\n\u001b[1;32m 1700\u001b[0m \n\u001b[1;32m 1701\u001b[0m \u001b[39m Example::\u001b[39;00m\n\u001b[1;32m 1702\u001b[0m \n\u001b[1;32m 1703\u001b[0m \u001b[39m >>> # xdoctest: +SKIP(\"undefined vars\")\u001b[39;00m\n\u001b[1;32m 1704\u001b[0m \u001b[39m >>> for param in model.parameters():\u001b[39;00m\n\u001b[1;32m 1705\u001b[0m \u001b[39m >>> print(type(param), param.size())\u001b[39;00m\n\u001b[1;32m 1706\u001b[0m \u001b[39m (20L,)\u001b[39;00m\n\u001b[1;32m 1707\u001b[0m \u001b[39m (20L, 1L, 5L, 5L)\u001b[39;00m\n\u001b[1;32m 1708\u001b[0m \n\u001b[1;32m 1709\u001b[0m \u001b[39m \"\"\"\u001b[39;00m\n\u001b[1;32m 1710\u001b[0m \u001b[39mfor\u001b[39;00m name, param \u001b[39min\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mnamed_parameters(recurse\u001b[39m=\u001b[39mrecurse):\n\u001b[1;32m 1711\u001b[0m \u001b[39myield\u001b[39;00m param\n", + "\u001b[0;31mAttributeError\u001b[0m: 'RobertaForSequenceClassification' object has no attribute 'step'" + ] + } + ], "source": [ "#Run experiment, return trained FederatedModel\n", "final_fl_model = fx.run_experiment(collaborators,{'aggregator.settings.rounds_to_train':3})" From 3cacb715548646461bd9d19bb3c814a4f098af0d Mon Sep 17 00:00:00 2001 From: porteratzo Date: Tue, 17 Oct 2023 10:57:43 -0700 Subject: [PATCH 3/7] fixes --- openfl-tutorials/Federated_PyTorch_LLM.ipynb | 680 +++---------------- 1 file changed, 105 insertions(+), 575 deletions(-) diff --git a/openfl-tutorials/Federated_PyTorch_LLM.ipynb b/openfl-tutorials/Federated_PyTorch_LLM.ipynb index ca41b3c13d..78b9978604 100644 --- a/openfl-tutorials/Federated_PyTorch_LLM.ipynb +++ b/openfl-tutorials/Federated_PyTorch_LLM.ipynb @@ -30,7 +30,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -39,9 +39,8 @@ "import openfl.native as fx\n", "import torch\n", "import torch as pt\n", - "from accelerate import Accelerator\n", "from datasets import Dataset, load_dataset, load_metric\n", - "from openfl.federated import PyTorchTaskRunner, TaskRunner\n", + "from openfl.federated import PyTorchTaskRunner\n", "from openfl.federated.task.runner_pt import change_tags\n", "from openfl.utilities import Metric, TensorKey\n", "from openfl.utilities.data_splitters import EqualNumPyDataSplitter\n", @@ -52,9 +51,9 @@ "from torch.utils.data import DataLoader\n", "from tqdm import tqdm\n", "import torch.nn as nn\n", - "\n", - "from transformers import (AutoConfig, AutoModelForSequenceClassification,\n", - " AutoTokenizer, DataCollatorWithPadding)" + "from transformers.trainer_pt_utils import get_parameter_names\n", + "from transformers import (AutoModelForSequenceClassification,\n", + " AutoTokenizer, DataCollatorWithPadding, get_scheduler)" ] }, { @@ -66,134 +65,9 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Creating Workspace Directories\n", - "Creating Workspace Templates\n", - "Collecting torch==1.13.1 (from -r /home/oamontoy/.local/workspace/requirements.txt (line 1))\n", - " Using cached torch-1.13.1-cp38-cp38-manylinux1_x86_64.whl (887.4 MB)\n", - "Requirement already satisfied: 
Create Signing Certificate\n", - "2.1 Create Directories\n", - "2.2 Create Database\n", - "2.3 Create Signing Certificate CSR\n", - "2.4 Sign Signing Certificate CSR\n", - "3 Create Certificate Chain\n", - "\n", - "Done.\n", - "Creating AGGREGATOR certificate key pair with following settings: CN=\u001b[31mm50cyp2sbstd\u001b[0m, SAN=\u001b[31mDNS:m50cyp2sbstd\u001b[0m\n", - " Writing AGGREGATOR certificate key pair to: \u001b[32m/home/oamontoy/workspace/cert/server\u001b[0m\n", - "The CSR Hash for file \u001b[32mserver/agg_m50cyp2sbstd.csr\u001b[0m = \u001b[31md49a1328c9e8ccfb65a4d583018704fd9d24b3301bb800ceb9f50b591937e1a5f8f419238b5e4c24af732693d37ce088\u001b[0m\n", - " Signing AGGREGATOR certificate\n", - "Creating COLLABORATOR certificate key pair with following settings: CN=\u001b[31mone\u001b[0m, SAN=\u001b[31mDNS:one\u001b[0m\n", - " Moving COLLABORATOR certificate to: \u001b[32m/home/oamontoy/workspace/cert/col_one\u001b[0m\n", - "The CSR Hash for file \u001b[32mcol_one.csr\u001b[0m = \u001b[31m0caea6371d4b13f51be51507794c4c18e0a9cb408f286f2f81a4b179380b15b3215e94d739ec952065fbc7eb3b2edbba\u001b[0m\n", - " Signing COLLABORATOR certificate\n", - "\n", - "Registering \u001b[32mone\u001b[0m in \u001b[32m/home/oamontoy/.local/workspace/plan/cols.yaml\u001b[0m\n", - "Creating COLLABORATOR certificate key pair with following settings: CN=\u001b[31mtwo\u001b[0m, SAN=\u001b[31mDNS:two\u001b[0m\n", - " Moving COLLABORATOR certificate to: \u001b[32m/home/oamontoy/workspace/cert/col_two\u001b[0m\n", - "The CSR Hash for file \u001b[32mcol_two.csr\u001b[0m = \u001b[31m3e6ffe3d25d39bb6f3f1fb851eb8da60d4cbf4e0bee78ad0f7731cc0e6bb47433830523f2c39dc0ca7f0ce79b69cc6c3\u001b[0m\n", - " Signing COLLABORATOR certificate\n", - "\n", - "Registering \u001b[32mtwo\u001b[0m in \u001b[32m/home/oamontoy/.local/workspace/plan/cols.yaml\u001b[0m\n" - ] - } - ], + "outputs": [], "source": [ "#Setup default workspace, logging, etc.\n", "fx.init('torch_cnn_mnist')" @@ -215,7 +89,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -242,7 +116,7 @@ " data_collator = DataCollatorWithPadding(tokenizer=tokenizer, padding=\"longest\")\n", " return data_collator, tokenized_datasets\n", "\n", - "base_model_name = \"roberta-large\"\n", + "base_model_name = \"roberta-base\"\n", "padding_side = \"right\"\n", "tokenizer = AutoTokenizer.from_pretrained(base_model_name, padding_side=padding_side)\n", "if getattr(tokenizer, \"pad_token_id\") is None:\n", @@ -259,7 +133,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -287,13 +161,13 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "class GlueMrpcFederatedDataset(DataLoader):\n", " def __init__(self, train_set, valid_set, batch_size, data_collator=None):\n", - " self.data_splitter = EqualNumPyDataSplitter()\n", + " self.data_splitter = EqualNumPyDataSplitter(shuffle=True)\n", " if isinstance(train_set,Dataset):\n", " self.train_set = GlueMrpc.from_dict(train_set.to_dict())\n", " else:\n", @@ -326,7 +200,7 @@ " return DataLoader(self.train_set, batch_size=self.batch_size, collate_fn=data_collator)\n", " \n", " def get_valid_loader(self):\n", - " return DataLoader(self.valid_set, collate_fn=data_collator)\n", + " return DataLoader(self.valid_set, batch_size=self.batch_size, collate_fn=data_collator)\n", " \n", " def get_train_data_size(self):\n", " return 
len(self.train_set)\n", @@ -334,7 +208,8 @@ " def get_valid_data_size(self):\n", " return len(self.valid_set)\n", " \n", - "fl_data = GlueMrpcFederatedDataset(train_set, valid_set, batch_size=32)" + "fl_data = GlueMrpcFederatedDataset(train_set, valid_set, batch_size=32)\n", + "metric = load_metric('glue', \"mrpc\")" ] }, { @@ -346,52 +221,82 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "class LLMTaskRunner(PyTorchTaskRunner):\n", - " def __init__(self, base_model_name, data_loader, device=None, metric=None, **kwargs):\n", - " kwargs['data_loader'] = data_loader\n", + " def __init__(\n", + " self, base_model_name, data_loader, device=None, metric=None, **kwargs\n", + " ):\n", + " kwargs[\"data_loader\"] = data_loader\n", " super().__init__(device, **kwargs)\n", " self.base_model_name = base_model_name\n", " self.metric = metric\n", " self._init_model()\n", " self._init_optimizer()\n", - " \n", + " self.save_models = []\n", + "\n", " def _init_model(self):\n", " model = AutoModelForSequenceClassification.from_pretrained(\n", - " self.base_model_name, return_dict=True)\n", - " peft_config = LoraConfig(task_type=TaskType.SEQ_CLS, inference_mode=False, r=16, lora_alpha=16, lora_dropout=0.1, bias=\"all\")\n", + " self.base_model_name, return_dict=True\n", + " )\n", + " peft_config = LoraConfig(\n", + " task_type=TaskType.SEQ_CLS,\n", + " inference_mode=False,\n", + " r=16,\n", + " lora_alpha=16,\n", + " lora_dropout=0.1,\n", + " bias=\"lora_only\",\n", + " )\n", " self.model = get_peft_model(model, peft_config)\n", - " \n", + "\n", " def _init_optimizer(self):\n", - " no_decay = [\"bias\", \"LayerNorm.weight\"]\n", + " ALL_LAYERNORM_LAYERS = [nn.LayerNorm]\n", + " decay_parameters = get_parameter_names(self.model, ALL_LAYERNORM_LAYERS)\n", + " decay_parameters = [name for name in decay_parameters if \"bias\" not in name]\n", + "\n", " optimizer_grouped_parameters = [\n", - " {\n", - " \"params\": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],\n", - " \"weight_decay\": 0.01,\n", - " },\n", - " {\n", - " \"params\": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],\n", - " \"weight_decay\": 0.0,\n", - " },\n", - " ]\n", - " self.optimizer = AdamW(optimizer_grouped_parameters, lr=0.01)\n", - " \n", + " {\n", + " \"params\": [\n", + " p\n", + " for n, p in self.model.named_parameters()\n", + " if (n in decay_parameters and p.requires_grad)\n", + " ],\n", + " \"weight_decay\": 0.01,\n", + " },\n", + " {\n", + " \"params\": [\n", + " p\n", + " for n, p in self.model.named_parameters()\n", + " if (n not in decay_parameters and p.requires_grad)\n", + " ],\n", + " \"weight_decay\": 0.0,\n", + " },\n", + " ]\n", + " self.optimizer = AdamW(optimizer_grouped_parameters, lr=0.001)\n", + " self.lr_scheduler = get_scheduler(\n", + " name=\"linear\",\n", + " optimizer=self.optimizer,\n", + " num_warmup_steps=0,\n", + " num_training_steps=len(self.data_loader.train_set) * 5,\n", + " )\n", + "\n", " self.training_round_completed = False\n", " self.initialize_tensorkeys_for_functions()\n", - " \n", + "\n", + " def train(self):\n", + " return self.model.train()\n", + "\n", " def state_dict(self):\n", " return get_peft_model_state_dict(self.model)\n", - " \n", + "\n", " def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True):\n", - " return set_peft_model_state_dict(\n", - " self.model, state_dict\n", - " )\n", - " \n", - " def validate(self, 
col_name, round_num, input_tensor_dict,\n", - " use_tqdm=False, **kwargs):\n", + " return set_peft_model_state_dict(self.model, state_dict)\n", + "\n", + " def validate(\n", + " self, col_name, round_num, input_tensor_dict, use_tqdm=False, **kwargs\n", + " ):\n", " \"\"\"Validate.\n", "\n", " Run validation of the model on the local data.\n", @@ -407,40 +312,42 @@ " local_output_dict: Tensors to maintain in the local TensorDB\n", "\n", " \"\"\"\n", + " self.save_models.append(input_tensor_dict.copy())\n", " self.rebuild_model(round_num, input_tensor_dict, validation=True)\n", " self.model.eval()\n", + " \n", + "\n", " self.model.to(self.device)\n", " val_score = 0\n", " total_samples = 0\n", "\n", " loader = self.data_loader.get_valid_loader()\n", " if use_tqdm:\n", - " loader = tqdm(loader, desc='validate')\n", + " loader = tqdm(loader, desc=\"validate\")\n", "\n", " with pt.no_grad():\n", " for sample in loader:\n", - " samples = sample['input_ids'].shape[0]\n", + " samples = sample[\"input_ids\"].shape[0]\n", " total_samples += samples\n", " output = self.model(**sample)\n", " # get the index of the max log-probability\n", " logits = output.logits\n", " predictions = torch.argmax(logits, dim=-1)\n", - " metric.add_batch(predictions=predictions, references=sample['labels'])\n", - " val_score = metric.compute()['accuracy']\n", + " metric.add_batch(predictions=predictions, references=sample[\"labels\"])\n", + " val_score = metric.compute()[\"accuracy\"]\n", "\n", " origin = col_name\n", - " suffix = 'validate'\n", - " if kwargs['apply'] == 'local':\n", - " suffix += '_local'\n", + " suffix = \"validate\"\n", + " if kwargs[\"apply\"] == \"local\":\n", + " suffix += \"_local\"\n", " else:\n", - " suffix += '_agg'\n", - " tags = ('metric',)\n", + " suffix += \"_agg\"\n", + " tags = (\"metric\",)\n", " tags = change_tags(tags, add_field=suffix)\n", " # TODO figure out a better way to pass in metric for this pytorch\n", " # validate function\n", " output_tensor_dict = {\n", - " TensorKey('acc', origin, round_num, True, tags):\n", - " np.array(val_score)\n", + " TensorKey(\"acc\", origin, round_num, True, tags): np.array(val_score)\n", " }\n", "\n", " # Empty list represents metrics that should only be stored locally\n", @@ -459,12 +366,13 @@ " \"\"\"\n", " losses = []\n", " for sample in batch_generator:\n", - " self.optimizer.zero_grad()\n", + " self.model.zero_grad()\n", " output = self.model(**sample)\n", " loss = output.loss\n", " loss.backward()\n", - " torch.nn.utils.clip_grad_norm_(self.model.parameters(),1.0)\n", - " self.model.zero_grad()\n", + " torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)\n", + " self.optimizer.step()\n", + " self.lr_scheduler.step()\n", " losses.append(loss.detach().cpu().numpy())\n", " loss = np.mean(losses)\n", " if self.model.config.problem_type == \"regression\":\n", @@ -474,10 +382,14 @@ " elif self.model.config.problem_type == \"multi_label_classification\":\n", " loss_fct = BCEWithLogitsLoss()\n", " return Metric(name=loss_fct._get_name(), value=np.array(loss))\n", - " \n", - " \n", - " def save_native(self, filepath, model_state_dict_key='model_state_dict',\n", - " optimizer_state_dict_key='optimizer_state_dict', **kwargs):\n", + "\n", + " def save_native(\n", + " self,\n", + " filepath,\n", + " model_state_dict_key=\"model_state_dict\",\n", + " optimizer_state_dict_key=\"optimizer_state_dict\",\n", + " **kwargs,\n", + " ):\n", " \"\"\"\n", " Save model and optimizer states in a picked file specified by the \\\n", " filepath. 
model_/optimizer_state_dicts are stored in the keys provided. \\\n", @@ -497,64 +409,18 @@ " \"\"\"\n", " pickle_dict = {\n", " model_state_dict_key: get_peft_model_state_dict(self.model),\n", - " optimizer_state_dict_key: self.optimizer.state_dict()\n", + " optimizer_state_dict_key: self.optimizer.state_dict(),\n", " }\n", " pt.save(pickle_dict, filepath)" ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/tmp/ipykernel_159004/1723172838.py:2: FutureWarning: load_metric is deprecated and will be removed in the next major version of datasets. Use 'evaluate.load' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate\n", - " metric = load_metric('glue', \"mrpc\")\n", - "Some weights of RobertaForSequenceClassification were not initialized from the model checkpoint at roberta-large and are newly initialized: ['classifier.dense.bias', 'classifier.out_proj.bias', 'classifier.out_proj.weight', 'classifier.dense.weight']\n", - "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" - ] - }, - { - "data": { - "text/html": [ - "
\n" - ], - "text/plain": [ - "\u001b[2;36m[08:48:34]\u001b[0m\u001b[2;36m \u001b[0m\u001b[31mWARNING \u001b[0m tried to remove tensor: __opt_state_needed not present in the tensor dict \u001b]8;id=38894;file:///home/oamontoy/workspace/sec-openfl/openfl/utilities/utils.py\u001b\\\u001b[2mutils.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=682120;file:///home/oamontoy/workspace/sec-openfl/openfl/utilities/utils.py#172\u001b\\\u001b[2m172\u001b[0m\u001b]8;;\u001b\\\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "num_collaborators = 2\n", - "metric = load_metric('glue', \"mrpc\")\n", "collaborator_models = [\n", " LLMTaskRunner(\n", " base_model_name,\n", @@ -567,25 +433,9 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Original training data size: 3668\n", - "Original validation data size: 1725\n", - "\n", - "Collaborator 0's training data size: 1834\n", - "Collaborator 0's validation data size: 863\n", - "\n", - "Collaborator 1's training data size: 1834\n", - "Collaborator 1's validation data size: 862\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "#Original TinyImageNet dataset\n", "print(f'Original training data size: {len(fl_data.train_set)}')\n", @@ -603,332 +453,12 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
\n" - ], - "text/plain": [ - "\u001b[2;36m \u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Run \u001b[1;36m0\u001b[0m epoch of \u001b[1;36m0\u001b[0m round \u001b]8;id=118270;file:///home/oamontoy/workspace/sec-openfl/openfl/federated/task/runner_pt.py\u001b\\\u001b[2mrunner_pt.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=848215;file:///home/oamontoy/workspace/sec-openfl/openfl/federated/task/runner_pt.py#155\u001b\\\u001b[2m155\u001b[0m\u001b]8;;\u001b\\\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "ename": "AttributeError", - "evalue": "'RobertaForSequenceClassification' object has no attribute 'step'", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", - "File \u001b[0;32m~/workspace/llama-env/lib/python3.8/site-packages/peft/peft_model.py:434\u001b[0m, in \u001b[0;36mPeftModel.__getattr__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 433\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m--> 434\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39msuper\u001b[39;49m()\u001b[39m.\u001b[39;49m\u001b[39m__getattr__\u001b[39;49m(name) \u001b[39m# defer to nn.Module's logic\u001b[39;00m\n\u001b[1;32m 435\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mAttributeError\u001b[39;00m:\n", - "File \u001b[0;32m~/workspace/llama-env/lib/python3.8/site-packages/torch/nn/modules/module.py:1695\u001b[0m, in \u001b[0;36m__getattr__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 1688\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mparameters\u001b[39m(\u001b[39mself\u001b[39m, recurse: \u001b[39mbool\u001b[39m \u001b[39m=\u001b[39m \u001b[39mTrue\u001b[39;00m) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m Iterator[Parameter]:\n\u001b[1;32m 1689\u001b[0m \u001b[39m \u001b[39m\u001b[39mr\u001b[39m\u001b[39m\"\"\"Returns an iterator over module parameters.\u001b[39;00m\n\u001b[1;32m 1690\u001b[0m \n\u001b[1;32m 1691\u001b[0m \u001b[39m This is typically passed to an optimizer.\u001b[39;00m\n\u001b[1;32m 1692\u001b[0m \n\u001b[1;32m 1693\u001b[0m \u001b[39m Args:\u001b[39;00m\n\u001b[1;32m 1694\u001b[0m \u001b[39m recurse (bool): if True, then yields parameters of this module\u001b[39;00m\n\u001b[0;32m-> 1695\u001b[0m \u001b[39m and all submodules. 
Otherwise, yields only parameters that\u001b[39;00m\n\u001b[1;32m 1696\u001b[0m \u001b[39m are direct members of this module.\u001b[39;00m\n\u001b[1;32m 1697\u001b[0m \n\u001b[1;32m 1698\u001b[0m \u001b[39m Yields:\u001b[39;00m\n\u001b[1;32m 1699\u001b[0m \u001b[39m Parameter: module parameter\u001b[39;00m\n\u001b[1;32m 1700\u001b[0m \n\u001b[1;32m 1701\u001b[0m \u001b[39m Example::\u001b[39;00m\n\u001b[1;32m 1702\u001b[0m \n\u001b[1;32m 1703\u001b[0m \u001b[39m >>> # xdoctest: +SKIP(\"undefined vars\")\u001b[39;00m\n\u001b[1;32m 1704\u001b[0m \u001b[39m >>> for param in model.parameters():\u001b[39;00m\n\u001b[1;32m 1705\u001b[0m \u001b[39m >>> print(type(param), param.size())\u001b[39;00m\n\u001b[1;32m 1706\u001b[0m \u001b[39m (20L,)\u001b[39;00m\n\u001b[1;32m 1707\u001b[0m \u001b[39m (20L, 1L, 5L, 5L)\u001b[39;00m\n\u001b[1;32m 1708\u001b[0m \n\u001b[1;32m 1709\u001b[0m \u001b[39m \"\"\"\u001b[39;00m\n\u001b[1;32m 1710\u001b[0m \u001b[39mfor\u001b[39;00m name, param \u001b[39min\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mnamed_parameters(recurse\u001b[39m=\u001b[39mrecurse):\n", - "\u001b[0;31mAttributeError\u001b[0m: 'PeftModelForSequenceClassification' object has no attribute 'step'", - "\nDuring handling of the above exception, another exception occurred:\n", - "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", - "File \u001b[0;32m~/workspace/llama-env/lib/python3.8/site-packages/peft/tuners/lora.py:492\u001b[0m, in \u001b[0;36mLoraModel.__getattr__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 491\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m--> 492\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39msuper\u001b[39;49m()\u001b[39m.\u001b[39;49m\u001b[39m__getattr__\u001b[39;49m(name) \u001b[39m# defer to nn.Module's logic\u001b[39;00m\n\u001b[1;32m 493\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mAttributeError\u001b[39;00m:\n", - "File \u001b[0;32m~/workspace/llama-env/lib/python3.8/site-packages/torch/nn/modules/module.py:1695\u001b[0m, in \u001b[0;36m__getattr__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 1688\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mparameters\u001b[39m(\u001b[39mself\u001b[39m, recurse: \u001b[39mbool\u001b[39m \u001b[39m=\u001b[39m \u001b[39mTrue\u001b[39;00m) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m Iterator[Parameter]:\n\u001b[1;32m 1689\u001b[0m \u001b[39m \u001b[39m\u001b[39mr\u001b[39m\u001b[39m\"\"\"Returns an iterator over module parameters.\u001b[39;00m\n\u001b[1;32m 1690\u001b[0m \n\u001b[1;32m 1691\u001b[0m \u001b[39m This is typically passed to an optimizer.\u001b[39;00m\n\u001b[1;32m 1692\u001b[0m \n\u001b[1;32m 1693\u001b[0m \u001b[39m Args:\u001b[39;00m\n\u001b[1;32m 1694\u001b[0m \u001b[39m recurse (bool): if True, then yields parameters of this module\u001b[39;00m\n\u001b[0;32m-> 1695\u001b[0m \u001b[39m and all submodules. 
Otherwise, yields only parameters that\u001b[39;00m\n\u001b[1;32m 1696\u001b[0m \u001b[39m are direct members of this module.\u001b[39;00m\n\u001b[1;32m 1697\u001b[0m \n\u001b[1;32m 1698\u001b[0m \u001b[39m Yields:\u001b[39;00m\n\u001b[1;32m 1699\u001b[0m \u001b[39m Parameter: module parameter\u001b[39;00m\n\u001b[1;32m 1700\u001b[0m \n\u001b[1;32m 1701\u001b[0m \u001b[39m Example::\u001b[39;00m\n\u001b[1;32m 1702\u001b[0m \n\u001b[1;32m 1703\u001b[0m \u001b[39m >>> # xdoctest: +SKIP(\"undefined vars\")\u001b[39;00m\n\u001b[1;32m 1704\u001b[0m \u001b[39m >>> for param in model.parameters():\u001b[39;00m\n\u001b[1;32m 1705\u001b[0m \u001b[39m >>> print(type(param), param.size())\u001b[39;00m\n\u001b[1;32m 1706\u001b[0m \u001b[39m (20L,)\u001b[39;00m\n\u001b[1;32m 1707\u001b[0m \u001b[39m (20L, 1L, 5L, 5L)\u001b[39;00m\n\u001b[1;32m 1708\u001b[0m \n\u001b[1;32m 1709\u001b[0m \u001b[39m \"\"\"\u001b[39;00m\n\u001b[1;32m 1710\u001b[0m \u001b[39mfor\u001b[39;00m name, param \u001b[39min\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mnamed_parameters(recurse\u001b[39m=\u001b[39mrecurse):\n", - "\u001b[0;31mAttributeError\u001b[0m: 'LoraModel' object has no attribute 'step'", - "\nDuring handling of the above exception, another exception occurred:\n", - "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", - "\u001b[1;32m/home/oamontoy/workspace/sec-openfl/openfl-tutorials/Federated_PyTorch_LLM.ipynb Cell 18\u001b[0m line \u001b[0;36m2\n\u001b[1;32m 1\u001b[0m \u001b[39m#Run experiment, return trained FederatedModel\u001b[39;00m\n\u001b[0;32m----> 2\u001b[0m final_fl_model \u001b[39m=\u001b[39m fx\u001b[39m.\u001b[39;49mrun_experiment(collaborators,{\u001b[39m'\u001b[39;49m\u001b[39maggregator.settings.rounds_to_train\u001b[39;49m\u001b[39m'\u001b[39;49m:\u001b[39m3\u001b[39;49m})\n", - "File \u001b[0;32m~/workspace/sec-openfl/openfl/native/native.py:295\u001b[0m, in \u001b[0;36mrun_experiment\u001b[0;34m(collaborator_dict, override_config)\u001b[0m\n\u001b[1;32m 293\u001b[0m \u001b[39mfor\u001b[39;00m col \u001b[39min\u001b[39;00m plan\u001b[39m.\u001b[39mauthorized_cols:\n\u001b[1;32m 294\u001b[0m collaborator \u001b[39m=\u001b[39m collaborators[col]\n\u001b[0;32m--> 295\u001b[0m collaborator\u001b[39m.\u001b[39;49mrun_simulation()\n\u001b[1;32m 297\u001b[0m \u001b[39m# Set the weights for the final model\u001b[39;00m\n\u001b[1;32m 298\u001b[0m model\u001b[39m.\u001b[39mrebuild_model(\n\u001b[1;32m 299\u001b[0m rounds_to_train \u001b[39m-\u001b[39m \u001b[39m1\u001b[39m, aggregator\u001b[39m.\u001b[39mlast_tensor_dict, validation\u001b[39m=\u001b[39m\u001b[39mTrue\u001b[39;00m)\n", - "File \u001b[0;32m~/workspace/sec-openfl/openfl/component/collaborator/collaborator.py:170\u001b[0m, in \u001b[0;36mCollaborator.run_simulation\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 168\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mlogger\u001b[39m.\u001b[39minfo(\u001b[39mf\u001b[39m\u001b[39m'\u001b[39m\u001b[39mReceived the following tasks: \u001b[39m\u001b[39m{\u001b[39;00mtasks\u001b[39m}\u001b[39;00m\u001b[39m'\u001b[39m)\n\u001b[1;32m 169\u001b[0m \u001b[39mfor\u001b[39;00m task \u001b[39min\u001b[39;00m tasks:\n\u001b[0;32m--> 170\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mdo_task(task, round_number)\n\u001b[1;32m 171\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mlogger\u001b[39m.\u001b[39minfo(\u001b[39mf\u001b[39m\u001b[39m'\u001b[39m\u001b[39mAll tasks completed on 
\u001b[39m\u001b[39m{\u001b[39;00m\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mcollaborator_name\u001b[39m}\u001b[39;00m\u001b[39m \u001b[39m\u001b[39m'\u001b[39m\n\u001b[1;32m 172\u001b[0m \u001b[39mf\u001b[39m\u001b[39m'\u001b[39m\u001b[39mfor round \u001b[39m\u001b[39m{\u001b[39;00mround_number\u001b[39m}\u001b[39;00m\u001b[39m...\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[1;32m 173\u001b[0m \u001b[39mbreak\u001b[39;00m\n", - "File \u001b[0;32m~/workspace/sec-openfl/openfl/component/collaborator/collaborator.py:255\u001b[0m, in \u001b[0;36mCollaborator.do_task\u001b[0;34m(self, task, round_number)\u001b[0m\n\u001b[1;32m 252\u001b[0m func \u001b[39m=\u001b[39m \u001b[39mgetattr\u001b[39m(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mtask_runner, func_name)\n\u001b[1;32m 253\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mlogger\u001b[39m.\u001b[39minfo(\u001b[39m'\u001b[39m\u001b[39mUsing TaskRunner subclassing API\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[0;32m--> 255\u001b[0m global_output_tensor_dict, local_output_tensor_dict \u001b[39m=\u001b[39m func(\n\u001b[1;32m 256\u001b[0m col_name\u001b[39m=\u001b[39;49m\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mcollaborator_name,\n\u001b[1;32m 257\u001b[0m round_num\u001b[39m=\u001b[39;49mround_number,\n\u001b[1;32m 258\u001b[0m input_tensor_dict\u001b[39m=\u001b[39;49minput_tensor_dict,\n\u001b[1;32m 259\u001b[0m \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m 261\u001b[0m \u001b[39m# Save global and local output_tensor_dicts to TensorDB\u001b[39;00m\n\u001b[1;32m 262\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mtensor_db\u001b[39m.\u001b[39mcache_tensor(global_output_tensor_dict)\n", - "File \u001b[0;32m~/workspace/sec-openfl/openfl/federated/task/runner_pt.py:159\u001b[0m, in \u001b[0;36mPyTorchTaskRunner.train_batches\u001b[0;34m(self, col_name, round_num, input_tensor_dict, use_tqdm, epochs, **kwargs)\u001b[0m\n\u001b[1;32m 157\u001b[0m \u001b[39mif\u001b[39;00m use_tqdm:\n\u001b[1;32m 158\u001b[0m loader \u001b[39m=\u001b[39m tqdm\u001b[39m.\u001b[39mtqdm(loader, desc\u001b[39m=\u001b[39m\u001b[39m'\u001b[39m\u001b[39mtrain epoch\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[0;32m--> 159\u001b[0m metric \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mtrain_epoch(loader)\n\u001b[1;32m 160\u001b[0m \u001b[39m# Output metric tensors (scalar)\u001b[39;00m\n\u001b[1;32m 161\u001b[0m origin \u001b[39m=\u001b[39m col_name\n", - "\u001b[1;32m/home/oamontoy/workspace/sec-openfl/openfl-tutorials/Federated_PyTorch_LLM.ipynb Cell 18\u001b[0m line \u001b[0;36m1\n\u001b[1;32m 113\u001b[0m loss\u001b[39m.\u001b[39mbackward()\n\u001b[1;32m 114\u001b[0m torch\u001b[39m.\u001b[39mnn\u001b[39m.\u001b[39mutils\u001b[39m.\u001b[39mclip_grad_norm_(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mmodel\u001b[39m.\u001b[39mparameters(),\u001b[39m1.0\u001b[39m)\n\u001b[0;32m--> 115\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mmodel\u001b[39m.\u001b[39;49mstep()\n\u001b[1;32m 116\u001b[0m losses\u001b[39m.\u001b[39mappend(loss\u001b[39m.\u001b[39mdetach()\u001b[39m.\u001b[39mcpu()\u001b[39m.\u001b[39mnumpy())\n\u001b[1;32m 117\u001b[0m loss \u001b[39m=\u001b[39m np\u001b[39m.\u001b[39mmean(losses)\n", - "File \u001b[0;32m~/workspace/llama-env/lib/python3.8/site-packages/peft/peft_model.py:436\u001b[0m, in \u001b[0;36mPeftModel.__getattr__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 434\u001b[0m \u001b[39mreturn\u001b[39;00m 
\u001b[39msuper\u001b[39m()\u001b[39m.\u001b[39m\u001b[39m__getattr__\u001b[39m(name) \u001b[39m# defer to nn.Module's logic\u001b[39;00m\n\u001b[1;32m 435\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mAttributeError\u001b[39;00m:\n\u001b[0;32m--> 436\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mgetattr\u001b[39;49m(\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mbase_model, name)\n", - "File \u001b[0;32m~/workspace/llama-env/lib/python3.8/site-packages/peft/tuners/lora.py:494\u001b[0m, in \u001b[0;36mLoraModel.__getattr__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 492\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39msuper\u001b[39m()\u001b[39m.\u001b[39m\u001b[39m__getattr__\u001b[39m(name) \u001b[39m# defer to nn.Module's logic\u001b[39;00m\n\u001b[1;32m 493\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mAttributeError\u001b[39;00m:\n\u001b[0;32m--> 494\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mgetattr\u001b[39;49m(\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mmodel, name)\n", - "File \u001b[0;32m~/workspace/llama-env/lib/python3.8/site-packages/torch/nn/modules/module.py:1695\u001b[0m, in \u001b[0;36m__getattr__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 1688\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mparameters\u001b[39m(\u001b[39mself\u001b[39m, recurse: \u001b[39mbool\u001b[39m \u001b[39m=\u001b[39m \u001b[39mTrue\u001b[39;00m) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m Iterator[Parameter]:\n\u001b[1;32m 1689\u001b[0m \u001b[39m \u001b[39m\u001b[39mr\u001b[39m\u001b[39m\"\"\"Returns an iterator over module parameters.\u001b[39;00m\n\u001b[1;32m 1690\u001b[0m \n\u001b[1;32m 1691\u001b[0m \u001b[39m This is typically passed to an optimizer.\u001b[39;00m\n\u001b[1;32m 1692\u001b[0m \n\u001b[1;32m 1693\u001b[0m \u001b[39m Args:\u001b[39;00m\n\u001b[1;32m 1694\u001b[0m \u001b[39m recurse (bool): if True, then yields parameters of this module\u001b[39;00m\n\u001b[0;32m-> 1695\u001b[0m \u001b[39m and all submodules. 
Otherwise, yields only parameters that\u001b[39;00m\n\u001b[1;32m 1696\u001b[0m \u001b[39m are direct members of this module.\u001b[39;00m\n\u001b[1;32m 1697\u001b[0m \n\u001b[1;32m 1698\u001b[0m \u001b[39m Yields:\u001b[39;00m\n\u001b[1;32m 1699\u001b[0m \u001b[39m Parameter: module parameter\u001b[39;00m\n\u001b[1;32m 1700\u001b[0m \n\u001b[1;32m 1701\u001b[0m \u001b[39m Example::\u001b[39;00m\n\u001b[1;32m 1702\u001b[0m \n\u001b[1;32m 1703\u001b[0m \u001b[39m >>> # xdoctest: +SKIP(\"undefined vars\")\u001b[39;00m\n\u001b[1;32m 1704\u001b[0m \u001b[39m >>> for param in model.parameters():\u001b[39;00m\n\u001b[1;32m 1705\u001b[0m \u001b[39m >>> print(type(param), param.size())\u001b[39;00m\n\u001b[1;32m 1706\u001b[0m \u001b[39m (20L,)\u001b[39;00m\n\u001b[1;32m 1707\u001b[0m \u001b[39m (20L, 1L, 5L, 5L)\u001b[39;00m\n\u001b[1;32m 1708\u001b[0m \n\u001b[1;32m 1709\u001b[0m \u001b[39m \"\"\"\u001b[39;00m\n\u001b[1;32m 1710\u001b[0m \u001b[39mfor\u001b[39;00m name, param \u001b[39min\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mnamed_parameters(recurse\u001b[39m=\u001b[39mrecurse):\n\u001b[1;32m 1711\u001b[0m \u001b[39myield\u001b[39;00m param\n", - "\u001b[0;31mAttributeError\u001b[0m: 'RobertaForSequenceClassification' object has no attribute 'step'" - ] - } - ], + "outputs": [], "source": [ "#Run experiment, return trained FederatedModel\n", - "final_fl_model = fx.run_experiment(collaborators,{'aggregator.settings.rounds_to_train':3})" + "final_fl_model = fx.run_experiment(collaborators,{'aggregator.settings.rounds_to_train':1,\"tasks.train.kwargs.epochs\":10})" ] }, { From 00a7d92a978400071c1036828d7ff23c5dde8542 Mon Sep 17 00:00:00 2001 From: porteratzo Date: Thu, 26 Oct 2023 09:52:32 -0700 Subject: [PATCH 4/7] changes --- openfl-tutorials/Federated_PyTorch_LLM.ipynb | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/openfl-tutorials/Federated_PyTorch_LLM.ipynb b/openfl-tutorials/Federated_PyTorch_LLM.ipynb index 78b9978604..ba5b2bede9 100644 --- a/openfl-tutorials/Federated_PyTorch_LLM.ipynb +++ b/openfl-tutorials/Federated_PyTorch_LLM.ipynb @@ -4,14 +4,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Federated PyTorch TinyImageNet Tutorial" + "# Federated PyTorch LLM Tutorial" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "This notebook is an example of Transfer Learning \n", + "This notebook is an example of LLM fine-tuning\n", "\n", "Custom DataLoader is used with OpenFL Python API" ] @@ -77,7 +77,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now we are ready to define our dataset and model to perform federated learning on. The dataset should be composed of a numpy arrayWe start with a simple fully connected model that is trained on the MNIST dataset. " + "Now we are ready to define our dataset and model to perform federated learning on. The dataset should be composed of a numpy arrayWe start with a simple Roberta model that is trained on the glue mrpc dataset. 
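The override dictionary passed to `fx.run_experiment` uses dot-separated paths into the workspace's FL plan: `aggregator.settings.rounds_to_train` sets the number of aggregation rounds, and anything under `tasks.train.kwargs` is forwarded to the training task as keyword arguments. A minimal sketch of the pattern, assuming `collaborators` is the collaborator mapping built earlier in the notebook (the values are illustrative, not a recommendation):

```python
# Sketch only: dot-separated keys override individual settings of the FL plan.
override_config = {
    "aggregator.settings.rounds_to_train": 10,  # number of federation rounds
    "tasks.train.kwargs.epochs": 2,             # local epochs per collaborator per round
}
final_fl_model = fx.run_experiment(collaborators, override_config)
```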
" ] }, { @@ -458,7 +458,7 @@ "outputs": [], "source": [ "#Run experiment, return trained FederatedModel\n", - "final_fl_model = fx.run_experiment(collaborators,{'aggregator.settings.rounds_to_train':1,\"tasks.train.kwargs.epochs\":10})" + "final_fl_model = fx.run_experiment(collaborators,{'aggregator.settings.rounds_to_train':10,\"tasks.train.kwargs.epochs\":2})" ] }, { From d1b0b1e9d9a6e7f420d32242b0e22c1932f14a43 Mon Sep 17 00:00:00 2001 From: porteratzo Date: Mon, 27 May 2024 10:58:41 -0700 Subject: [PATCH 5/7] Phi finetuning demo --- .../Phi3/Workflow_Interface_Phi3.ipynb | 639 ++++++++++++++++++ 1 file changed, 639 insertions(+) create mode 100644 openfl-tutorials/experimental/Phi3/Workflow_Interface_Phi3.ipynb diff --git a/openfl-tutorials/experimental/Phi3/Workflow_Interface_Phi3.ipynb b/openfl-tutorials/experimental/Phi3/Workflow_Interface_Phi3.ipynb new file mode 100644 index 0000000000..ebba043085 --- /dev/null +++ b/openfl-tutorials/experimental/Phi3/Workflow_Interface_Phi3.ipynb @@ -0,0 +1,639 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "14821d97", + "metadata": {}, + "source": [ + "# Workflow Interface\n", + "## Fine-tuning Phi3 using OpenFL\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/intel/openfl/blob/develop/openfl-tutorials/experimental/Phi3/Workflow_Interface_Phi3.ipynb)" + ] + }, + { + "cell_type": "markdown", + "id": "bd059520", + "metadata": {}, + "source": [ + "In this tutorial, we build on the ideas from the [first](https://github.com/intel/openfl/blob/develop/openfl-tutorials/experimental/Workflow_Interface_101_MNIST.ipynb) quick start notebook, and demonstrate how to fine-tune a Large Language Model (LLM) in a federated learning workflow. \n", + "\n", + "We will fine-tune **Microsoft's [Phi3](https://huggingface.co/docs/transformers/main/en/model_doc/phi3)** model on the [Math_10k](https://github.com/AGI-Edgerunners/LLM-Adapters/tree/main) dataset, an open-source mathematical question-answer pair dataset collected from multiple smaller math datasets." + ] + }, + { + "cell_type": "markdown", + "id": "39c3d86a", + "metadata": {}, + "source": [ + "# What is it?" + ] + }, + { + "cell_type": "markdown", + "id": "a7989e72", + "metadata": {}, + "source": [ + "The workflow interface is a new way of composing federated learning expermients with OpenFL. It was borne through conversations with researchers and existing users who had novel use cases that didn't quite fit the standard horizontal federated learning paradigm. 
" + ] + }, + { + "cell_type": "markdown", + "id": "124ae236-2e33-4349-9979-f506d796276d", + "metadata": {}, + "source": [ + "### Installing OpenFL\n", + "- Lets now install OpenFL and the necessary dependencies for the workflow interface by running the cell below:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c808dd12-6795-4203-9221-0f6b43fc785f", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install git+https://github.com/intel/openfl.git\n", + "!cd /home/oamontoy/workspace/openfl/openfl-tutorials/experimental/Phi3 && pip install -r ../requirements_workflow_interface.txt\n", + "!pip install numpy --upgrade\n", + "!pip install transformers peft datasets trl" + ] + }, + { + "cell_type": "markdown", + "id": "fc8e35da", + "metadata": {}, + "source": [ + "## Import libraries" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0c5c9347", + "metadata": {}, + "outputs": [], + "source": [ + "import hashlib\n", + "import os\n", + "\n", + "import numpy as np\n", + "import requests\n", + "import torch\n", + "import transformers\n", + "from datasets import load_dataset\n", + "from peft import LoraConfig, get_peft_model\n", + "from peft.utils import get_peft_model_state_dict, set_peft_model_state_dict\n", + "from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments\n", + "from transformers.trainer_callback import PrinterCallback\n", + "from trl import SFTTrainer\n", + "\n", + "from openfl.experimental.interface import Aggregator, Collaborator, FLSpec\n", + "from openfl.experimental.placement import aggregator, collaborator\n", + "from openfl.experimental.runtime import LocalRuntime" + ] + }, + { + "cell_type": "markdown", + "id": "b8c24994-1b30-4f03-82ba-5a58bb347b70", + "metadata": {}, + "source": [ + "### Acquiring and preprocessing dataset\n", + "We can download the dataset directly from the [LLM-Adapters\n", + " repository](https://github.com/AGI-Edgerunners/LLM-Adapters)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a6674c17-1652-4e87-a885-bc10bf3624c6", + "metadata": {}, + "outputs": [], + "source": [ + "def file_checksum(file_path, algorithm=\"sha256\"):\n", + " hash_func = hashlib.new(algorithm)\n", + " with open(file_path, \"rb\") as f:\n", + " for chunk in iter(lambda: f.read(4096), b\"\"):\n", + " hash_func.update(chunk)\n", + " return hash_func.hexdigest()\n", + "\n", + "\n", + "if not os.path.exists(\"math_10k.json\"):\n", + " r = requests.get(\n", + " \"https://raw.githubusercontent.com/AGI-Edgerunners/LLM-Adapters/main/ft-training_set/math_10k.json\",\n", + " # \"math_10k.json\", timeout=10\n", + " )\n", + " with open(\n", + " \"math_10k.json\",\n", + " \"wb\",\n", + " ) as f:\n", + " f.write(r.content)\n", + "\n", + " actual_checksum = file_checksum(\"math_10k.json\")\n", + " if (\n", + " actual_checksum\n", + " != \"0342d0d860ad8592b579329337c90e42eefd3d9f2898043140cbd120630418b8\"\n", + " ):\n", + " raise ValueError(\n", + " \"Checksum verification failed. 
The file may have been altered.\"\n", + " )\n", + "\n", + "raw_dataset = load_dataset(\"json\", data_files=\"math_10k.json\")" + ] + }, + { + "cell_type": "markdown", + "id": "c9c16b3f-963e-4531-94a5-258d7f61fe08", + "metadata": {}, + "source": [ + "## Initialize arguments and configurations" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8a9fb316-c274-4236-a468-8fadccf27c1f", + "metadata": {}, + "outputs": [], + "source": [ + "training_config = {\n", + " \"bf16\": True,\n", + " \"use_ipex\": False,\n", + " \"use_cpu\": True,\n", + " \"do_eval\": False,\n", + " \"learning_rate\": 5.0e-06,\n", + " \"log_level\": \"info\",\n", + " \"logging_steps\": 20,\n", + " \"logging_strategy\": \"steps\",\n", + " \"lr_scheduler_type\": \"cosine\",\n", + " \"num_train_epochs\": 1,\n", + " \"max_steps\": -1,\n", + " \"output_dir\": \"./checkpoint_dir\",\n", + " \"overwrite_output_dir\": True,\n", + " \"per_device_eval_batch_size\": 1,\n", + " \"per_device_train_batch_size\": 1,\n", + " \"remove_unused_columns\": True,\n", + " \"save_steps\": 100,\n", + " \"save_total_limit\": 1,\n", + " \"seed\": 0,\n", + " \"gradient_checkpointing\": True,\n", + " \"gradient_checkpointing_kwargs\": {\"use_reentrant\": False},\n", + " \"gradient_accumulation_steps\": 1,\n", + " \"warmup_ratio\": 0.2,\n", + "}\n", + "\n", + "peft_config = {\n", + " \"r\": 1,\n", + " \"lora_alpha\": 2,\n", + " \"lora_dropout\": 0.05,\n", + " \"bias\": \"none\",\n", + " \"task_type\": \"CAUSAL_LM\",\n", + " \"target_modules\": \"all-linear\",\n", + " \"modules_to_save\": None,\n", + "}\n", + "model_kwargs = dict(\n", + " use_cache=False,\n", + " trust_remote_code=True,\n", + " torch_dtype=torch.bfloat16,\n", + " device_map=None,\n", + ")\n", + "train_conf = TrainingArguments(**training_config)\n", + "peft_conf = LoraConfig(**peft_config)" + ] + }, + { + "cell_type": "markdown", + "id": "ab360bb3-bdf6-4aed-966c-e21fc4d51847", + "metadata": {}, + "source": [ + "## Load and initialize model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7fddae57-ba98-4445-a37d-2f9e188c2cd8", + "metadata": {}, + "outputs": [], + "source": [ + "checkpoint_path = \"microsoft/Phi-3-mini-4k-instruct\"\n", + "model = AutoModelForCausalLM.from_pretrained(\n", + " checkpoint_path, return_dict=True, **model_kwargs\n", + ")\n", + "model = get_peft_model(model, peft_conf)\n", + "\n", + "tokenizer = AutoTokenizer.from_pretrained(checkpoint_path)\n", + "sequence_max_length = 512\n", + "val_set_size = 2000\n", + "tokenizer.pad_token_id = 0 # unk. we want this to be different from the eos token\n", + "tokenizer.padding_side = \"left\" # Allow batched inference" + ] + }, + { + "cell_type": "markdown", + "id": "ee99692c-0f1f-46d7-86c9-c2e0bfc52839", + "metadata": {}, + "source": [ + "## Preprocess dataset" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ee344bba-89c2-45e9-946f-262ca81b93fc", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "def generate_prompt(data_point):\n", + " if data_point[\"input\"]:\n", + " return f\"\"\"Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. \n", + "\n", + " ### Instruction:\n", + " {data_point[\"instruction\"]}\n", + " \n", + " ### Input:\n", + " {data_point[\"input\"]}\n", + " \n", + " ### Response:\n", + " {data_point[\"output\"]}\"\"\"\n", + " else:\n", + " return f\"\"\"Below is an instruction that describes a task. 
Write a response that appropriately completes the request. \n", + "\n", + " ### Instruction:\n", + " {data_point[\"instruction\"]}\n", + " \n", + " ### Response:\n", + " {data_point[\"output\"]}\"\"\"\n", + "\n", + "\n", + "def tokenize(prompt, add_eos_token=True):\n", + " # there's probably a way to do this with the tokenizer settings\n", + " # but again, gotta move fast\n", + " result = tokenizer(\n", + " prompt,\n", + " truncation=True,\n", + " max_length=sequence_max_length,\n", + " padding=False,\n", + " return_tensors=None,\n", + " )\n", + " if (\n", + " result[\"input_ids\"][-1] != tokenizer.eos_token_id\n", + " and len(result[\"input_ids\"]) < sequence_max_length\n", + " and add_eos_token\n", + " ):\n", + " result[\"input_ids\"].append(tokenizer.eos_token_id)\n", + " result[\"attention_mask\"].append(1)\n", + "\n", + " result[\"labels\"] = result[\"input_ids\"].copy()\n", + "\n", + " return result\n", + "\n", + "\n", + "def generate_and_tokenize_prompt(data_point):\n", + " full_prompt = generate_prompt(data_point)\n", + " tokenized_full_prompt = tokenize(full_prompt)\n", + " user_prompt = generate_prompt({**data_point, \"output\": \"\"})\n", + " tokenized_user_prompt = tokenize(user_prompt, add_eos_token=False)\n", + " user_prompt_len = len(tokenized_user_prompt[\"input_ids\"])\n", + "\n", + " tokenized_full_prompt[\"labels\"] = [-100] * user_prompt_len + tokenized_full_prompt[\n", + " \"labels\"\n", + " ][user_prompt_len:]\n", + " return tokenized_full_prompt\n", + "\n", + "\n", + "train_val = raw_dataset[\"train\"].train_test_split(\n", + " test_size=val_set_size, shuffle=True, seed=42\n", + ")\n", + "\n", + "processed_train_dataset = train_val[\"train\"].shuffle().map(generate_and_tokenize_prompt)\n", + "processed_test_dataset = train_val[\"test\"].shuffle().map(generate_and_tokenize_prompt)\n" + ] + }, + { + "cell_type": "markdown", + "id": "df64996d-3afa-443e-b897-761af04793c0", + "metadata": {}, + "source": [ + "Next we import the `FLSpec`, `LocalRuntime`, and placement decorators.\n", + "\n", + "- `FLSpec` – Defines the flow specification. User defined flows are subclasses of this.\n", + "- `Runtime` – Defines where the flow runs, infrastructure for task transitions (how information gets sent). 
The `LocalRuntime` runs the flow on a single node.\n", + "- `aggregator/collaborator` - placement decorators that define where the task will be assigned" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c4064a34-f152-4da3-a5c6-285ac5ffc280", + "metadata": {}, + "outputs": [], + "source": [ + "def FedAvg(peft_params, model, weights=None):\n", + " state_dicts = peft_params\n", + " state_dict = get_peft_model_state_dict(model)\n", + " for key in peft_params[0]:\n", + " dtype = state_dicts[0][key].dtype\n", + " state_dict[key] = torch.from_numpy(\n", + " np.average(\n", + " [state[key].to(torch.float).numpy() for state in state_dicts], axis=0, weights=weights\n", + " )\n", + " ).to(dtype)\n", + " set_peft_model_state_dict(model, state_dict)\n", + " return model" + ] + }, + { + "attachments": { + "image.png": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAt0AAAI6CAYAAAD7dvTIAAAgAElEQVR4nOzde3RUVZ43/C8mIVW5VlJFQqBCCk3R3BKCIB0gQRAaW2Y6QDuCtHar8D79rofhomucfp9xtThqr561enyWArbvWj1L2m7bB9RRSGba+wUJQqRBYkBAKkoCBSFQRSrXSkjFPH8Ue3NO3VKV5FTl8v2s5ZJUqs4+lfrVPr/z2/vsM6a3t7cXRERERESkmVtivQNERERERCMdk24iIiIiIo0x6SYiIiIi0hiTbiIiIiIijTHpJiIiIiLSGJNuIiIiIiKNMekmIiIiItIYk24iIiIiIo0x6SYiIiIi0hiTbiIiIiIijTHpJiIiIiLSGJNuIiIiIiKNMekmIiIiItIYk24iIiIiIo0x6SYiIiIi0hiTbiIiIiIijTHpJiIiIiLSGJNuIiIiIiKNMekmIiIiItIYk24iIiIiIo0x6SYiIiIi0hiTbiIiIiIijTHpJiIiIiLSGJNuIiIiIiKNMekmIiIiItIYk24iIiIiIo0x6SYiIiIi0hiTbiIiIiIijTHpJiIiIiLSGJNuIiIiIiKNMekmIiIiItIYk24iIiIiIo0x6SYiIiIi0hiTbiIiIiIijTHpJiIiIiLSWHysd4CIhof9tS7sr3Wh/KQTddc6AQAutyfGe0UUHoM+HovzDbBk6rByphGL8w2x3iUiGmXG9Pb29sZ6J4ho6Npf68Jj+75F9cW2WO8K0aApmpiCrYsm4uF542O9K0Q0SjDpJqKAXG4PHtn9DfadcMR6V4g0UzQxBZ/+4ywY9Bz4JSJtMekmIj/VF9uwetfXchoJ0Uhm0Mfj03+chaKJKbHeFSIawZh0E5GKy+3B7OeOMeGmUcWgj8fxx+fAkqmL9a4Q0QjF1UuISOWR3d8w4aZRR0ynIiLSCpNuIpL2nXBwDjeNWvtrXXjlyOVY7wYRjVBMuolIevr9+ljvAlFMPbbv21jvAhGNUEy6iQiAt8rNZQFptHO5Pax2E5EmmHQTEQAw4Sa64bNvm2O9C0Q0AjHpJiIAwFeX2mO9C0RDwv5aV6x3gYhGICbdRASAlW4iweX2xHoXiGgEYtJNRACYaBAJ/C4QkRaYdBMRACYaREREWmLSTURERESksfhY7wAR0XC086f5WHSbAfkmPZLGeusXzvZuXGy+jgPfurD57dqgr33mHgsmZ+rw89fORGt3Y9ImERHdxKSbiCgCeRk6HNxSBLMh0e93xuQEGJMTUDghGasKTCjZUY36pk7Vcz76n4VYOiUDFSed0drlmLRJRERqTLqJiCLw4f8shNmQiI7r32PXFw14o/oqKr/zrutcems6Hl+Si2VTMmA2JOLgliLkPl2lev2kDF3U9zkWbRIRkRrndBMRhWnt7HGwjtMDANbvOYPNb9fKhBsAKr9rxsqXT+Jf/vodAMBsSMQz91hisq9ERDS0MOkmIgrTz27PBgDYXV14/fjVoM/bceAi7K4uAMDkTFaZiYgIGNPb29sb650gotgb89hnsd6FIW/nT/OxqXQiAMDyzBd+87VD2bJoIravzvd73HbVjSm/PSJ/Lr01Hb8ruxXm9ETVvHHbVTe+qG/xuxCyfMNMlM00ouKkE19dasPGhRNgTE6As70bNocbxXlpfbZJ/nqfvzPWu0BEIwwr3UREYXqj+mZ1++CWooimjjS2Xoftqhsd178H4F3pxHbVjfOKxH3nT/NxYHMRivPSkJmUANtVt3yNdZweD87Nxtkn5gXc/rTsJDy5PA+AN6nWJ8ShVvH6YG0SEVF0sNJNRABY6Q6Xstot2K66cbqxAx/bmrDjwMWQrz/7xDxYx+lRcdKJlS+flI/nZehw6n/dgaSxt/j9DrhZ0QaA+/98Sk5vUT5ec6kds/79qNyeqMQHa5OCY6WbiAYbK91ERBHY/HYttu6tlXO2AcA6To+ymUZsX52P3ufvxFf/PBdbFk0MsRV/G4rH41pHN5zt3QET45Uvn5QV6xnjkwNu47cf1ct/RzL1hYiItMekm4goQjsOXETu01VYtLMafznaiJpL7TIhBoDCCcnYvjofhx+dHfY2t71bh9ynq2D69aGgz7nY7E30Z01ICfj7UBd3EhFRbHGdbiKifqr8rlm1ZODa2ePw99ONuGdaJozJCSjOS0P5hpn9mtKxdvY4ZKeOxR25qchJG4sfZCUFvCGPYLvq7td7ICKi6GDSTUQ0SF4/fhWvH7+KvAwdKv6fmSickIxlUzLCfr1YuSTQiiMd179Hx/Xv5S3niYhoeGHvTUQUJsdvFqD3+Tv7nK9d39SJl79oAICwk+S1s8fhvf+3UCbctqtuVJx04i9HG7F1by2S/79KOb2EiIiGH1a6iYjCpE+IAwAstWb0uUpJpJ5YloeksbfA7upCyY7qgBdCTkwPPr2EiIiGNla6iYjCVNPQBgBYNiUDa2ePC/ncDT/MARD+XGt9grc7/tLeFjDh3rJoIqeWEBENY+zBiYjCdP+fTst51Xt+MR2HH52tmmqSl6HDM/dYcPaJeSic4F3W78WD6oq4u9u7ykmyTwItHl84OQ15Gepbx+/8aT7+7e9u7fd+B2uTiIiih9NLiIjCVN/UifV7zuC5sttgNiSiOC8NxXlpAW/v3nH9e+z6osFvGkrNpTYUTkjG0ikZOPvEPJxv6sSy/78Gv/2oHrvunwpjcg
Lqtv1QVsgnpiciaewtcLZ342JzF6zj9BHvd7A2iYgoeph0ExFFQKxQsvOn+Vh0mwET08fCmJwgfy/uTrnl7dqA00R+/toZ5KSNxXxLOqzj9HKetlhj+4llecg36WVybbvqxvtnrmHz27XY+dN8WMdNxMLJ/qubhBKsTSIiih7eBp6IAPA28ERKvA08EQ02TvAjIiIiItIYk24iIiIiIo0x6SYiIiIi0hiTbiIiIiIijTHpJiIiIiLSGJNuIiIiIiKNMekmIiIiItIYk24iIiIiIo0x6SYiIiIi0hiTbiIiIiIijTHpJiIiIiLSGJNuIiIiIiKNMekmIiIiItIYk24iIiIiIo0x6SYiIiIi0hiTbiIiIiIijTHpJiIiIiLSGJNuIiIiIiKNMekmIiIiItIYk24iIh+lt6aj9/k7cfaJebHelX4T76H01vRY7woREYFJNxGRn8eX5MJ21Q3rOD22LJoY690hIqIRID7WO0BENNQsnJyG3V9eAZCJdbdnYceBi6rfn31iHqzj9ACAipNOlM00YtHOalR+14wtiyZi++p8AICzvRs2hxsAMP+F4yjfMBNZqQkozksDAGzdW4sdBy6qtuds74bp14dkW+UbZqJsphEAUFXfAqtJj91fXsHmt2sBAL3P36nat4qTTjz36QUc2FwEADiwuQgvVl7E5rdrVfsGQD4OAI7fLIDN4UZxXprfPhAR0cCx0k1EpLDzp96kdPPbtXj/zDWZIAuHH52NzKR4jHnsM4x57DMsnHzz96W3pmP76ny8WHkRYx77DLu/vOL3+uK8NGzdW4sxj30mE25nR7fc3ufnWuD4zQK5LyKhH/PYZwAAY3KC3JbjNwtQcdIpXytOAABg0c5q+X9lwi3a3rq3FptKJ6oq+VaTHmMe+4wJNxGRBph0ExEp3D01E5+fawEAWQUWiTjgTZqf+aBe/qz8t5iWIl63+e1a2K66Vdt3tnfLynnpremwjtNj/gvH5e9XvnwSxuQEbFk0EXdPzUTFSScqv2sGANXzAMD060NY+fJJ+fPHtqag72vd7Vmoqm+Rbe84cBFV9S3YVHIz6Rbvm4iIBh+nlxAR3SCS4A17vpGPVdW34O6pmfL3AHDc3iZ/r/x3VmoCnB3dqm36/nytwyP/vaZoHAD/KSKAt+qcmRSP802d6u21d/s9Vzk9JRhjUgJON3aoHjt6vhXrbs+SP/u2RUREg4dJNxHRDY8vyQUAOR9aacuiiaoEe7CEmj+tTIgDEcm27aobYx77zG/ONhERDR1MuomIblg4OU11caHg+M0C1QWVs80pcsrHbHOKfN6V1m5My05SvdaY5F/9FmwON4zJCSi9NV1uT+lahweTMnTq7d2Y0y2q8uICzr44O7qRlZqgemzupFRV5Z2IiLTDOd1ERPDO2zYmJ/gl3IB3rrO4ILKqvgXblufJ3yn//dynF2Adp5dzwHf+ND/ktI8dBy7CdtWNvetnyMe2LJqI3ufvxJZFE/H+mWsom2mU01oOPzrbbxvKpD9UlVtc1CkunNyyaCKK89Lw4sGLQV9DRESDh5VuIiJ4L6Csqg98IeFzn15A2UwjyjfMxPwXjuPsE/PkPGzliiGV3zVj695abF+dj02lE+Fs7w66TWHKb4+otgdAVb2elKGT012q6lvknO7K75rxYuVFbF+dL5PtRTurcWBzEdYUjZMXcSqXDASger5YspCIiLQ3pre3tzfWO0FEsZfxxOdwuTnVIFJiHrVY0s/X2Sfm4XRjh2qVkYHoff5OJstREOjiViKigeD0EiKiCPQ+fyfKN8yUP6+7PUsuC1i+YaYqWRPzrkMt5RfK2SfmqW5FL6atMOEmIhp+WOkmIgDA7OeOofri4K/OMdL4rhBiu+rGlN8ekT/7Lt830Kq0b8U1WEWdBk/RxBQcf3xOrHeDiEYYJt1EBAB4bN+3eOEze6x3gyjmHp43Hn9c94NY7wYRjTCcXkJEAICVNy4GJBrt+F0gIi0w6SYiAMDifAMsmbq+n0g0glkydVicb4j1bhDRCMSkm4ikp+7O6/tJRCPYQ3dkw6DnarpENPiYdBOR9PC88azy0ai1ON+AR+80x3o3iGiEYtJNRCp7189gpY9GHYM+Hs+vuo2xT0SaYdJNRCoGfTyOPz6HyQeNGgZ9PPaun4GiiSmx3hUiGsG4ZCARBeRye7Dk919x7W4a0RbnG/DHdT/gRcREpDkm3UQU0guf2bH9wEXUXeuM9a4QDRqDPh5P3Z2Hh+eN56gOEUUFk24iCsu+Ew589m0z9te6WP0OR1cb0HJZ/d/VWu/js/8BmP5j9XOP/ydgrwYazwAJeiBjEjDuNiA1G0gbD6Td+L+eF7r2h0EfD0umDpZMHR66IxuL8w1Mtokoqph0ExH1g8vlQl1dnfyvvr4e1dXV8nGXyxX0tU899RT+9V//Ffv370d5eTleeeWVkM9XSklJQV5enuo/i8Ui/52TkzNYb5GIiAYRk24ioiCCJdYiqQ6VKBsMBlgsFvlfXl4eioqKEBcXh7/+9a94//33UV1dLZ9bVFSErVu3or29HXv37sX777+PtrabIwppaWno6elBe3t7yH3W6XR+ibgyOTebuSQeEVEsMOkmolFrINVqg8GgSqxFUltUVASLxQKDQT0NJFBV22AwYOvWrVi1ahWKior82njrrbfwxhtv4I033pCPxcXFYfny5SgqKkJOTg7q6+tRX18v9//q1ash33NCQkLQKrn4b8yYMZH8GYmIKAxMuoloxBLV6FDV6lCCVavF476JdaD2X3jhBZSXlwesai9evLjPbQBAc3OzTL4/+ugj+bjJZMKaNWuwZs0a3HnnnQCAtrY2v0Rc+V9DQ0PItsaMGRMwGVf+HB/PudBERJFi0k1Ew9pAq9UDSaqD2b9/P/70pz9h3759sn2LxYKHHnooaFU7XOfOncMbb7yBN998E8eOHZOPT5kyRSbgBQUFQV/f2dnpl4wrf7bb7X3uQ25ubsgpLImJif1+f0REIxWTbiIa0gaSVAOBE2tRYRb/DdZ+BqtqP/XUUzKZH0xffvmlrICfO3dOPl5cXIz77rsPa9asiXgOd3d3d9BKufi5LxMmTAg5hSU5OTni90pENNwx6SaimNPigsWBVqvD3e/q6mps374d+/fv96tqP/zww7BYLJq1r/TRRx/JBLy5uVk+fs8998gKeFJS0oDb+f777/2mrPgm5x6PJ+Q2srKyQk5hSUtLG/B+EhENNUy6iUhz0bxgMRpCVbWff/75mO2X8Oabb+KNN97Af/7nf8rHEhISZPJdVlamafsXLlwIOJ9cPNbV1RXy9UajMWSlPDMzU9P9JyLSApNuIhqwWF+wGA1DqaodrqamJln9/uSTT+TjWVlZMgEvLS2N+n5dunQp5MWefS2LmJ6eHnQ+eV5eHsaNGxeld0JEFD4m3UQUlqF4wWI01NXV4ZVXXsGf/vQn1NXVARhaVe1wffvttzIBF9V5AJg6dapMwGfMmBHDPbzpypUrIS/2bGlpCfl65Q2EAiXnvIEQEcUCk24iAjB4FywWFRUhLy8PB
oNBkwsWo0FUtR977DHVe1+8eDFWrlyJhx9+eFi9H19Hjx6VCbjywsgFCxbIBHwoJ6ZOpzPkxZ5NTU0hX6/T6UIuicgbCBGRFph0E40iw/WCxWjpq6o9kKX+hqoPPvhAJuCtra3y8b/7u7+TCbhOp4vhHkbO5XKFvNgzkhsIBUvOiYgixaSbaAQZaRcsRsNIr2qH6/vvv5cXYL799tvy8cTERJl8//3f/30M93DwiBsIBbvY8/LlyyFff8stt/R5V0/eQIiIfDHpJhpGRsMFi9FSXV2NP/3pT363ZV+8eLFcV3u0cjqdsvq9f/9++XhOTg7WrFmD++67DwsXLozhHmrL7XaHXBIx3BsIhbrYkzcQIhp9mHQTDTGj9YLFaFBWtZUXE462qnYkbDabTMBramrk49OnT5cV8GnTpsVwD6Ovu7s75JKIkd5AKFByzhsIEY08TLqJomwwLlgUUz6UU0BEYk3+9u/fj/Lycr+q9qpVq7B169ZRXdWOxJEjR2QCfuHCBfl4SUmJTMCzs7NjuIdDg/IGQsGS856enpDbyM7ODlol5w2EiIYnJt1EGgiUWIu1nXnBYnS4XC7s378fTz/9NKvaGnjvvfdkAq5cV/snP/mJTMDHjh0bwz0c2s6fPx9yCku4NxAKNoUlIyMjSu+EiMLFpJuoHwbjgkVRrU5PT5f/ZlI9cKxqR5fH45HJd3l5uXxcr9fL5HvFihUx3MPhSdxAKNh65R0dHSFfbzAYQl7syRsIEUUfk26iAHjB4vDicrmwb98+bN++nVXtGLp69apMwA8cOCAfnzhxokzAi4uLY7iHI0djY2PISnm4NxAKtiTi+PHjo/ROiEYPJt00avGCxeEvWFX74YcfxkMPPcSqdgx98803MgE/efKkfHzmzJkyAf/BD34Qwz0c2ZxOZ8iLPfu6gZBerw9ZKecNhIgix6SbRqyBXrDom1TzgsWhgVXt4aeqqkom4BcvXpSPL1q0SCbgnO4QXcobCAVKzsO5gVCoJRF5AyEif0y6aVhTJtbV1dVobm7mBYsjVLCq9tatW7Fq1SpWtYeJd955RybgbrdbPr5q1SqZgMfFxcVwDwnw3kAo2Hzy+vr6iG4gFCw55+dMow2TbhrSOAVkdHO5XPK27KxqjyzXr1+Xyfd//dd/yceTk5Nl8v3jH/84hntIoShvIBQoOVeOaAQzadKkkFNYeAMhGmmYdFNMaXXBokiqmZANT6xqjy6NjY0yAT948KB8PDc3Vybg8+bNi+EeUqSuX78e8uZB4d5AKNQUlqSkpCi8E6LBw6SbNMdqNYXD5XLhhRdeQHl5Oavao9jp06dlAn7q1Cn5+KxZs3DfffdhzZo1sFqtMdxDGgziBkKhLvaM5AZCgZJz3kCIhhom3TRgg7FmNS9YHL1Y1aZgPv/8c7zxxht488030dDQIB9fvHixrIAbjcYY7iFpSXkDoUDJeTg3EApVKecNhCjamHRTWEQCLaZ9KC9YrKurC/laVqvJV6Cqtrhh0EMPPYRVq1YxLkjlv//7v2UFXJls/fSnP5UJ+JgxY2K4hxRtFy9eDDmFJZIbCAVKzrmiDg02Jt0EgFNAKDpY1aaB6uzslMn3X//6V/l4amqqTL6XL18ewz2koULcQCjYFJbW1taQr09NTQ25JCJvIESRYtI9yuzfv58XLFJUhapqb926FYsXL2b8UL80NDTIBPzQoUPy8by8PKxZswa/+93vYrh3NNQ5HI6QF3qGewMh3yr5z372syi9AxpumHSPIi6XK+gcNlarSSvKuLNYLHL6CKvaNJi+/vprmYCfOXMGAPDmm2/iH/7hH2K8ZzRciRsIBVuv3OFwBHzdtm3b8PTTT0d5b2k4GLVJt6vLg1fONKC8zgFXlwd1rZ1wdXlivVvae/VG5SczGzBmA+Z8wDge0KfEdr8GmSVVhyJTKmYZk/HorFwYEuNjvUsARnHcffoWYL7NG28jLNaUGHdDRO0J4MxR4O8fifWeRAXjLkY6O4BrjTf/c172/n/FL4Cc0bEIwFCNvaFq1CXdri4Pnj5ah1fONIysLz+F9OisXDy/MD9m7TPuRifGHcUC445iJdaxN9SNqqS72tGG1e+eQF1rZ6x3hWLAkqrD3nsKUGSKbqWVcTe6Me4oFhh3FCuxir3hYNQk3dWONiwpP86z7lHOkBiPT1fOjlpnwLgjgHFHscG4o1iJduwNF7fEegeiwdXlwSOfnGZHQHB1eaJ2UGDckcC4o1hg3FGsRDP2hpNRkXS/8NUFVDvaYr0bNES4ujx47PNazdth3JES445igXFHsRKt2BtORnzS7eryYPsJu+btrM3PQu/GJejduASlOVxeDwDKVxSgd+MSbCk0R/W14XjlTIOmcw6jFXcA0P7LRejduAQ7S61RaW+o21JoRu/GJShfURDV14ZjJMXdV2vvQO/GJTh875yotDcc9G5cgrMPFEf9tX0ZSXH36rLp6N24BI71JVFpbzg4+0Axejcuifprw6F17A03Iz7pjtbV04/OyoWzsxsA8PjsXM3bo4Hb991VzbYdrbjbWWpFUnwcnJ3duHuSUfP2aOBGQtyV5hhQaEyBs7MbhcZk5KXqNG+TBmYkxB0A3DMpE87Obhh1CZoVZWhwaRl7w82IT7o/u9QclXYKjcmwNbthb+vCwvHpUWmTBkbL2IhW3C2aYICzsxvVjjZY0/UcZRkGRkLc/XLGBADAu+evISk+Do8XsdAw1I2EuFubnwWjLgHvnr8GAFhnzY5KuzQw0YqP4WDEJ93VjlbN2xDVxg8vXMP+Sy4YdQl4Zt5kzdulgdl/KfQtfgciGnEnqo3Vjjb8x6lLAICn7hgdN2QYzoZ73AHeaqO9rQs//+gUOjw9WDV5XFTapf4bCXH36Czvyd2vv/gONc42FGencZRlGNAy9oabEX/rINd17Ye8Fk0woMPTg21HziEvVYcHp2Rj5WQTth05F/D5ry6bjnsmZcKoS4Czsxvvnr+GtLFxKLOYMOalT+XzSnMMeHGRFYVG75I7Nc42bDpgw4HVs1FR58DKd04E3afyFQUos5hgefUwKlYUqLZR9s4JFGen4dkf3gpruh4dnh4cvtyCDZ+eQb1i7lVeqg57ls9AoTEZSfFx6PD0oMbZjvs/+Fr1PAB4Zt5kPDI1B+aURLm9YJTPBQB7Wxf+vfo8dtREZ06goOVwaDTiTlQb/+PUJbxeewXPLcjH/PFpQZ+/pdCMfy6apPqMDl1uxpNzLdh60Kb6+4cbo4Ha2F5ixdaDNpRZTJg/Pg1J8XGwt3Xh8UO1qGpswZ7lM1Cc7d1PEdOVDS7VdspXFGDh+HQYdQnyeb89Vo/Xa6+onrc2PwtPzMlTxfeBS+ptBXuueF8//+hU0PejheEed6La+JezjQCAw5dbsNScgbX5WX6fDxBZPxZJjPrq3bgEFXUOnG/txDprtozd3bZGbK60oXxFAZaZM2Q8BupzAvVNfzzT4NeX+/aNYnuBBOpHA/W3WhvucQd4R5RrnG2ob+1E+TkHCo0p+M0Pbw34Hc5L1eHlJVNVfdC/V5/HpgLv
lJQpr1XJ5w7kWCvm4T/5xXd4bkG+KnaXVVTjmXmTsXHmRFVf6ru/gfqmzy83B2xX2TeK7QUTqB8N1N9qjSuY3DTyk26NP2xRbaxq9CaZ9a2dqHG2odCYgtIcg19wf1RWhKXmDDg7u1FR50CWfiwenJKNDk+P33bf+0khkuLjUNXYgivu61g4Ph3v/aQwov07uPp2uHu+R0WdA5ZUHQqNKfiwrAgTk8eixtmO003tuN2UiqXmDOwotcoveaD2s/RjUZydhlPr5mH67iPygPHMvMl4cq4FHZ4efGz3ntGKjs7XzlIrNhWY5XPbPT1YOD4d20usMOkSgp6oDDfR6GTE3EaR6Oy/5MKDU7Kxs9SKzZU21XODfUaBkvRwYzSUfy6aBH38LfjI3oTk+DgsNWfg94umwO353i8eX75rquoAeOEXC2BOSYSt2Y3PLzcjOT4O88enYc/yGchOGisTpdIcA3bdNdXvOyIOqkpr87P8njstIxkPTslGTtJYLKuoDvu9DWXRun4FAP7wtXd05T9OXcJScwYenZXrl3RH0o9FEqPB3G5KxTJzBg5fbkG7pwfLzBnYVGDGogkGTExOVMXjvxXfivJzDtmP+cY9ACwcn44n51owy5SiSoAOrr5dxujppnZMy0jG9hL/C5nzUnV+z83Sj8VScwYOrr4duX8+FPZ7G8qief2KOKneduQc/qkoF/dMygz4/GCfUYenBxfbr8vnDcaxNjMxHrvumooaZzu+dLTKY+pXa+9AfrpeFY8PTsnG3660yH7MN+7bPT2YlpGMMosJZx8oVvWNwfrmQA7fOwfF2WnyuaIffe8nhfjxf9VEPfEmrxGfdGtNVBtf+OqCfEycgf9yxgRVYJfmGLDUnAFbs1v1RRLVQaXfLbgNSfFxftWdsw8Uw5quD3v/rnV1Y9brf5M/X/jFAljT9aqz97xUHU6tm4dpGcl+7b94wq5K4ETSvGf5DMx/6xgAYOPMiejw9KgScbFNZeKdl6rD+mk5cHZ2Y86bR1XPPXbfXPxTUe6ISbq15lttBLxDrg9OycaiCf7zukN9RkqRxGgo+vhbVJ+xOFj4btc3nneWWmFOSURVY4uML/F+d901FdvmWuT3IZLvyLM/vBVJ8XG4/4OvVYnhV2vvCFmlJX+i2ij6NjHKUmhM9ntuJAK2C/kAACAASURBVJ9RuDEaijklUfUZi4QmP12v2q4YCVw52YQdNXYZ9/a2LpTs/VLV/sHVt6PMYpJFFBGjvhVQsU2lHTee+5ezjarq5qvLpuPBKdl4ddn0qI+0DFdiRFl5PAo2yhLJZzQYx1rRF4vPMi9Vh7qfz0ehMUW1XdGPLjVnyMdE3K//5IzqPYikWRRRIumbtxSaUZydhhpnm+r4vzY/C3uWz8CLi6yqxyl6Rvycbq35VhsB7xl4h6fH7wxcJOh7bI2qx3fU2FHjVK9vWmhMhr2ty2/488kvvoto/14+3aD62d3zPQBgi6Ljqm/tVJ35A0BxdhrsbV1+FdPNlTbY27rk9ACR/B2+3KIaKq1v7fSbYvJ4US6S4uPw7vlrfs/dbWtEUnwc58KHybfaCPiPsgihPqO3v3OothtJjIby+eVmVVvtN6rkvts93dSu+lmswHL/B1+rHn+99goOX25RrVgQ7Dvi20ZpjgHWdD1qnG1+ifWmA974/h/TJ4T93kYz32qjsP+SC0nxcX7LVobbj0USo6HYmt2qz9hxY0Up3+2KSrogVpz645kGv/b/eKZB9RxxUrvFp2/0/RnwVso7PD1+ifXPPzoFZ2c3Fgc4QSZ/YkS5xqnuL8S1LKI/FIJ9RivfOeE3YjdYx1rlZyxiyHe7vm1sKTTLuPftm0QfKPrEUH2zva1L9Zi4wPS3x+pVj79eewVVjS0oNKZwLnyMsNI9AOJAASDgOpdJ8XHYUmiWX7T8G2fNvokwANS1dsr5XKU5BiTFx+Gwy39e9Ou1V7Bn+YwB73s4cwm/cXUEfNze3iXnPGYnjQXgnzwBwKHLzVhqzpA/T7rxJc9P1/utg5yl925nFm8ZGxZRVTywenbA3z8+O1dWIktyvKvpHLrsfwX53660qIYnw43R/hJJUCj2tq6A8Xm6qV0VT0nxcahp94+7l0834Mm5Ny8onT3Ou8/6+Lig629P4gEoLCKZ2VRgDjiNx5sgeBOdSPqxSGK0P9rDnBoVKO4/vNCkiif9jSU6fWO0vrXTL/kR826DxZ3oRyk0kXAWZ6cFPNaKZSvFZzIxOTFoP+I7tUTLY60ocvUlUNzXt3bKZYgBIG2sd9T4wwv+FyV+4+pQxZLIS342JRs/8/nuiN+JUR6KLibdAyDOrsX8PyUxb3CdNXvUBnawBEtUyal/RLVRzD/0tcycMaqXrQx2QmlN10c0NYvURLVRzJH1dbspVS5bOVrniwZKsoy6BL8pDRQZMaL8eYDkVFwb8nhRrt/I7GgR7KSScTf0MOkeADEsFezKZsf6ErmkUX1rp0yQNkzL8Zu7bFFU2sQB6weGJL9trs3PGqzd71Og9gHAnHzzjLqxw/uelPPBBWVVEgBarns7hr5WIaDQRLUx0CoywM25gM/Mm4xtR87B1uwGACwIkIjfkaU+AQo3RrVkTklUVa0E3xjr8PSoYlHwvWGGiNG+ViGg0JTD24GuvRDXezx1hwXLKqoj6sciiVEtBYr7H+Wq+zG3pwfGdH3AGLWm6+V7ASAv2lPOwaXIiBHlj+1NAb+/pTkGHFg9G6smj5NJ97UuD6xBPqOJyWNltXuoHGsDxX1eqg5GXQKu3bhIVRw/f5Sb4XdS69s3um8k4aFWmqLY4JzufhLVxv1BlicDIJfy+c0PbwUAPHfce7Hl/T4L+q/Nz/Ibtq9qbIE5JdEvgXhiTt6A9z0con3fOZrKC90A7xCcs7Mb88enqTqpvFSdX7VVzD8ONCx9+N45mt72faS4ObexLWhFV1zUu3Kyt8qxo8YuPyPlPL68VJ3fdQeRxKgW3j/vBAC/Yd21+VmYP957Jb44YatxtgeM0Q3TclQ/ixhdZs7wm8e4s9Sq6W3fR5J7JmXKpVED2VxpQ4enR7XaSLj9WCQxqgUR949MzfFr/5GpOarniPnsvjH66rLpftutcbbDmq73e/9r87PQ/stFmt32fSQRI8pi/ravygYXapxtMKckymOQ6Ed2+PQNry6b7reqViyPtcq4903yRXyJ9yKOn74xujY/y28ET8Sob7+Wl6rDhV8sQPsvF3FOd4yw0t1PotqovJDN1x++voQHp2TLg0Zlgwsf25uw1JwBx/oSfH65GVn6sSg0JqPD06PqDH516Fu895NCbC+xYp01G1fc13G7KRWZuuh8ZKL9TQVmzM1KUy0Z2OHpwa8OfSuf+8zROmwvsWLXXVPlBWmBlvmqbHDhxRN2bCoww7G+BNWONrk8kjVdH/BiFlIT1cZga1ED3iTz94umqJatfOnkRTw514JT6+bJC1yLTCnQx6vPuyOJUS1srrRh1eRxKM5Ow9kHinG6qV0udZUUH4f/XX1zlaD7P/gap9bNU8XowvHpfu8JAP7xwFnsums
qTq2bhxpnu188i4SKAhPVRnGyHUyNs1214kIk/Vi4MaoFZdwfu2+unMYg1jj+2N4kq4ubK224e5JRFaOiD/O9SO/+D77GwdW3Y3uJFRum5aCutVMVz74XxZG/QmOy32IFvg5ccqHQmCKXrRT9iFh273RTOyypOnnNilKsj7Ui7sXx0/eYKKr3lQ0uVNQ5UGYxyRhV9mHKvnlzpQ1zs9JQZjHhwi8W4MsbNy9SxnM014inm1jp7gdRbbS3dYWcu1jZ4IK9rUu14sKyimo5B7zMYoI1XY9dpxv8Vg+pbHBh/Sdn5F23yiwmXOvqViUdWqpscGH67iM3rnT2rhlaaExGVWMLpu8+onrfO2rsuP+Dr1Hb7MZScwaWmjNQ42wPuK+bK2149mgdrnV5sNScgTKLCZmJ8aioc6Bk75dReW/DmTiBe66POKh2eFcaESsubDtyzvt37/T+3eePT0O1ow0f2f0vygk3RrWS++dDqKhzIDMxHmUWE5aaM1Db7Mb9H3ytqrLWt3b6xei1Lg/Wf3LGb5uv117B+k/OoLbZ7RfPXLO2b6La+OGF4DfiUP5erLgQST8WSYxqYVlFNZ49Wge353uUWUwos5jg9nyPZ4/W+a3jPuW1KlWMZibG49mjdX7fkfrWTpTs/RJVjS2YmJyoiuetB21cIrUPYkRZ9GfBiP5QXFAJQPF3H3vjM0oI2DfE+li77cg51fFTeUz0Xcd95TsnVDFaaExGRZ0j4Hdk/lvHVP24iOcXT9hHzH0JhqMxvb29vbHeCS0NlzlNYk3QvvZXrMnJ+amDI9CV8INhuMSdWLc2nHn24cYo9W20x10k/VgkMUqhjfa4A7x/A9+1rgPhsXZwaRV7ww0r3VG0pdCM9l8u8luLWszJUq6DfPaB4oDz/cR81f9zlsOSFL5A85bFvPsOT4/q5g3hxihRXyLpx8KNUaK+lK8ogGN9iep+BcDNefdfKKZJ8VhL0cQ53VFUfs6Bfyu+FU/OteBHuZm44r4u5/d1eHpUC9mfbmpHmcUk59UCkPO8qhr9F9InCsXW7FbNbwRuzu978cTNZCaSGCXqSyT9WLgxStSXj+1NKLOY8N5PCuUt2MX8Z3tbF36tuPENj7UUTZxeEmWlOQb8bsFtKDQmywsfapxt+O2xer8v985SK1ZNHicXvRfrlHKoa/CMluHWvFQddpRaZRIDeG9C88czDX7zSiOJUeqf0RJ3QPj9WCQxSv0zmuJuS6EZG6blyFWXOjw9qHG2B1xqlcda7XF6iReTbhrVRtNBiIYOxh3FAuOOYoVJtxfndBMRERERaYxJNxERERGRxph0ExERERFpjEk3EREREZHGmHQPcVsKzX7r1/ZuXBJwXVFf5SsK0Ltxibwb5mAIt20a/nw/67MPFId1MUygmB2ocNum4c/3s46kHxvs/kmLPpSGJt/POpJ+bLD7Jy36UBoamHRTQHmpOpSvKJA3EyCKlp2lVhy+d06sd4NGmS2FZhYUKOpKcww4fO8cntiNErw5zjAUjeWZVk42ocxiQkWdI+pt09DU122TB8umAjNsze6YtE1DT7TWSt5UYIY1XR+Ttmno2VFjj8pdUB+fnYvi7DTstt2882W02qboY6WbiIiIiEhjrHQPop2lVmwqMOMvZxvx849OBfzdiyfs2Fxpk3dfu92UqroLlq3ZHfCOWUq9G5fA1uxWVf+emTcZj0zNgTklER2eHhy+3BLwteG0W76iAGUWEwCgzGJC78Yl2HrQhh019oBtr83PwhNz8uSdvwLdzWtLoRnbS6zYetCGpeYMLDNnICk+Ds7Obrx7/prf34vCV5pjwIHVs1HjbMOs1/8W8HdVjS2Y/9YxAN5YvHuSUVb1Ojw9qG1293nHybMPFMOarleNdvh+9jXONhy45Ar4+r7aFTECANZ0PXo3LkFFnQMr3zkRsO28VB32LJ8h75wZ7I5zYsrAk198h2d/eKtsv8bZhk0HbKhsCLy/1DfH+hLo429B8h8OBPwdAJh2HQRw8w6B+el6eadTW7Mbe2yNIe84Kfoj0QcB/p+9va0L/159PuDrw2lXOR9X2ccFalvsk/LOmYHu2Cpea3n1MPYsn4Hi7DQAkPvKSmb/Hb53Doqz03D/B1/79Vnid4v2Hkdlg0veYdearlfd6fRLR2vIkQzRH4k+SFB+9uL4FUg47Yp+DQC2l1ixvcSKMS99GrRt5XFebM/3jq081g5trHQPos2VNnR4erB4gsHvd4smGNDh6cHmShsA4MOyIpRZTLjW1Y2KOoecxlGcnYY9y2dE1O4z8ybjybkWZOri8bG9CYcvt2D++DQsNWf4PTecdj+2N6Gq0Zu025rdqKhz4PjVtqBt71k+A/npenxsb0JFnQPXujwos5gCzo/856JJWDg+HYcvt+BjexP08bfgwSnZ2Flqjeg9002VDS7UONtQaExBXqpO9btfzpgAAHjhqwsAvAeMTQVm6ONukZ//xfbrKDSm4PeLpkTUbmmOAbvumopCYwqqGltQUefAxOREbCrwn5sYTrvHr7bJeHR2euPzY3tT0LZPrZuH4uw01DjbUVHnQI2zHcXZaTi1bp7f3yEzMR677poKt6cHFXUO2JrdKDSmYO89MyN6z6T27vlrSIqPwzPzJqseX5ufBaMuQSYkIhGYmJyIw5e9sVLV2AJruh5PzrWgNMe/zwzl4OrbUZydhovt11FR54C753t5wqYUbrsVdQ44O7vlv98/7wza9oVfLLjRh3pkjOan67Fn+YyA83IPrr4d5uRE2bY5JRHbS6wRv2e6SfRn/2P6BL/fFRqTUeNsQ2WDC3mpOrz3k0IUGpPlsexjexMydfEos5giPu58dOP4CUD2Iw9OyfablhRuu++fd8qpdKIPDdX2k3Mt0Mff7EP18bfgybmWgBdc8lg7NLHSPcjEgb80xyAraKU5BpmYiJ8zE+NV1UfBsb5EVkTCtXHmRHR4ejB99xFZ4ctL1eHUunmyshNJu6ICU5ydhtNN7SGrAaLt9Z+cUVUcRLVhZ6lVnmgAgD7+Fsx586jcz7X5WdizfAYWBThRofAduORCoTEFjxflqv7e90zKhLOzW3420zKS4ezsRu6fD6leLz6vLYXmsCtwv1twG5Li4/yqgMrqjRBuu5UNLvRuXIJrXZ6QcSfaFiNHghhR2rN8hirGjboEvxGor9begUJjCtbmZ4Ws8FNwf/j6Eh6cko0f5Waqqm2PzsqVvwcgCwCr3z2pGlkQn9fjs3PDHnHYWWqFOSUxYAVSJERCuO2K0RSjLiFk3Im2ffvQtflZ2HXXVGyba/H7/lzr6laNQL26bDoenJKNX86YwFGWfnq99gp+v2gKikwpqsd3llqRFB8nR9s2TMsBAOw63aDqJ8QI4N2TjABsCEdpjgFLzRl+I73KEToh3HY3V9pQvkIHa7oeu22NQfte0ba9rQsle79UHecPrr4dZRaTKucAeKwdqljpHmTiDFxUGJX/Fr+rbHDBtOugX+ILANe6PBG1JypKhy+3qIbU61s7/aaYDGa7gLezEW37Ji33f/A1ANzoXG76/H
Kzaj/F6/SKkwOKnBhlUXaovtVGwHtBohjuV7rivh5xm4XGZNjbuvwOFHsUFwRp0S7gPSG0t3WpDmiA9+9gb+sKeOLqO6xadyMOs5PG9msfSDnKkqx6XFltBLwXJI556VO/JNP3gtlwiBjf4vPZ+/482O0CN/sz0b8Jr9deweHLLTDqEvyq3S+fblD9/Lcr3n45bSz7vIF49/w1GHUJWJufJR/zHVHeduQckv9wwK+f6M/JjjiO+/ZvO2rssLd1qR4bzHYB78WWAPDHMw1+x/k/nmlQPUfgsXZoYqV7kIkz8HsmZcrHFk8wqKqNSlsKvVfMT0rVYVpGsl+FsC8iYTjd1O73u0OXmwNOMRmMdn3b8VXf2imHayk6fEdZxNCrqDYqrc3PwozMZMwypcCSqkN+Pz7/pPg41LT7x93Lpxvw5FxLwNcMRrvCN66OgI/b27vknEfSnhhlEaNaz8ybrKo2KpXmGDB7XAqWmjOQpR/br35Hf2N+qu91L/WtnX7Jz2C2K9jbugJec3O6qT1of0uDT4yyPDorF6/XXkFeqk41oqyUl6rDyskm3JGVhpyksfiBISni9sRJ0ocX/Ke8fePqCNjnDEa7Sr4ncGJ/gvW3NPQw6dbAu+ev4cEp2Vibn4VL7ddhTknEX86qz44/KitSddDOzm5cbO+Cs7NbXnQxUI4ASW802qXYeOGrC9izfIYcti4ypaiqjYB3+HX9tBw57ajD04OL7ddxsf36gBIRpUAJSTTapdjYXGnD+mk5sgL9o9xMVbUR8J5s/X7RFFUfY2t242J716D2O+6e71U/R6tdij7fUZbHi7yVXjGiDHiT3g/LilR9jL2ta9BPzNs9Paqfo9UuDT9MujUgzsCVF3koq407S61Yas5AVWMLXvjqgqoCLuYVhquxwzs8Py0j2e93vlWXwWxXacH4dL/H8lJ1MOoS+jVthfpHjLIsnmDAM/Mmw6hLUK39WppjwKYCM+xtXfiX6u/8VmOINPnt8PTAnOx/APEdXh/sdoVgVaNA+0TaUo6yFGen+VUbf79oCvTxt+DZo3V4+fTNIfJA82H74vb0wJiuR16qzu8Ez5quV00dGcx2BXNKYsC2A/XBpC0xyvLMvMlYNXmc34jynuUzYE3X4y9nG/GHry+pChCR3kGy5bo3sf5RbobfNBHfz34w21XaMC3Hb6WfH+VydGU44ZxuDYgz8CJTSsBq46QbKyt8eOGaqoNYm58VcQLyeu0VODu7MX98mmpuW16qDgt9kuHBbBfwzmUL1DYAuRJKqFUAaPC9e/4azCmJuN+a7VdtnD3Oe9HRN64OVeIbKFbCUeNshzkl0e9qeHERkVbtApCrQPi2rbzQjaJHVBdfvmuq6mfBqEvAtU4Pth05p0pWfWMlHGLaiu8qT4HunjuY7QI3+zPfttfmZ2H++DQ4O7u5FGAUiWtZ7rdmw5yS6Ld8nygk/fqL71TH4P7caVkUzh6ZmqNaHSnQ8XMw2wWA545fCNh2XqoOj0zNUT2HhjZWujVy4JJLLp222+fCi68cbSizmPBPRblYMD4d7Z4eWG7MR+vw9KhWHAnHM0frsL3Eil13TZXV9fnj/S8ki6Td8nMObC/xruddvqIAzx2/EPAikJdOXsSTcy2y7XZPj5wjHuhCN9KWGGWxpuv9Es/jV9vQ4enBUnMGDt87B1fc15GlH+t3EVy47v/ga5xaNw+bCsyYm5WGK+7rWDg+Hfp49bl8pO3a27owMXksylcU4GN7U8Ak5leHvsV7PylUtZ2lH4vi7DR0eHrwq0Pf9us9Uf+IURZruj7g9Sv2Nu+w+tkHinG6qR3J8XEoMqX4xUo4NlfacPckI4qz0+T2RJ/T4TPMH0m751s7YU3X46OyIpxuag/Yd22utGHV5HGqtpPj4zB/fBqS4uPwv6uZ+ESbGGUB/K9fEZ/psfvm4vMb1x6Je1T4xkpfKhtcqKhzoMxikttT9jnK42ck7Yrj8oZpOVhqzgi4ek5lgwsf25uw1Jyh2qZYL/xjexNXwhkmWOnWiDgD9602At4rm188YYfb8z2WmjNQZjFBHx+HZ4/W4e3vvOt0+q57G8qOGjvu/+Br1Da7sdScgaXmDNQ42/0OAJG0W9/aiY/tTTCnJKLMYpLVSl/bjpxTtV1mMSEzMR4VdQ6/5eFIe2KUBfCvNlY2uLD+kzNydY8yiwnm5ER8ZG/C+k/OAIDfkmuh1Ld2YvruI6hqbEGhMVmuXSy21d929527Kh8LdmFaZYPLr+1CYzKqGlswffcRHoBiQFQZA90spGTvl6hxtsGarkeZxYQiUwpszW5M330EHZ4e3G5KjaitKa9VoaLOgczEeNnnPHu0Dhfb1avhRNLuf5y6BGdnN5aaM/xWXVLK/fMhVdtLzRmovXFzsVA3+SFtiH7Od0QZAJZVVMs1qsssJiwzZ+BaV7c8ZllvTFMK18p3TuDZo3Vwe76XfU5FnQMf+dxPIJJ2Xz7dIO8bEKr/XVZRrWq7zGKC2/M9nj1ah2UV1WG/B4qtMb29vb2x3gktKe9gR+RrIPPrQmHcUSiMO4oFxh3FilaxN9yw0k1EREREpDEm3UREREREGmPSTURERESkMSbdREREREQaY9JNRERERKQxJt1ERERERBpj0k1EREREpDEm3QPkWF+C3o1L5H/lKwrCfm35igKcfaAYgPf21Y71JWG97uwDxRG1MxBbCs2arK+p3ObZB4qxpdAc8DmlOYZBb3skOHzvHFXciTgKh/IzLc0xhP13VsZrNPRuXBIwLgZCGWvlKwpU3yPl3zPc7+Jos7PUqvo7Rdo3KD9Tx/oS7Cy19vkarfqgYLToX5WxtqXQrPoela8oYNz1QfRTyv8i6RuUn+nhe+fg8L1zwnqdFn1QMFr0r76xFux7dPaB4rD/JjQwTLr7SXQCu22NGPPSp/K/MouJwduHLYVm2Jrd8mdrut7vVt9cSD84cWBWxp3ycQpOGWvTMpLx8Y07yTnWl6CiziH/nte6PFE9wRgODt87B+us2aq4q6hz8OQ4DMpYW2rOwOmmdgDek5gyi0n+PW3Nbsadj52lVhxYPRuL9h6Xf6etB23YXmIN66RtNFPGmu9xV9hZaoU1XR/tXRu14mO9A8PV3ntmoqLO4XeL90V7j+PA6tnYUmiWB3dlAlnV2IL5bx3rc/uH752D4uw0+bOzsxumXQdVz1Fud9He4/IWuKU5BhxYPVv+rqLOgZXvnJDbBSC3vWjvccwel4LtJerOa+tB7/sSj/duXIKtB23YUWNH+YoC1e1qRdui3arGFhRnpwV9r9Z0PZyd3XJfxb+V71tsg9QO3zsH17o8fn/XKa9VwbG+BOUrClSftfJvGM5d47YUmgPGgvKk6OwDxbKTfvGEXfUdUP5OGbM7S624e5IRmYnxMOoS5Ot8T65ErIrHt5dYsdScgZXvnPDbN2XbjvUlsDW7UZydFvC7AvjHWmZiPI5fbUNpjgFGXYL8u4lt+/4dRrOdpVYUZ6f5xdDKd07g8L1z8PJdUzHltSr53E0FN6uDvvETTLBYEJT9jm/fEmmfp
IxTALA1uzHltSr5uDVdj7MPFMv3FKwPD9Sf+t6KHLgZawCQpR+Lo1daAAB3TzKios4hn7fb1si4UyjNMWBTgRlbD9pUf9cdNXZY0/XYVGCWfUCo414owWJBWGrOkJ+Jb98SaZ/kG6eAt19WPt67cYn8nkXan/pSxpryuKu0qSBwMk7aGPFJtyExHq4uz6BuUxyknzt+we93lQ0u1YGpd+MS1ZdfTEEJ1RmUryiANV0vtyM6E+XryiwmVRJ8YPVsjHnpU/lc8SUUP+8stcovZXF2muqgtL3Eqjownn2gGNvmWuQXfHuJVe6L6ByUP4u2lQIleL5JoPJAJjqaK+7rqvcxXGkRd4C349xtawz4O+XB4PC9c1QxdPjeOXCsLwmYjAqhYkH8bE3Xo6LOgSmvVckDjq3ZjR01dlmhUx4wlImLNV3vd1BSfjdEbJXmGDDmpU9VJ3qiLd+fRdti+4HizjcJVMbdgdWzsfWgze91wQ5QQ51WcTc3y5uwBqJMfsXfWvQvO0ut2F5ixfGrbQGTUSFULAjTMpLl56TsRyPtk0SiLH4WsbSz1CoT79NN7ao+O1QfruxPlXz7MOW/i7PTMClVp0ruAGCdNXtYJkBaxd2a/HFwdnYHPGnbXGnzS7hDHfcCCRUL4nXK2FL2aZH2ScpRDaF34xIcvncO5r91DOUrCjAtI1nGRKT9qZJjfQmMugQA3lhT9n/KbRy+dw5ePGHH3ZOMQf9GNLhG/PQSw9jBP6+YPS4FAEIeRADvl8zZ2e1XQfM90/W18p0TquSossHllwBUNbbIL/bKd07A2dmNnaVWPD47F7Zmt/wiVja4UFHnwDprtnytrdkt912cJCg7NTEcFcjC8el48cTN54q2lXMggyWF8986hjEvfQpnZ7ccKqxqbMGLJ+yyYwmnMjFYDInanXNqEXcAYNQlhHVQLs5OwzNH6+TP8986BqMuIeRwbDixoIznHTV2VDW2YJ01G6U5BljT9djwyRn53A2fnIE1Xa+aE6k8QJh2HVR93mL4PZB11mxVzIu2lQeTzy83B3zt5kqbX6y9eML7et/3q2wv2PYGarjG3RX39T6fJyq3on/ZXGmDrdmN3y24LeTrwokFZWy9eMKOhePTAUTeJ81/65gq2Q1VhQ+nD1f2p0ri+6SMtUV7j8PZ2Y0xL33qV8Xv3bgExdlpqvc5mIZj3E1K1eFaGMl8OMe9QMKJBWVsiT6tNMcQcZ8k+iGlYH15f/pTJdOug6pYUx53xfvdUmiGUZcQ8qRksGgZe8PNiP9LWNJ0qGvtjEnbgTqMN2qvYlOBOaw5kL4VOiUxZCRc6/JgUqoOWfqxfgn6x/Ym1UEiWAVPWQEM9pxASZ+t2Y0s/Vj5sxhGDcaoS5AHqXCTSC1YUnXabTuGcSdiy/dzsDW7MSlVF9bfO1gs+L72ivs6pmUkBzwRFSeLlyMbGgAAIABJREFUyuHRQHyHdwMx6hL8TgCOXmlRHVTP9/H3VsbapFRd0CTSsb4E17o8mp0AjtS4A7zTKHw/h9NN7ZiWkRzW64PFgrOzWxVbtma3rOT1t08KNNTvK5w+vK8REWWszR6XEjCJFLEmKrThTsmJxEiOu3COe6GEioU3aq/Kf4sYnD0upd99ku+UlEDxM5D+VLkNZawpj7sAsG2uBavfPRlyG4NFy9gbbkZ80r3SYsL+i6Er0pESHXhpjiFghSPY4+FSJtti2FLLi+REgiWGUMtXFMgq0mBSTi9RJnXbS6yqKSzRsjLMDrm/2x7suAOg6nR9DTTugOjFAnAzwbI1uzHmpU8DzicfDMrvk4g1QTmFRfzsO6dzsA3XuFMmsUqDEXfRigVAnWApp6sMNt/pJb7T6QLNOa5scMHW7MZSc8agJ93DMe7Ot3aG7H8GGnvRigVAnWyLPkeri2aV00t8487W7MbppnZ8frl5wN/bcGkZe8PNiJ9e8vDUnEHfpugYH5+dG/D3Yv71+dZOZPoMq6zJHye3EYwYoh3z0qdBnzfJ58xRVJiuuK/LL5uw1JwR9KxYDJ/6DncGEyjps6brwxp6nv/WMVTUOeR723rQJg+y0U64AeDhaYMfG3LbGsQd4B2uDDb/7uW7puLsA8WqaoySNV0fshocTiz4xpaoMClPRAVx7UOgyroYPlUOd4YSKOmbm5UW1tDz5kpbwFgT/1Ym3FWNLZom3MDwjLvdtsagFzY/PjtXHtjFiJvStIzkkFW5cGLBN+6Uc+4j7ZPEdJRw+pz+9uHi976xZmt2y2sIVr5zIuxlEwfDcIy7zZU2GHUJAZft21JolosWRHrcE8KJBWU/qhxFjLRPEtNRgk1pU4q0P/Vl2nVQFWvK4+6U16qwcHw6yiwmufyiNV2P4uw0zYp7WsbecDPik25DYrwmHYKY1+e7nqs4k1z5zgnZYSifs6nArLpaPRjll/nwvXP8OhTlRUblKwrk3Kznjl+ANV0vO/LSHAPKLKag86wB9QFtS6E55JDc55ebVVNeRNvhDsVn6cfKxC+WF6s9OitX0yEvreJu5Tsn5MoKSqJSKJKWqsYWbJtrkb8/fO8cODu7+5y/11csKGNrS6EZxdlp2G1rlCeiL981VT735bumqi4qCkR5QAtV2RRJnzj4iraV8y1DCbViDuCtDIW7stBADNe421Fjh63Z7VcFFJVCsdrR++edqr5JLEf2q0Pf9tlGX7GgXIp1U4FZzpftT5+kPDEIVW0cSB8OBF8xR/Ddd/H3CnSR/kAM17gDvKNu20usqsRbVI0r6hzYUWPv13FP6CsWlP2o6NMqG1z96pOU/atYMCGQ/vanSr4r5igLLqZdB1VLf9qa3ahqbAl5oX1/aR17w82In14CAM8vzMf+i02DOudsR40dO2rsfjeI8D1wi1UYfIftQ5nyWpXfa6oaW1TzIivqHKqhS3GmXtngkssWis482BXOgPegMjcrTbbl7OyWa6CW5hiwo8aObXMtchheTDlQvudIqtTKA/DcrDS/uenRYEnV4SlFR6oVLeIO8P69zz5Q7DfvWvk5zH/rmLyBjvh9Xx1qX7EAeOfK3j3JiN6N3thSTs0QKz+I14eaplHZ4JLL8okES8TtmvxxqGxwoaqxRbVkIKCeHhLJvFdlrK3JH6eqFu0stcKoS4BRl+CXVAZbAq4/hnvcTXmtyu+7D6j/RqKfUfZNff0N+4oF8VldcV8P2I9G2ietfvckDqyeLZ//4gk7MMmIuVneSv77553YVGCWqzz0pw8XlPsvvkPKv4VYcjHY8q+DYbjHnXK50GDL80V63BP6igXAe2IUqE8TfU+4fdL8t47Jm+kB3lyhos4hp888d/yC3BdRkQ63P/XlG2vhnvgOtmjF3nAypre3tzfWOxEN1Y42zH7jb7HeDYoxQ2I8/njXVKyaPC4q7THuCGDcUWww7ihWoh17w8WIn14iFJlScHzNHRzmGMVi0Qkw7ohxR7HAuKNYYcId3KipdCs98skZvHKmIda7QVFiSIzHqsnj8NQdlpgeDBh3owvjjmKBcUexMlRibygblUk3ANS1dmLfd1dRXufQZKkjii1D
YjyKTCm4M8eAVbeOQ5Eppe8XRQHjbmRj3FEsMO4oVoZq7A1VozbpHo3GjBkDAOBHTtHEuKNYYNxRLDDuKJRRM6ebiIiIiChWmHQTEREREWmMSTcRERERkcaYdBMRERERaYxJNxERERGRxph0ExERERFpjEk3EREREZHGmHQTEREREWmMSTcRERERkcaYdBMRERERaYxJNxERERGRxph0ExERERFpjEk3EREREZHGmHQTEREREWmMSTcRERERkcbiY70DFD379u2L9S4QERERjUpjent7e2O9E7Hg6vLglTMNKK9zwNXlQV1rJ1xdnljvFg0SS6oORaZUzDIm49FZuTAkDo3zS1eXB/vOXUX5OSfqWt2jI+4ufef9/4RbY7sfGjMkxsMwNh5FplSsnGzEw1NzYr1LEuNu5GLcDTGjJO6AoR17Q9WoS7pdXR48fbQOr5xpGPlffpIenZWL5xfmx6x9xt3oxLijWGDcUazEOvaGulGVdFc72rD63ROoa+2M9a5QDFhSddh7TwGKTClRbbeutROPfHIa+y+6otouDQ1FphTsvacAllRdVNtl3I1ujDuKlVjF3nAwapLuakcblpQf51n3KGdIjMenK2dHLfGua+3Ekn3HeaI3yhkS43F8zR1ROwgx7ghg3FHsRDv2hotRsXqJq8uDRz45zYSb4OryRO3kS8QdD0Dk6vJgyb7jUWuLcUcA445iJ5qxN5yMiqT7ha8uoNrRFuvdoCHC1eXBY5/Xat7OC19d4BArSXWtnfjXI+c0b4dxR0qMO4qVaMXecDLik25XlwfbT9hjvRs0xLxypkHTigzjjgLZfsLOuKOoY9xRrGgde8PNiE+6efU0BbPvu6vabfvcVcYd+XF1eRh3FHWMO4oVrWNvuBnxSfdnl5pjvQs0RGkZG+XnnJptm4Y3xh3FAuOOYoV52E0jPumudrTGehdoiNp/qUmzbTPuKBjGHcUC445iRcvYG25GfNLtus4hLwpMy+FQxh0Fw7ijWGDcUaxw6tFNIz/p5odNMcC4o1hg3FEsMO6IwjPik24iIiIiolhj0k1EREREpDEm3UREREREGouP9Q6Mdr0bl/TrdVsP2rCjRrubEZSvKECZxYSKOgdWvnNCs3aIiIiIRgMm3USj0NkHimFN10f8Oq1PwrYUmrG9xApbsxtTXqvSrB2KDXEyH6loxIMogIx56VNN26HoE/1Kf2gdD6Iv1rqQRkMDk+4YC/aFFgcAfhGJiIiIhj8m3RQQp5SMbMGqhqLqwmlFpIVgMSUq4BzhIC3sqLEHLF4pK+Ac4aBoYNJNRENGsIMjkdaYdFEs8CRzdGHSPcwp54MtNWdgmTkDSfFxsLd14fFDtXi99goA7xn9hmk5mJicCKMuAQDQ4elBbbMb5ecc2HbknGq7wS6kVM57fHXZdCyeYIA5JREAYG/rwr5zV7G50haNt05EREQ0bDDpHiE2FZhhTdfD3taFiz3XMTF5LKoaWwAAh++dg+LsNACAs7MbtmY3AMCarkehMQWFxhTMMqVENJ1AbFNsLzMxHuaURGwqMGNaRjKWVVQP/pukIUN5UvaVow0bZ06EUZcAZ2c3dtsa5YlXaY4Bv1twG8zJifLkDPBeGPdFYwt+/tEp1XaDXUipPLk06RKwcrIJhcYUAN6Y/vxyM7ZU2lDf2hmFd0+xooyPJ7/4Ds/+8FZY0/Xo8PTg8OUW2e/kpeqwo9SKaRnJqguG7W1d+MbVgQ2fnvGLlUAXUirj/P+cbcSjs3JRaExGUnwcOjw9qHG241eHvkVlgysK755iScSH5dXD2LN8hjym1jjbUPbOCRlPO0utuHuSEROTxyIpPg7AzePuC19dkIUwIdCFlMo4/1FFNV5eMhVFphRZMKtxtuHl0w0cFRyGmHSPENZ0Pf5ytlEmMXmpOtS3dmJLoVl2Dr4XZeal6lCxogCFxhQsM2dE1F5xdhpePGFXVbXFAWqpOQOlOQYeiEaBaRnJKLOY5EFlYvJYeVK3s9SKTQVmAN5RFfH4xOSxsKbrYU3X44fZaRENr66zZqM4O01uTx93C8wpiSizmHC7KRW5fz40+G+Shhx93C3YdddUAJAn/e2eHgDA2vws7Lprqkx4RNyJwoA5JRGn1s3D9N1Hwj5Js6Tq5DaVRYvi7DS895NC/Pi/atjfjRIflhXBmq6X/Y8+Pg71rZ3IS9Xh4OrbVSO/F3uuyz7KqEvAnuUzkJ00NuxkWR93i9ymva1L9rGFxhRsL7HCpEvwG6WmoY1J9wjR4elRVQ3FwWSpOUMmRL5f9PrWTvz2WD32LJ+BpPg4maiHo6LO4TeNZOU7J9D+y0VIio/DmvxxPAiNAtZ0PWqcbZj1+t8A3DzZy0vVYf20HACBlxkUJ2jWdD3W5mf5VX+CKc5OQ1VjC+a/dUw+JqpC5pRE7Cy1cnrTKGBOSYSzsxtz3jwq+6y8VB0A4Nkf3oqk+Di/CiRwM1aS4uPwmx/e6jfSEkyhMQX2ti5M33szUS/NMWDvPTNh1CXgdwtuU8UkjVzWdD3u/+Br2WeJuNtRapVxufrdk6rjnzJWNkzLCTvpNqckosPT49eeKJZtnDmRSfcwwztSjhAX268HfHzlOydg2nUw6AFBmeysnBz++rkf25tC7sekGx0RjXy/PVYv/y0Skg3TcnCt0wNnZ3fAaUsr3zmBjhuVyRmZyWG3ZWt2+8Xyjho7apxtAIC5WWkR7z8NT++ev6ZKqOtbO1GaY5A/bzrgP91IGSs5SWPDbqvD04OSvV+qtlfZ4MK7568BQL/WvKfhqcbZpjpuipiwpOrQ4enBblujX8FJGSuZiQkRtfcvVd/5tSf6XKMuQRXzNPSx0j1CnG5qD+t5Wwq9w/1LzRnI0o/t98Gi/JyjX6+jkSdQlXrbkXN9VmAutl+HNV2PWaaUsNsKFud1rZ0oNN6c80gj39+utPg9Vtng6nO6koiVSAoDF9uvBxwF/NuVFjw4JZtxN4rUBRkNFqN9wYhYUV7b8n/Zu/vgJs48X/RfBxtL+EUykm0cZFskFgMBGxt7GBMsEiCTnHBrTRjuhrDMbM3APVP3csFkarNza1ObzG6yNeeeOTmV4WWyVXMrZGqyuSR7KsPYt06ymUlCgp3Yw0AwdhIIYhIDIsYg2bKxLRnL8f1DPE23uiVLtlryy/dTRWG99fOo9eunf/3000/HQqtX/I2L1/H6wysAAFX52TyrPIMw6Z4DGips0oWW4byB0UktkxesEXBnvOxEtpcVoHDBfHy7IBdFC+bjW+YFce98AOCsZzDuz9DsFMspemeRGVX52XCYjCjJMagurIzVZbZ3dFuks7xypTkGbFlihdWQgVXWbNhzDCibRNy5B0cmU0Waxph0z3LhF7Nd7Pej62YAl28G0NLdjzcuXpeuyiZKJDFzibiQV244OIbh4Jh0sVusPJM8SKS5Q8xcIqZPDecNjMbdMy0u0iSK5pDTgR0O7TMf3sBo3O2df+ybRFWNpgkm3bOcuJgt/OIzYXtZQbKrRHOA1gwS5/qGMHBrDH++PoCDHW5pqiyiRNGaQeIL3zC
GgmN4z92Hxq88OOh0oN4e+/UrRLHQmpr3uv8WznoG8ccrfajKz5bufklzF5PuWU4kPUddPZqv/83SwmRWh+aIp6tLpZs0hV+AJizOiv1CNqJY/Mt37pFmfNj1/nnN6w3svMibEmx7WYGUcIdPpSv8eMXdya4WTUOcvWSWEzNE7L7d4y2U5hikaduIEs14+2DvE89NzYS7ocIW96lWoonkzg/F1NWhW5oJd+kkx9YSRVMomwkn0pSl39EYZkdzD5PuWe7IuW4AoXlmh368Hhd21uLCzlp0/WAt6u1WuPr9k76YkigS/+2DvXWLTNI8tsIhpwP/pfaeVFSLZrmBW6G4E/O/yzVU2NCydTUP9ijheobvTNn76kP3KV5zFplxdvu3OZSOAHB4yay3r9mFvpEgnnAUSncBDN2++M5tZFu3VcNiyMAORyFvK0sJ8fPTl3Bk4zJYDBno+sFaxd0oF6TPgzcwKk0ZSJQo//inL/Hg3WbYsjPx+sMr8ML9ZfCPfYOFmemwGDKktq/CEvs0lUQTeePidTy5qhi1hbn4/tJCPFqyEL0jQelulEDouiqti8ppbmHSPU2lvXQ8pvfFcgvtieZM1rrAUuuGJrHUK55betPsJU7tP11dirLbB3tA6ILKdy57sa/ZhUNOBxzlNqxbZEplVWkWuXQzgLpjn+D1h1fAYTIqLqhs6vKgodmFkmwDTmytgsNkhLPIzDmOKSHWvnkajZvLsdqaI9323RsYRVvPAH768V/Q3O2DZ1cdLIYM3jl3DksbHx8fT3Ul9BRr8kpzk17TJTLuKBrGHaUC445ShVMTh3BMNxERERGRzph0ExERERHpjEk3EREREZHOmHQTEREREemMSTcRERERkc6YdBMRERER6YxJNxERERGRzph0T5FnVx3G92yQ/jVuLo/5s42by3FhZy2A0K2xPbvqYvrchZ21cZUzFQ0VNl3m15Qv88LOWjRU2DTf4ywyJ7xsIiIiomRj0j1JziIzxvdswFFXD9JeOi79q7db0bqtOtXVm9YaKmzSbcEBwGEyqm4/z4n0o2vdVq042BMHb7GQH0iJOI7l4EZ+kJgM43s2aB6MTYX8AK9xc7ni4FW+PmM9AJ5rDjkdivUU73Yq/009u+pwyOmY8DN6HfhHokenhjzWGipsiu2ocXM5424Cop2S/4unbZD/pq3bqmPeR+vRBkWiR/saHmuRtqMLO2uZtyQJbwM/ScceXYmmLo/qVq7rj53Bia1VaKiwSYmkPNDbegY0b7sernVbNWoLc6XH3sAorEdaFO+RL3f9sTPS7YydRWac2FolvdbU5ZFu6y42LLHs9cfOoCo/GwfqlDu//S2h7yWeH9+zAftbXDjY4Ubj5nLU262qskW5bT0DqC3MjfhdHSYjvIFRqa7ib/n3FssgNc+uOrj6/Yq7wF3YWQvPrjpVjJCS/ABveV4WDneG/vbsqlNsJxd21uLCzlosfa0tZXWdblq3VcNhMiriTiSM8vaH1OSxtsmWh3N9QwBCBzH1dqu0Tlu3VTPuwhxyOrC33KaIsYYKGw7UOeAwGXk79SjksRbe2SUccjoU+2TSF5PuSXAWmWExZOCFM1dUrzV3+xQ7pfE9GxQ7czEERTzW0ri5XLFzE8ms/HP1dqsiCT6xtQppLx2X3nu40419zS7p8SGnQ2qcagtzFYnygTqHtCwglHA8W2OXErgDdQ6pLiLhlj8WZctp3RY4/EBCftAwvmcD0l46juv+W4rvQUqt26rROxJUHcwsfa0Nnl11ihgJX9+x3KpZ7Mzk5LEBhOLDYTICgBRnWq/JDxQPOR14pMSChZnpsBgypM+F97yIbUU8f6DOgU22PGx5q1NVN3nZ4kCktjBX8wAVUB/gLcxMx5kbg9L2LN8mD3e6VethLjvkdKC2MFcVQ1ve6kTrtmq8vHGZlCiKJEkIj59IIsWCID/YDz+gj7cjQB6nAODq92Ppa23S8w6TUZH8Ruo40erE0Dr4ELEGAAXG+Th1fQAA8EiJBU1dHul9R109jDsZZ5EZe8tt2N/iUqzXgx1uOExG7C23SW1AtM6maCLFgrDJlif9JuFtS7xtUnicAqF2Wf682BeG1y2W9jScPNYiJdZ7y7WTcdIHk+5JqMrPBoAJe3YOOR3wBkZVO3P5DklLeEPR3O1TbSxtPQPSjmzLW53SqdqSHANc/X5pA2zu9qGpy4MdjkLpOVe/X6p7+EECAJzrG8K6RSbNuq1bZJJ6bORlN24ulw5Cjrp6ND8rdlSeXXXY+vanaO72oXVbNU5dH5DqFksjOZc5TMaI61e+MwjvlWzdVj1hT3i0AzDx2GEyoqnLg6WvtUk7HFe/Hwc73NJpTPkOQ564OExG1U5JvmMUOx5nkRlpLx1XnF0RZYU/FmWL5WsdWIQngfIE6sTWKuxvcak+x54fpZqCUMKqRZ78hvdKHnI6cKDOgTM3BqO2l9FiQVielyX9TvLOi3g7AkSiLB6LWDrkdEiJ97m+IUVHSbSOE3knhlx4Eij/u7YwFyU5BlWP9g5HIRMgmcfL8uENjGoetO1rdqkS7midTVqixYL4nDy25G1avG1S+FkNIBRLrduqsfbN02jcXI7leVlSTMTbnsp5dtXBYsgAEIo1efsnX0brtmoc7nTjkRJLxHVEicUx3ToqyTGgdySoeO7fL94AgJjG0MrHT4oNSBBHr0LvSBAlOQYUGOerkoX33H2Kz0dKJkRZ4UfichZDhmqn4Or3o8A4X3osenSiLUPsoLSWR5HFur5qC3Px3Kku6fHaN0/DYsiIOoZWHIDJd3Di1KQgP4g82OFGW88AdjgK4Swyw2EyYvf756X37n7/PBwmo2JMpHwHYT3SojjIes/dF7FuOxyFigNNUbZ8Z/LRtX7Nz+5rDiXVbT0DONzpRtpLx3G4M/T58O8rLy/S8uYiiyED1/23Jnyf6LkV2/e+Zhdc/X784v57o34ulliQx9bhTrfUMaDVEeANjCrGZcsPVNe+eVqR7EbrhY/UcSJvI+WdGHJie5LH2vpjZ+ANjCLtpeOqXvzxPRtQW5ir+J5zndY+VMtTVcURO5uiiSUW5LEl2jRnkTnuNkm0Q3KR2vLJtKdy1iMtilhLe+k4vIFRrD92Rvq+DRU2WAwZHJ6TZOzpngSRVDqLzJqNbaTnYyXvmRM9KHpeYCN6/kRvTuPm8og93VMhH+4g7208UOdQDGGhqREHdOEHP65+v3QmZCLy30d+kBb+2ev+W1iel6V59kecoZGfHtUSfnpXi8WQoToAOHV9QLFTvXwzMOEyRP1LcgwRk0jPrjr0jgR51mUSFmamq36Hc31DWJ6XFdPnI8WCNzCqiC1Xv1/qSJhsR4DWqf5wkTpO9pbbpO1sojMi8lirys/WTCJFrIke2liH5FBIpM6miX5fIVosiI4y4E77VpWfPek2KXxIilb8TKU9lS9DHmvyzi4AeLbGjq1vfxp1GZR4TLonobnbB1e/H09VFWsm1ye2VqGpy4PLNwOq5PXxsnxpGU9VFWsuX/QWRdvpl+QYFI/lO7vwHdwmW17EDVT05M
R6AZ58oxccJmNMvYLiFBoAaYzu3nIbLxqKg9b6F6Z6sAck7wAMuJNgiYtCtcaTJ4L8IFYc4AnyISzicfiYTgrFnTyJlUtE3CUrFgBlgiUfrpJo4cNLwq9h0Wrjxb5lky2PSTeguQ+Vm2rsJSsWAGWyLdocvWaDkg8vCY87V78f5/qG8NG1fl78nAIcXjJJ4hRj+NRSIqi3vNWJfc0uWAwZivfsLbcpLpyJRL6Da91WrRpeIh/v2Li5XDpN9MKZK3CYjNIwAmeRGfV2a8RxwAAUy26osEXtHfjoWr/i9JkoO9ZewQLjfOnggONm4/fRtf6I4+9e3rgMF3bWKnpj5BwmY9TeYHEAFn7qWy48DkUPk/zsjyAuUNTqWRenT+WnO6PRSvpqCnJjOvW8r9mF/S0uKaETO1fxtzzhbusZYMKt4airJ+JsQk9VFUs7djHMTW55XlbU7TyWWAiPO3nbEakjINKZDDEcJZYza5dvBrAwU9k3Je84iUZ+vYyINVe/X7qGQH4tDmkT+1CtafsaKmzSTGHX/bdUMRKts0mIJRbk7aj8LGK8bZIYjhJpSJtcvO1pOOuRFkWsNXV50NTlQdpLx7H0tTasW2RCvd0qDSl1mIyoLczllJVJwKR7kg52uKV5ueVzh4bvtMPfE8sV1Utfa0NtYa70mev+W2jrGVD0YDd1eXBia5U0Bls0Gs3dPqw/dgZ7y0Nz28ovLtGyr9mFtp4Bqaxna+zY3xJq6JxFZhzscMMbGJXmK93yVieaujyK8d/xDAtxmIzS6bqaglzV2HSKbstbndLMCnKip1DEXlvPAJ6tsUuvt26rhjcwOuH4vYkOwOQHdA0VNtQW5uKoq0fqoXt54zLpvS9vXKa4qEiLfIcWrWdTJH1i5yvKlo+3jCbaNJVAqGco1uk856KDHW64+v2qXkDRUyimGH3nslfRISCmI/vpx3+ZsIyJYkE+j/Decpt0dm0yHQHyA4NovY1T6TgBIs+YI4TXXawvrZmx5qqmLg8O1DkUibfoNW7q8uBgh3tSnU3CRLEgb0dFm9bc7ZtUmyRvX8UsZVom257Khc+YI+9wsR5pUdxfxNXvR1vPAKecTQIOL5miWBLOSO+R7xTkV2JPtNyJeuK0ZiQRtJIKrefkG3b4hhhpZxatXK1lRUtwYlnWXJX20nFc2FmrGnctX19r3zwt3UBHvD5Rg7qv2YWaglzFZ/a3uHCgziElUa5+Px4psWB8T2hHIx+aIWZ+EJ+PNkyjudsnTcsnEiwxx/3jZflo7vahrWdAMWUgoBweEs+4V/kB3uNl+YreokNOByyGDFgMGaqkkvNP37H0tTbpgj85+ToSbZh8WMVE63CiWBC/1XX/LdXwJwDSMCh5vaK1HVvf/lTqsABuXyhXYkFNQagn/53LXuwtt0mzPIiZdLTKnoi8/mIbkq8LMeVipHsu0J2hiOFDw+SdSaKz6cTWKukgJlpnkzBRLAChAyOtNk20PbG2SWvfPC3dwRoIdYw0dXmk4TMvnLki1UX0SMfanoYLj7VYD3xJf2nj4+Pjqa6Enpi4UTR6jd9j3FE0jDtKBcYdpQrvMh0y64eXmDPZmU9EREREqTX7k+75TLpJm54HZDzYo1Rg3FEqMO6IYjPrk257rmHiN9GcZM/RLzZ4sEeRMO4oFRh3lCp6xt5MM+uT7i0xTo5Pc4+esfHg4jzdlk0zW6U1R7dlM+4oEsYwc97hAAAgAElEQVQdpYqesTfTzPqk+4fLilJdBZqmfrhcv9h44G59bihDM98qS2x3Z5wMxh1FwrijVNEz9maaWZ90mzPTmXiTypOrinU95fXYknzdlk0zlzkzXdeDPcYdaWHcUaroHXszzaxPugHgxXVlHFNEEnuOAT+T3fBAD+bMdN3LoJnnh8uK9B1by7gjDYw7ShW9Y2+mmRNJtzkzHcceLZ/4jTTrmTPT8WJdWVKutv+nNUtQac2e+I00JyTjYA9g3JES445SJVmxN5PMiaQbACqt2Tjz+Ld5xDWHmTPT8crGZUk9FXrs0XJOp0XSgX+yYoFxRwDjjlIn2bE3U8z6O1Jq+dH75/Gb892prgYliTkzHY8tycfPvm1PyUFX180Atr7diXbPYNLLptSrtGbj2KPlSY89xt3cxrijVElV7M0EczLpBkINw++/vIHGLg8+uOpLdXUowcyZ6ai0ZuOBIjMeuyd/Wpz2/KeTX+GfT3WluhqUJObMdOwvt+HJVcUp7e1h3M0tjDtKlekSe9PZnE2656K0tDQAAH/y1Prl2Sto7PKg3TMI30gw1dWhBJIf7P1w+fS6gIhxN3sx7ihVpnPsTUdMuucQJt2UCow7SgXGHaUC446imTMXUhIRERERpQqTbiIiIiIinTHpJiIiIiLSGZNuIiIiIiKdMekmIiIiItIZk24iIiIiIp0x6SYiIiIi0hmTbiIiIiIinTHpJiIiIiLSGZNuIiIiIiKdMekmIiIiItIZk24iIiIiIp0x6SYiIiIi0hmTbiIiIiIinaWNj4+Pp7oSqeAbCeI357vR2OWBbySIrpsB+EaCqa6Wvjo+Cv1fsS619UgCe44BldYcrLJk4clVxTBnpqe6SgDmaNwdfzP0/4Ztqa1HEjDuphHGXcox7ma/6Rp709WcS7p9I0H886ku/OZ89+zf+Eny5KpivLiuLGXlM+7mJsYdpQLjjlIl1bE33c2ppLvdM4itb3ei62Yg1VWhFLDnGHDs0XJUWrOTWi7jbm5j3FEqMO4oVVIVezPBnEm62z2D2NB4hkfdc5w5Mx3Ht1QlrTFg3BHAuKPUYNxRqiQ79maKOXEhpW8kiB+9f44NAcE3EkzaToFxRwLjjlKBcUepkszYm0nmRNL9y7NX0O4ZTHU1aJrwjQTxk48u6l4O447kGHeUCow7SpVkxd5MMuuTbt9IEAc63QlbXuPmcozv2YCGClvCljlZDRU2jO/ZgMbN5amuyoQu7KzF+J4NSf9sJL85363rmMNEx910+63H92zAhZ21qa7GhKayveqxrc+0uAOm12+tR1ugh6lsr3ps6zMl7mbC7zudcoCJTGXbTdRvoXfszTSzPunm1dMUye+/vKHbshl3FAnjjlKBcUepomfszTSzPun+8Ov+VFeBpik9Y4NxR5Ew7igVGHeUKoyPO2Z90t3uuZnqKtA09cHXfbotm3FHkTDuKBUYd5QqesbeTDPrbx3ku5WcU16Nm8uxbpEJFkMGAKDDO4ifn76ENy5eV7xve1kBnq4uRYUlNI2ONzCKj671Y8tbndJ7SnMMOOh0YLU1B7bsTOl9rn4/nvjDZ7gU5/goMabrmT99iRfuL4MtOxPDwTG0XhvAQ03teG7NEuxZuRgWQwa8gVG8fbkXP3j387jrrbUuxPK0lOYY8PrDK1BhycKC9HlSnXYfPx/3d5wMPU+HJivunluzBD9aViTFiXtwBK+c78azJ79SvE9rXXd4h1TxdMjpwCMlFjhMRgDAcHAMF/v9mrE8kcbN5ai3W2F/tRVNm8ul2OnwDqL+rU7UFubi+e/cA4fJGPG3j7Xe4etCLC9a3cK31
70nXGju9sX1HSdjNsTdZNsDQLttbKiwYffyIpSZjFiQPg8A4Or343VXjyqWJ9JQYcOBOgf2t7hQb7di7aJcLEifB/fgCJ76+CLaegbw+sMrUFuYK9VH67efbJve4R3Eia+140hre/1v7ZdxsCOx4/C1zIa4C8f97p26y9tJEVdaUrHf5dCjO2Z/0p2EH/vK394PW3YmXP1+fHStH1np87B2US5ef3gFChfMlxpUsTMYDo6hrWcA1/23sDwvC/V2K96tr8RDTe0AgD/WV8JhMqLDO4hPbvcgrFtkQm1haJlr3zwddx0XZqbjyMZl6PAO4RPPTay25mCTLQ9nt38bZSYjWq8NYCg4hodsefj+0kL8+fqAVO/n1izBMzV2DAfH8J67D0PBManeF3bWYulrbVI579ZXYpMtD97AKJq6PCgwzsf3lxaq6lOaY0DL1tXSejvXN4QC43xssuWhZetqFP/247i/43SSjLgLX9dAKE6eqbFjlTVbaphLcwz4fMcaLEifhw7vILpuBlBgnI/awlzFuhZJsntwRFre8rwsVFiy8av1S+NOuoWWravhH/sGTV0e2HMMqLBk44/1lVicNR8d3iGc6xuS4vGg0yHV21lkxn/8VQUWpM+TthdR7893rMF9R09KO4nwGAUgJVvhWrdVo7YwV1pvYnv9j7+qwH/6/zqSknjrJRlxF097cGFnLRwmoxRTYl0f2bgMXw/dQnO3T2oXvYFRqR0Sv/MzNXb88UrfpH6Tv68sgTH9Lrzr7kNW+jxssuXhV+uXwh/8RhWPL29cpqh3rG26s8iMIxuXKWJ03SIT9parL7I75HRgb7lNsd7WLTLhQJ0DVkNG3AcX00kqkirud+/Ea/i+dHleFg7UOVT1me373Zlg1ifdejvkdMCWnYm2ngHFRrm9rABHNi7DszV2aSP6+8oSDAfHsOv984oE5uz2b+Nb5gUozTGgJNuAhZnpquUBgGdXndQ7Ey+LIQP/dqFHOpIuzTGg6wdrUWHJxv4Wl6qB2mTLk57bs3KxZr1F8nLI6cC+ZhecRWZssuXB1e9XNAhimXIHb683eZ0A4NWH7sP3lxbi1YfuUx310x1iXbsHR1B37BMp+RSNar3dCmeRGc3dPry8IZQUHO50Y1+zS1rGqw/dh0dLFqKhwoaDHW4sz8uCNzCqanjF7yzeF6/ekVGseuPP0uMrf3s/HCYjmro8qgOD5XlZ0vt+cf+9mvUWyYt8RyhiVJ6Iyw82hIYKG2oLc9HhHVTUaXtZAV5/eAUOr3conie1WNuD59YsgcNkjNg2/njF3Wju9mGTLQ8AsPXtTxXJtfidn6oqnlTSbUy/C9X/45QUD+IgNbx9EgcG8nJjbdNFjMrbUK1lluYYsGt5EbyBUUWdSnMMOP3XNfi7yuIZnXQnG/e7d7YzsS7k7SlwpxNFjvvd1Jv1Y7r19kiJBQDwxB8+Uzz/xsXraL02AIshAw0VNjiLzLBlZ6LDO6TqMVz1xp9R/NuPcelmAM3dPliPtGgeVfdOsTdBvjGJRt89OKLYWYQnVQ0VNlgMGWi9NqCqt/jOYh38eMXdAIDXXT2K9x3scMM9OKJ4bt0iE4aDY6oN/Afvfg5vYBQP3m2O+/vNJU9VFQMAXjnfrTgleOlmAK+c71a8p9KaDW9gVJG4AqF1bT3SIv3mS19rg/VIi6qs6/5bU6rry+e6FY/9Y98AABpk9bl0M4CrQ8pyagtz4R4cUdV7X7ML7sERaUe4vaxAitHwdRE+xGSHI3TW5eenLymef+PidbT1DKDCko3SHMNkvuacEE978N3ihQCAn378F8X73rh4HVm/PiFt+1ve6kTaS8dVibWr3z+lun50rV8RD0PBMQDq9ulc35DicaxtOgBUWLJUbahWGU9VFmNB+jy8fblXFaNHXT1YkD4Pz61ZMpmvOSdxv3tnHay/va9sCGsnwx8D3O9OB+zpTgD34IjmWKhzfUNSL05VfmhMVjwJTEOFDQ6TESU5BizPy1L0nCSKSIAm8vE19dXHl24G4A2MSo9z54d6FP94RX3RxBe+YWmcHABpHFukuWjl76XIwhNaILT+n6mxS48thoy4EpjtZQVYsTALq6zZsOcYUKZD3AGIafzgF75hzefdQyNSjBQumA9AnTwBobgV2yAAaezn3ywtxN+EDXsSr21ZYk3KGNuZLJb2QKzPWHupnUVmVOVnY5MtDwXG+bq0dwDgkdUxkljadAChIVtD6rh7+Vy3YhssuX0gV2Yyqtq8AmMoflfxdtlx4X43xJg+D97AqGpdXLoZUHV2cb+beky6pyFxGlTwBkZxdWgE3sCotCObaUQvk5zFkKE6/UWpc8jpwK7lRdJwjOHgGK4O3cLVoVu6JUB6i5RgMe6mj+1lBfjV+qWKts3V78fVoZEZ295FOqCc7DAF0t9s3O9qJffc76YWk+4EsGVnojTHoGpo5eNTz9wI3R5X9GrIvfrQffjePVbsev886opM2GTLQ1vPAH559ori1NKFnbUp2/jvX2RSPVeaY4DFkCGdfhu4FUqsv1ucp+rdkq8L4E5CJx9bSfHbvbxINRb0u8V5isfewCgWZqo3dWkcc6cb/37xBvaW2+AeHME/tH+p6Olt3FyesqT7W+YFms/bsu70yPQMh3qxwmMMgGInCgD+2wd/aS8dT1QV56RY2gNvYBQOk1G6tkBu6Mfr0eEdwto3T+NX65fCmH4Xnj/VhZfP3RkupXUtSLLE0qYDoXZMHotC+N0KRdsYPvabJo/73dB25g+OwWIyaq4Lh8moOMvJ/W7qcUz3FL1z2QsAeP3hFYrnt5cVYO2i0AwJBzvcaO72wT04ggpLFraXFUjvK80xSOOo3rh4XToN+ccrvYoNf3tZQUoSn4MdbngDo1i7KFdRb+DOdxbr4NeffQ0A+NGyIsW4WK26d3iH4DAZVTun7WUFGPrx+mlz2+np6oUzVwCo13VpjgE/WlakeE+7ZxAWQwYOOZUJzJOrQmO+W7r7pdOwX/iGFUlBaY4B6zQa/mRo6xmALTtTVW/5RVRAaLvRilGtuoup3MJPr5bmGHDlb+/H0I/Xc0x3FPG0B3+8Epoq9Bf336t43yGnAwvS5+Hi7WTAYshAbyCIZ09+pUgadi8v0u17RBNrmw6E2jGtGA2vu2gbtWY1ad1WPWNuKz5dcL97Zx2INi18Xbz60H2q5XK/m3rs6Z6ifc0uPLYkH7WFubiwsxbn+oakqYsWpM/Df2+/Ir33qY8v4sjGZTiycRn+8313S1NGWQwZONwZasTPegZRb7fi7yqLcf8iE4aCY9K0VsPBMc0p0PT20qdX8UyNXVFvMdZNfqFbc7cPTV0e1NutOP3XNfjoWr809Vd43Z/4w2do2boaB+oc2L28CF03A4r1Fn4hEik1d/vwnrsPm2x50roGIMXTe+4706ztPn4eLVtXY2+5DTUFudKUWWJmiTcuXoezyIzh4Bg22fLQuq1amp6vwqLuPU6Wn378F/zHX1Uo6i2PJ/kFes+d6sKBOocUo0BoysBw+5pdqCnIRb3diit/e79iajCx3pIx
R/xMFmt78OzJr/Dd4oWKtlH8fu7BEfzjn74EEBqba8vOVLSfldZsGNNT0ycUT5v+xB8+w+c71ihidN0ik6ruzd0+HO50Y2+5DZ5ddWj3DKrWG3vA74g05vjyzQD2Nbu435VtZ/uaXXikxKJYF+J9w2HDOrnfTT32dCdA8W8/RlOXBwsz01Fvt2KTLQ8Xb0+oLz/1/8bF69j1/nlc7Pdjky0P9XYr/MFv8PypLsWO6nCnG/7gN9J7jOnz8PypLvzuy9Dcycm+yv3Zk1/hiT98pqj3wsx0NHV5VNPLbXmrE8+f6oI/+A3q7VZUWLLQ1OXBu27lxZWXbgZQd+wTtPUMYHFWpmK97W9xcfqsGDzU1K5Y1/J4EnPPAsp1XWHJUvx+4mr95m4fdr1/XpoVpN5uhS0rE++6+7Dr/fMAkj8Ournbh/uOnlTUu8KShbaeAdx39KRiyMLBDrciRjfZ8tDhHVLsfIW1b56W5iGXr7fDnW7FeiNt8bQHYl2LtlH8fvJpLuuOfYIO7yAcJiPq7VZUWrPh6vfjvqMnMRwcw2prTtK/Y6xt+qWbAVWM9o4EpW1Gbl+zC8+f6kLvSFC13uqOfZLMrzftie0y/J+YsQPgfldu6WttinWxMDMdz5/qUs0Ixf1u6qWNj4+Pp7oSeuLYTYpmfM8GXZbLuKNoGHeUCow7ShW9Ym+mYU83EREREZHOmHQTEREREemMSTcRERERkc6YdBMRERER6YxJNxERERGRzph0ExERERHpjEk3EREREZHOmHQTEREREemMSfc011Bhw/ieDYrb4o7v2YALO2sn/Gzj5nKM79mAhgpbwuoTa9k084X/1hd21sZ0gwOtmJ2qWMummS/8t46nHUt0+6RHG0rTU/hvHU87luj2SY82lKYHJt2kqTTHgMbN5Xj1oftSXRWaYw45HWjdVp3qatAc01BhY4cCJZ2zyIzWbdU8sJsj0lNdAYpfMm65u2WJFfV2K5q6PEkvm6anpa+1JaWcveU2uPr9KSmbpp8tb3UmpZy95TY4TMaUlE3Tz8EONw52uHUv56mqYtQW5uKoqyfpZVPysaebiIiIiEhn7OlOoENOB/aW2/BvF3rwg3c/13ztcKcb+5pdKM0x4KDTgdXWHNiyMwEA3sAoXP1+PPGHz3DpZiBiOeN7NsDV71f0/j23Zgl+tKwItuxMDAfH0HptQPOzsZTbuLkc9XYrAKDebsX4ng3Y3+LCwQ63ZtnbywrwdHUpKizZ0vI+utav6iUa37MBTV0enPUMYs/KxbAYMqS67j5+Pup3psicRWac2FqFDu8gVr3xZ83X2noGsPbN0wBCsfhIiUXq1RsOjuFivx8/P30Jb1y8HrGcCztr4TAZFWc7wn/7Du8gTnzt0/z8ROU2VNhwoM4BAHCYjFK8bHmrU7Ps0hwDXn94BSosWViQPg/DwTF0eIdU248YMvDMn77E89+5Ryq/wzuIvSdcaO7Wri9NzLOrDsb0u5D16xOarwGA9UgLgNDwjd3Li1BmMmJB+jwAgKvfj9ddPXj25FcRyxDtkWiDAPVv7x4cwX9rv6z5+VjKlY/HlbdxWmWLOq1bZILFkAEgFEvh24/4rP3VVrz+8ArUFuYCgFRX9mROXuu2atQW5uKJP3ymarPEa+uPnUFztw/OIjN+cf+9cJiM0u/lHhzBJ56bUc9kiPZItEGC/Lf3Bkbx9uVezc/HUq5o1wDgQJ0DB+ocSHvpeMSy5ft5sbxXzncrth/x2f0tLmyy5eEhWx4WpM+T6hqem1Bysac7gfY1uzAcHMODd5tVr62/24zh4Bj2NbsAAH+sr0S93YrekVE0dXmkYRy1hbl4/eEVcZX73JoleKbGjoWGdLzn7kPrtQGsXZSLTbY81XtjKfc9dx/aekJJu6vfj6YuD87cGIxY9usPr0CZyYj33H1o6vKgdySIertVc3zkamsO/q6yWFquP/gNNtny4v7OdEdztw8d3kFUWLJRmmNQvPbjFXcDAH559gqA0A5jb7kNxnl3Sb//1aFbqLBk41frl8ZVrrPIjCMbl6HCko22ngE0dXmwOCsTe8vVYxNjKffMjUEpHr2BUHy+5+6LWPbnO9agtjAXHd4hNHV50OEdQm1hLj7fsUa1HhZmpuPIxmXwB8fQ1OWBq9+PCks2jj26Mq7vTEpvX+7FgvR5eG7NEsXz28sKYDFkSAmJSAQWZ2Wi9VooVtp6BuAwGfFMjR3OInWbGU3L1tWoLczF1aFboXZk7BvpgE0u1nKbujzwBkalv9+57I1Y9pW/vf92GxqUYrTMZMTrD6/QHJfbsnU1bFmZUtm27EwcqHPE/Z3pDtGe/ef77la9VmHJQod3EM3dPpTmGPAff1WBCkuWtM95z92HhYZ01NutOORUx0w0797efwKQ2pHvLy1UDUuKtdx3LnuloXSiDY1W9jM1dhjT77ShxvS78EyNXfOCy7+vLMG6RSa0XhvAe+4+GNPvwveXFsb9nSmx2NOdYGLH7ywySz1oziKzlJiIxwsz0xW9j4JnV53UIxKrPSsXYzg4hvuOnpR6+EpzDPh8xxqpZyeeckUPTG1hLs71DUXtDRBl73r/vKLHQfQ2HHI6pAMNALBlZyp6J0pzDDj91zWosGTF9Z1J6cTXPlRYsvFUZbFifT9ashDewKi0vpfnZcEbGEXxbz9WfF78Xg0Vtph74H5x/71YkD5P1Qso770RYi23uduH8T0b0DsSjBp3omxx5kgQZ5Ref3iFIsYthgzVGaiz27+NCks2tpcVRO3hp8h+/dnX+P7SQny3eKGit+3JVcXS6wCkDoCtb3+qOLMgfq+nqopjPuNwyOmALTtTswdSJERCrOWKsykWQ0bUuBNlh7eh28sKcGTjMjxbY1dtP70jo4ozUK8+dB++v7QQP15xN8+yTNIbF6/jV+uXotKarXj+kNOBBenzpLNtu5cXAQCOnOtWtBPiDOAjJRYALsTCWWTGJlue6kyv/AydEGu5+5pdaNxsgMNkxFFXT8S2V5TtHhxB3bFPFPv5lq2rUW+3KnIOADCm34Xq/3FKeu/2sgK8/vAKrNfoFKTkYU93gokjcNHDKP9bvNbc7YP1SIsq8QWA3pFgXOWJHqXWawOKU+qXbgZUQ0wSWS4QamxE2eFJyxN/+AwAbjcud7j6/Yr3XroZQO9IUHFwQPETZ1nkDWp4byMQuiBRnO6Xu+6/FXeZFZYsuAdHVDuK12UXBOlRLhA6IHQPjih2aEBoPbgHRzQPXMNPq3bd3l4KF8yfVB1IfpZFedAs720EQhckpr10XJVkhl8wGwsR4w1hv33440SXC9xpz0T7Jrxx8Tparw3AYshQ9Xa/fK5b8fjP10Ptcu58tnlT8fblXlgMGdheViA9F35G+dmTXyHr1ydU7cRkDnbEfjy8fTvY4YZ7cETxXCLLBUIXWwLAK+e7Vfv5V853K94jfHStX/Fesd81cl+bUuzpTjBxBP5oyULpuQfvNit6G+UaKkJXzJfkGLA8L0vVQzgRkTCc6xtSvfbxtX7NISaJKDe8nHCXbgak07WUHOFnWcSpV9H
bKLe9rAArFmZhlTUb9hwDyibx+y9In4eOIXXcvXyuG8/U2DU/k4hyhS98w5rPu4dGpDGPpD9xlkWc1XpuzRJFb6Ocs8iMqvxsbLLlocA4f1LtjvH2+NTwa0Au3Qyokp9Eliu4B0c0rz851zcUsb2lxBNnWZ5cVYw3Ll5HaY5BcUZZrjTHgC1LrPh2QS6KFszHt8wL4i5PHCT98Yp6yNsXvmHNNicR5cqFH8CJ+kRqb2n6YdKtg7cv9+L7SwuxvawAXw/dgi07E/92QXl0/G59paKB9gZGcXVoBN7AqHTRxVR5NJLeZJRLqfHLs1fw+sMrpNPWldZsRW8jEDr9umt5kXRmYTg4hqtDt3B16NaUEhE5rYQkGeVSauxrdmHX8iKpB/q7xQsVvY1A6GDrV+uXKtoYV78fV4dGEtru+Me+UTxOVrmUfOFnWZ6qDPX0ijPKQCjp/WN9paKNcQ+OJPzAfCg4pnicrHJp5mHSrQNxBC6/yEPe23jI6cAmWx7aegbwy7NXFD3gYlxhrHqGQ6fnl+epx0SH97oksly5+xeZVM+V5hhgMWRMatgKTY44y/Lg3WY8t2YJLIYMxdyvziIz9pbb4B4cwT+0f6majSHe5Hc4OAZblnoHEn56PdHlCpF6jbTqRPqSn2WpLcxV9Tb+av1SGNPvwvOnuvDyuTunyLXGw07EHxyDxWREaY5BdYDnMBkVQ0cSWa5gy87ULFurDSZ9ibMsz61ZgseW5KvOKL/+8Ao4TEb824Ue/PqzrxUdEPHeQXLgViix/m5xnmqYSPhvn8hy5XYvL1LN9PPdYp5dmUk4plsH4gi80pqt2dtYcntmhT9e6VU0ENvLCuJOQN64eB3ewCjWLspVjG0rzTFgXVgynMhygdBYNq2yAUizkUSbBYAS7+3LvaGLVR2Fqt7GqvzQRUdf+IYVia9WrMSiwzsEW3am6mp4cRGRXuUCkGaBCC9bfqEbJY/oXXx54zLFY8FiyEBvIIhnT36lSFbDYyUWYthK+IxHWnfPTWS5wJ32LLzs7WUFWLsoF97AKKcCTCJxLcsTjkLYsjNV0/eJjqR//NOXin3wZO60LDrOfrSsSDE7ktb+M5HlAsALZ65oll2aY8CPlhUp3kPTG3u6dXLia580ddrRsAsvznoGUW+34u8qi3H/IhOGgmOw3x6PNhwci/uiwudOdeFAnQNHNi6TetfXLlJfSBZPuY1feXCgLjSfd+Pmcrxw5ormRSAvfXoVz9TYpbKHgmPSGHGtC91IX+Isi8NkVCWeZ24MYjg4hk22PLRuq8Z1/y0UGOdPeuaYJ/7wGT7fsQZ7y22oKcjFdf8trFtkgjFdeSwfb7nuwREszpqPxs3leM/dp5nE/PTjv+A//qpCUXaBcT5qC3MxHBzDTz/+y6S+E02OOMviMBk1r19xD4ZOq1/YWYtzfUPISp+HSmu2KlZisa/ZhUdKLKgtzJWWJ9qc4bDT/PGUe/lmAA6TEe/WV+Jc35Bm27Wv2YXHluQrys5Kn4e1i3KxIH0e/ns7E59kE2dZAPX1K+I3Pf3XNfjo9rVH4h4V4bEykeZuH5q6PKi3W6Xlydsc+f4znnLFfnn38iJssuVpzp7T3O3De+4+bLLlKZYp5gt/z93HmXBmCPZ060QcgYf3NgKhK5sPd7qlOarr7VYY0+fh+VNd+N2XoXk6w+e9jeZghxtP/OEzXOz3Y5MtD5tseejwDql2APGUe+lmAO+5+2DLzkS93Sr1VoZ79uRXirLr7VYszExHU5dHNT0c6U+cZQHUvY3N3T7sev+8NLtHvd0KW1Ym3nX3Ydf75wFANeVaNJduBnDf0ZNo6xlAhSVLmrtYLGuy5f7+qxvSc5EuTGvu9qnKrrBkoa1nAPcdPckdUAqIXkatm4XUHfsEHd5BOExG1NutqLRmw9Xvx31HT2I4OIbV1py4ylr6WhuaujxYmJkutTnPn+rC1SHlbDjxlPv/fP41vIFRbLLlqWZdkg/vrDUAACAASURBVCv+7ceKsjfZ8nDx9s3Fot3kh/Qh2rnwM8oA8FBTuzRHdb3diodseegdGZX2WY7bw5RiteWtTjx/qgv+4DdSm9PU5cG7YfcTiKfcl891S/cNiNb+PtTUrii73m6FP/gNnj/VhYea2mP+DpRaaePj4+OproSe5HewIwo3lfF10TDuKBrGHaUC445SRa/Ym2nY001EREREpDMm3UREREREOmPSTURERESkMybdREREREQ6Y9JNRERERKQzJt1ERERERDpj0k1EREREpDMm3VPk2VWH8T0bpH+Nm8tj/mzj5nJc2FkLIHT7as+uupg+d2FnbVzlTEVDhU2X+TXly7ywsxYNFTbN9ziLzAkvezZo3VatiDsRR7GQ/6bOInPM61ker8kwvmeDZlxMhTzWGjeXK7Yj+fqMdVucaw45HYr1FG/bIP9NPbvqcMjpmPAzerVBkejRvspjraHCptiOGjeXM+4mINop+b942gb5b9q6rRqt26pj+pwebVAkerSv4bEWaTu6sLM25nVCU8Oke5JEI3DU1YO0l45L/+rtVgbvBBoqbHD1+6XHDpNRdatvTqQfmdgxy+NO/jxFJo+15XlZeO/2neQ8u+rQ1OWR1mfvSDCpBxgzQeu2auxwFCrirqnLw4PjGMhjbZMtD+f6hgCEDmLq7VZpfbr6/Yy7MIecDpzYWoX1x85I62l/iwsH6hwxHbTNZfJYC9/vCoecDjhMxmRXbc5KT3UFZqpjj65EU5dHdYv39cfO4MTWKjRU2KSdu2dXHSyGDACAq9+Ppa+1Tbj8xs3lqlvCht/1S56Yrj92RnELXPlrbT0DWPvmaQCQDghqC3Olz1XlZ+NAnbLx2t8S+l7i+fE9G7C/xYWDHW5V3UTZziIzTmytQlvPAGoLcxXlyjlMRngDowBCBy/ib1E/8VlRR7qjdVs1ekeCqvW69LU2eHbVoXFzOba81Sm9V74OY7lrXEOFTTMW5AdFF3bWSo304U63YhuQv+YNjMJ6pAVAqGF/pMSChZnpsBgypM+FH1w1dXmw5a1O6fkDdQ5ssuVhy1udqrrJy/bsqoOr34/awlxFuXLhsbYwMx1nbgzCWWSGxZAhrTex7PD1MJcdcjpQW5iriqEtb3WidVs1Xt64TGrXDjkd2Ft+p3cwPH4iiRQLgrzdCW9b4m2T5HEK3GmXxfMOkxEXdtZK3yme9jT8VuTAnVgDgALjfJy6PgAAeKTEgqYuj/S+o64exp2Ms8iMveU27G9xKdbrwQ43HCYj9pbbpDZA/NZCePxEEikWhE22POk3CW9b4m2TIu3X5c+P79kgbWfxtqfh5LEm3+/K7S3XTsZJH7M+6TZnpsM3EkzoMsVO+oUzV1SvNXf7FDsmseGJjcWzqw6t26o1k1FB3vshjO/ZoPhcvd2qSIJPbK2S3j++Z4OiwRHDXsTj2sJcxU7pQJ1DsWO8sLMWz9bYpTofqHNIyxaNg/yxvGxBK8ELTwLlOzLR0Fz330LaS8dVDehMo0fcAaGG86irR/M1+c6gdVs1HCaj9Du0bquGZ1edZjIqRI
sF8dhhMqKpy4Olr7VJOxxXvx8HO9xSD518hyFPXBwmo2qnJI9TEVvOIjPSXjquONATZYU/FmWL5WvFXXgSKI+7E1ursL/FpfpcpB3UdKdX3NUUhBJWLfK2TKxr0b4ccjpwoM6BMzcGNZNRIVosCMvzshRtnGjT4m2TRKIsHotYOuR0SIn3ub4hRfsZa3sqF96Gyf+uLcxFSY5B1QGzw1E4IxMgveLu8bJ8eAOjmgdt+5pdqoRbtC/i8SGnQzMZFaLFgvicPLbkbVq8bdJE+/XGzeVYnpclxUS87amcvKOvtjBX0f7Jl9G6rRqHO914pMQScR1RYs364SXm+Yk/rqjKzwaAqDsRILQBWwwZip3Sc6e6UFuYG/V07L5mdRIQ3hC39QxIG/aWtzrhDYzikDPUWHgDo6peO/nRtavfL9VdHCTIGzVxOkrLukUmHO68815RtnwMZKSkcO2bp5H20nF4A6PSqcK2ngEc7nRL3zeWnolEMWfqd8ypR9wBgMWQEdNOubYwF8+d6pIer33zNCyGjKinY2OJBXlsHexwo61nADschXAWmeEwGbH7/fPSe3e/fx4Ok1ExJlK+g7AeaVH83uL0u5YdjkJFzIuy5TuTj671a35WbE/yWDvcGfp8+PeVlxdpeVM1U+Puuv/WhO8TPbeifdnX7IKr349f3H9v1M/FEgvy2Drc6ca6RSYA8bdJa988rUh2o/XCx9ueyontSR5r64+dgTcwirSXjqt68cf3bEBtYa7ieybSTIy7khwDemNI5p+qKoar3y+1L83dPjR1ebDDURj1c7HEgjy2RJvmLDLH3SbFsl8XJtOeylmPtChiTb7fFd9X5CfRDkoSRc/Ym2lm/Zqw5xrQdTOQkrK1essOdoROW4vEPZrwU1fyZYlTRkLvSBAlOQbpb7l/v3gDe8ttUqIfqQdP3gMY6T1aSZ+r348C43zpsTiNGonFkCHtpGJNIvVgv72+dFl2CuNO/M7hv4Or34+SHENM6ztSLIR/9rr/FpbnZWkeiDZ3++ANjCpOj2oJP72rxWLIUB0AnLo+oNipXp5gfctjrSTHEDGJ9OyqQ+9IULcDwNkad0BoGEX473CubwjL87Ji+nykWPAGRhWx5er3Sz15k22TtE71h9NK+mJtT+XLELFWlZ+tmUSKWBM9tLEOyYnHbI67AuN81e/wnrtvwt9XiBYL/37xhvS3iMGq/OxJt0nR9uvCVNpT+TLksSbf7wLAszV2bH3706jLSBQ9Y2+mmfVJ9xa7FR9cjd4jHS/RgDuLzJo9HJGej5V8oxSNr54X14gES5xCbdxcLvUiJZJ8eIk8qTtQ51AMYUmWLTE2yJNddqLjDoCi0Q031bgDkhcLwJ0Ey9XvR9pLxzXHkyeCfHiJiDVBPoRFPI71uovJmqlxJ09i5RIRd8mKBUCZYMmHqyRa+PCS8OF0WmOOm7t9cPX7scmWl/CkeybG3eWbgajtz1RjL1mxACR3vy4fXhIed65+P871DeGja/1T3m5jpWfszTSzfnjJD5cVJXyZomF8qqpY8/UTW6vQuLlc0RsjiFND0XqDxWmrSKe+AUi92oLoYbp8M4CFYadyHi/Ll+odTpw+DT/dGYlW0ucwGWM69bz2zdNo6vJIs0Tsb3FJO9lkJ9wA8MPliY8Nadk6xB0QOl0ZafzdyxuX4cLOWkVvjJzDZIzaGxxLLITHs+hhkh+ICuLaB62edXH6VH66MxqtpK+mIDemU8/7ml2asSb+lifcbT0DuibcwMyMu6OunogXNj9VVSzt2OVn3ITleVlRe+ViiYXwuJOfRYy3TRLDUWJpc+JtT+Xk1/fIZycR1xBseasz5mkTE2Emxt2+ZhcshgzNafsaKmzSpAXX/bdUMbLJljdhb3AssSBvR+VnEeNtk2LZrwvxtqfhrEdaFLEm3+8ufa0N6xaZUG+3StMvOkxG1Bbm6jYDlp6xN9PM+qTbnJmuS4MgxvWFz+cqjiS3vNWJgx1ueAOjiikEn62xo61nYMIGW96ANG4uV+1U5BcZNW4ul8ZmiUZKXq+95TbFFfLRymqosEU9JffRtX7FmDVRdqyn4guM86XEL5UXqz25qljXU156xd2WtzqlmRXkRE+hSFraegbwbI1der11WzW8gdEJx+9NFAsOk1FKEhoqbKgtzMVRV490IPryxmXSe1/euExxUZEW+Q4tWs+mSPrEzleULR9vGU20GXOAUM9QpNl2Emmmxt3BDjdc/X5VL6DoKRSzHb1z2atom8R0ZD/9+C8TljFRLMjb0b3lNmm87GTaJPmBQbTexsm0p3KRZswRwusu1pfWRfpTMVPjDgiddTtQ51Ak3qLXuKnLg4Mdbrxw5oqibXIWmVFvt0a8vkhuoliQt6OiTWvu9k2qTZpovy5Mtj2VC58xR97hYj3Sopj609XvR1vPQNQL7SdL79ibaWb98BIAeHFdGT642pfQMWcHO9w42OFW3SAifMdtPdIi3UBH63Uta988rfpMU5dHcZqtqcujOHUpP1IXMz+EDxXQsq/ZhZqCXOm93sCoNAeqs8iMgx1uPFtjl07DiyEH8u8cTy+1fAdcU5CrGpueDPYcA34ma0j1okfcAaH1fWFnrWrctfx3WPvmaekGOuL1iRrUiWIBCI2VfaTEgvE9oR2NfGiGmPlBfD7aMI3mbp80LZ9IsMR0m4+X5aO524e2ngHFlIGAcnhIPONe5bH2eFm+orfokNMBiyEDFkOGKqmMNAXcZMz0uFv6Wptq2weU60gc1MnbponW4USxIH6r6/5bmm1avG3S1rc/xYmtVdL7D3e6gRILagpCPfnvXPZib7lNmuUhnvY0nLz+YhuSrwsx5WK06V+naqbHnXy60EjT8zV3+6SYEQcxkWb2kJsoFoDQgZFWmybanljbpIn26y+cuSLVRfRIx9qehguPtVgPfBMtWbE3k6SNj4+Pp7oSydDuGUTVv/851dWgFDNnpuOVjcvw2JL8pJTHuCOAcUepwbijVEl27M0Us354iVBpzcaZx7/N0xxzWCoaAcYdMe4oFRh3lCpMuCObMz3dcj96/zx+c7471dWgJDFnpuOxJfn42bftKd0ZMO7mFsYdpQLjjlJlusTedDYnk24A6LoZwO+/vIHGLo8uUx1NW3+/BciYDyzIBow5wIKc0N8Lsm//nQMY5Y9lz8+fORuROTMdldZsPFBkxmP35KPSOvG86MkwZ+Puv/7vQE4eUPotoHhp6H/T7LsLGuMuhfyDwIV2oLcHcJ0FvNeAHz8HWBaluma6Y9zpwH/7gtfhwdDf3mt3nvcPyR7f/lu833sN+F/3ALX/Kfl1ToHpGnvT1ZxNuueqtLS0SX82MzMTZrMZeXl5in+xPJeTk5PAb0EzyejoKObPV8/xXFJSgpqaGlRXV0v/WyyzLxGnxPL5fOjq6kJXVxfa29vx4Ycfor29HT6fOql75ZVX8MMf/jAFtaTpwOfzRfwHAJcuXZLiSev9k/Wzn/0M//RP/5SQ70CzC5PuOWZ4eBg+nw99fX2Kf7E8Nzw8POlyMzIyJp2wm0z63JyFkufLL
7/E6dOncfr0aZw6dQqnT5/W3Kk5HA5VIp6dzZ6Tucrn86G9vR3t7e24dOkSPvjgA3R1dWnGjtlsRmVlJSorK1FaWooHH3wQdrsdZrNZY8k0E4jfWSTF4f9funRJeqyVME8lcRZxYzabYTabYbfbFY9LS0ulx/I4k7+PKByTbopZIBCIOUEPf25wMPqt4aOZN29ezAm61nM0PZ07d06ViPv96hs/rFy5EjU1NVISXl1djYyMDI0l0kzW3t6u6L0WCXc4keTY7XasWrUKDz74oJRw0/QjT4TDe5RF0ixekyfLU+1tBu4kyFr/AKC0tFQzoZa/hyiRmHRTUty6dWvSCfvAwOTn8k5LS4srYQ9//q675swEP9NCe3u7Igk/ffo0vvnmG9X7wnvDq6qqNJZG05FIsD744IOYeq/tdjsefPBB9l6nSHgCLO9VBu4M0Yg2jGOywnuPw/8Xvc0iJsITZsYJTTdMumnaCwaDcQ2DkT+XiEY/3qRdvJaePifuPaWrYDCoSsI7OjpU78vMzFQl4vfdd18Kakxy8uEhH374oTQWO1KCLZLqBx54QOrNZuI0NZMZoiF/PZG9zbEO0WBvM81WTLppVvvmm2/iHrsuf24qm0dubm5cw2Dk/zh8IrKhoSFVIv7FF1+o3mcymRRJeHV1Ne69994U1Hj2i+fiRq3hIZWVlUywoojWk+zz+dDf35+UIRqAslcZuDNEI9owDiIKYdJNFMVUEvaxsbFJl5udnR3XMBj5v8zMzASugZmht7dXkYSfOnVK6sGTKygoUCTiNTU1WLx4cQpqPHPx4sb4zbQhGvLX59pvRaQnJt1EOunv75900j46OjrpchcsWBD32HXxz2g0JnANpNbXX3+tulDz2rVrqvcVFxcresNrampgtVpTUOPpR/RcT3RxIwBUVlbO2osbw3uPIw3RCO9t1mOIRvg/k8kUcfgGe5uJphcm3UTT0M2bNyedsI+MjEy6XIPBMKkLTvPy8pCVlZXANaCPr776SpWI9/X1qd5XVlamGiM+m+eaF8lie3s7zp49OysvboxniIbW+6eCQzSICGDSTTTrDA0NTWoe9r6+PgQCgUmXO3/+/LjGrctfT2VCe/78edUYca056VeuXKnoDa+urta86c90Jx8ecvbsWWmqvomGh6T64sbpMESDczYT0VQw6SYiyUy4eVL463rcPOns2bOqRFxrjH54Er569eqE12WytC5uFI/DJePixkhDNCaas5lDNIhotmDSTUQJEQgEJp2wT/XmSRNN3xjttViMjY2pkvCzZ8+q3peZmalKxFesWDHp7xarZF3cmOohGgDnbCaimYtJNxGl3HS8eVIss8SIBFwk5FpTF+bm5qqmLiwrK5t0neO5uNFut6OyslLqvZ43bx7Ky8t5W20iohRg0k1EM5q4eZJWcq71vPxxom+elJWVhWAwiKGhIfT29qK7uxter1f1ufz8fFUiXlxcrHiP1sWNX331Ffr7+1XLEzPWmEwm5Ofnw2Qyobe3VzqDoMcFgRyiQUQUHybdRDRnyW+eNJle9kQ2n0ajEQUFBcjKysK1a9cwMDCAYDCYsOUDHKJBRJRKTLqJiCZJj5snpaWlRU3m5Ykwb6tNRDRzMOkmoqh8/iB+c/IaGj/1wucPoqs3AJ8/sT2wc9LIEDByEwjcBEYGAe9XwK1BwHsJWFgCmBYD2VYgM1v5j6bEvtCAysXZWHV3Fp58wAazMT3VVSKiOYJJNxFp8vmD+Od3LuE3J68xyaZZ68kHbHjxsXtTXQ0imgOYdBORSvvVQWw98hm6eid/sxyimcK+0IBju1agcjHPJBCRfph0E5FC+9VBbPjVWfZu05xiNqbj+P+5iok3EenmrlRXgIimD58/iB8d/YIJN805Pn+QB5tEpCsm3UQk+eWHbrRfnfzdIYlmMp8/iJ/8/i+prgYRzVJMuokIQCjhOHDiaqqrQZRSvzl5jdcyEJEumHQTEQBwlhKi237f6Ul1FYhoFmLSTUQAgA//or69OE1O4+6VGH/xAVx4ek1Clnfh6TUYf/EBNKxfnJDlUXTcFohID0y6iQgAOJab6LYPLvpSXQUimoWYdBMRAHBoCdFt3BaISA+8/y0RAWCiMZ0t/fnJVFeBiIimiD3dREREREQ6Y083EVGSNaxfjN3fKUKZ1YgF80N9H64bfrxzvhf7fndR9f4LT6+BI9+I/ccu4uDtaR0b1i/Gga1lcN3w47v/2oGXn1iKysXZsGRlAAA6vh7Cy3/qlt5PRESpxaSbiCiJ3v0/KrBpaR4AYPjWN3Dd8MOYcRcc+UY48hfjsXIr6g6241JfbHNFGzPuQktDJWzmTLh9I3Dd8GOxKRMVd2fhwNYyWLMy8OzbXXp+JSIiigGHlxARJcmrO5dJCfe/nepB1v/VjKU/P4nif27D/mMXMXzrG9jMmWj631bGvEybORMLF2Tgid9+juJ/bsPSn5/Eff/3n9Hx9RAAYM+6u3X5LkREFB8m3URESfLo8oUAgKZPvfjBa+cVrx08cRX/8D+/BABU3J2F7VX5MS/3H/7nl3jjzA3p8aW+AH7+7iUAgCUrA857TFOtOhERTRGTbiKiJGhYv1gab/3C8Sua7zl44ircvhEAwN+sLox52VrjtuVJeJUtO56qEhGRDph0ExElgcNqBBAax938ZeQ7Hn5xfRgAsLxwQUzLFUk6ERFNb0y6iYiSoCTPAAC42h89SR669U1cy/WPxvd+IiJKDSbdRESTcOHpNRj6r86Yx15fvj0byWJTZtT3Zc1ns0xENBuxdScimgRHfmiO7cKc+arXCnIyVM+5PH4AwIL5d0W9sPFbBaFhJed6hhNUUyIimg6YdBMRTYIYS12/wqJ6zXa7N/uybK7tgyeuwjs0CgB4akOx5jIb1i+GzRz67P/7SU9C60tERKnFpJuIaBI+cQ8CANbaTTj0vTIAQGmeAa1PVkmJc9NnXsVn3j7XCwCoX2nBqzuXKV5rWL8Y/+V/uQdA6O6U8tlHiIho5uMdKYmIJqHhdxex2pYNmzkTe52Lsde5WPF626UB1VR+P3jtPIpy52PT0jx8v6YQ36vIx9X+ERgz7pISdbdvBN/9146kfQ8iIkoO9nQTEU3Cpb4A6g62o+lTrzRsBAj1Uh9uvoq1vzyj+bmH/rUD+49dlO4Y6cg3wmbOlD5X/M9tMd8CnoiIZo608fHx8VRXgohSL+0nH6a6CkTTxviLD6S6CkQ0y7Cnm4iIiIhIZ0y6iYiIiIh0xqSbiIiIiEhnTLqJiIiIiHTGpJuIiIiISGdMuomIiIiIdMakm4iIiIhIZ0y6iYiIiIh0xqSbiIiIiEhnTLqJiIiIiHTGpJuIiIiISGdMuomIiIiIdMakm4iIiIhIZ0y6iYiIiIh0xqSbiIiIiEhnTLqJiIiIiHTGpJuIiIiISGdMuomIiIiIdMakm4iIiIhIZ0y6iYjCOO8xYfzFB3Dh6TWprsqkie/gvMeU6qoQERGYdBMRqTy1oRiuG3448o1oWL841dUhIqJZID3VFSAimm7WLcnF0U+uA1iIHasLcPDE
VcXrF55eA0e+EQDQ9KkX9SstWH+oHc1f9qNh/WIc2FoGAPAOjcLl8QMA1v7yDBp3r0RBTgZqS3MBAPuPXcTBE1cVy/MOjcL6jx9LZTXuXon6lRYAQNulATisRhz95Dr2/e4iAGD8xQcUdWv61IsXjl/BiX2VAIAT+ypxuPkq9v3uoqJuAKTnAcDzL/fD5fGjtjRXVQciIpo69nQTEckc+l4oKd33u4t453yvlCALrU9WYeGCdKT95EOk/eRDrFty53XnPSYc2FqGw81XkfaTD3H0k+uqz9eW5mL/sYtI+8mHUsLtHR6VlvfRVwPw/Mv9Ul1EQp/2kw8BAJasDGlZnn+5H02feqXPigMAAFh/qF36X55wi7L3H7uIvc7Fip58h9WItJ98yISbiEgHTLqJiGQeWbYQH301AABSL7BIxIFQ0vzcHy5Jj+V/i2Ep4nP7fncRrht+xfK9Q6NSz7nzHhMc+Uas/eUZ6fUtL38KS1YGGtYvxiPLFqLpUy+av+wHAMX7AMD6jx9jy8ufSo/fc/VF/F47Vheg7dKAVPbBE1fRdmkAe+vuJN3iexMRUeJxeAkR0W0iCd79+hfSc22XBvDIsoXS6wBwxj0ovS7/uyAnA97hUcUywx/3Dgelvx+vzAegHiIChHqdFy5Ix+W+gHJ5Q6Oq98qHp0RiWZCBcz3DiudOXb6JHasLpMfhZRERUeIw6SYiuu2pDcUAII2HlmtYv1iRYCdKtPHT8oRYi0i2XTf8SPvJh6ox20RENH0w6SYium3dklzFxYWC51/uV1xQWWXLloZ8VNmypfddvzmK5YULFJ+1LFD3fgsujx+WrAw47zFJy5PrHQ6iJM+gXN7tMd2iV15cwDkR7/AoCnIyFM/VlOQoet6JiEg/HNNNRAAAs3FuH4Mf+l4ZLFkZqoQbCI11FhdEtl0awLMPl0qvyf9+4fgVOPKN0hjwQ98rizrs4+CJq3Dd8OPYrhXScw3rF2P8xQfQsH4x3jnfi/qVFmlYS+uTVaplyJP+aL3c4qJOceFkw/rFqC3NxeGWqxE/Q0REiTO397JEJDEb0+Hzz91ez0eWLUTbJe0LCV84fgX1Ky1o3L0Sa395BheeXiONw5bPGNL8ZT/2H7uIA1vLsNe5GN6h0YjLFJb+/KRieQAUvdcleQZpuEvbpQFpTHfzl/043HwVB7aWScn2+kPtOLGvEo9X5ksXccqnDASgeL+YspCU5voBKBHpI218fHw81ZUgotTb8Kuz+OCiL9XVmHHEOGoxpV+4C0+vwbmeYcUsI1Mx/uIDTJZ1Vrk4G2eeqk51NYholuHwEiICAGy53VtL0Y2/+AAad6+UHu9YXSBNC9i4e6Wix1qMu442lV80F55eo7gVvRi2woRbX9wWiEgP7OkmIgCAzx9E3tMfpboa0174DCGuG34s/flJ6XH49H1T7ZUOn04wUo86Jc5Xz3wH9oWGid9IRBQHJt1EJPnR0S/wm5PXUl0NopR58gEbXnzs3lRXg4hmIQ4vISLJi4/dyx4+mrPsCw342SOlE7+RiGgSmHQTkcRsTFdMX0c0V5iN6XjxsXs5cwkR6YZJNxEpiJkb2ONNc4XZmI5XdnwLj5VbU10VIprFOKabiCLiGG+azczGdDxWbsXPHinlQSYR6Y5JNxFF1dUbwO87PWj81Mt5vGnGMxvTUbk4Gw/ca8Jj5VZULs6e+ENERAnApJuIaJpIS0sDALBZJiKafTimm4iIiIhIZ0y6iYiIiIh0xqSbiIiIiEhnTLqJiIiIiHTGpJuIiIiISGdMuomIiIiIdMakm4iIiIhIZ0y6iYiIiIh0xqSbiIiIiEhnTLqJiIiIiHTGpJuIiIiISGdMuomIiIiIdMakm4iIiIhIZ0y6iYiIiIh0ljY+Pj6e6koQ0fTl8wfxm5PX0PipFz5/EF29Afj8wf+/vbuLrbO+7wD+NYohzquxk2xgg0OIO6YG6oQqC7A4QpOWSZVCQqd2uVvJxS5GDJaiXUTVuk6olSamvMHd2nFHJ00JydWYVFE7pURoIynNBMTmxQUzIHFIUmwznOFdhPPgE+cFSJ4dx/58bnzO83Z+5+JYX//8e/6n1mVNT4f/9dzPlX9e2zqmsaVNs9PRMi/fuHluHl3XmsaGWbUuCZghhG7ggk6Nns0Pnx3IUy++J2QzbT26rjU7Nt5e6zKAGUDoBiY5MvhRNv30v/LWyY9rXQqUbmnT7Ox76OvpaJlX61KAaUzoBqocGfwo9z/5a91tZpTGhll57q+/wQDeQwAACcNJREFUIXgDpXEjJVA4NXo233v6NYGbGefU6Fl/bAKlErqBws6ed3Jk8KNalwE1cWr0bLqfeb3WZQDTlNANJDkXOHb1Dta6DKipp158z70MQCmEbiBJrFICn3nmNydqXQIwDQndQJKk5/XTtS4BpgSfBaAMvhUASBKz3F/R+I51X/qcuu6eEiqpdmz76rQvbsgj+/qz29jQl/KL/lO1LgGYhnS6gSQxWgKf8VkAyqDTDSQRNK6UjjIAl6LTDQAAJRO6AQCgZMZLAGpo4g2Pi+bW54EVi3LXzXOTJEPDY3n+zTPp2tufgQ8nrx29dtnC/MOGZbnrpnmZc/11Gfnk07z83x/lbw688f/9NgC4DKEbYArYvGpJ1rQtyMgnn6bv+Gga6q9La+MN2bCiOata5+WWHx6qOv67Kxfnp39xR+Zcf+4flpVz1rQtyL/91V0ZHfvfWrwNAC5C6AaYAta0LcihgTO5Z+fhYltXZ0t2bVqe1sYbsufB5dm6tz9J0nbj7Dz57fbMuf66vPzucDb809GiE97V2ZIff2tZmufW1+R9AHBhQjfAVbBr0/Ls2rT8ssddbJWTvuOjVYE7SXb3DmbLH92Uu26em2/eOr/Yvu3+1jTPrc/IJ59WBe7KOZV6AJg63EgJMAW88v7IBbe/dfJcoG6e83nnuvP2xiTJC2+dvuCs9+7ewQwNj5VQJQBflU43wFVwpet0//rdL/6NoA315/olw598etFjTo6cNWICMIXodANMASe+RGe6fXFDkuTnfR+WVQ4AV5nQDXCN6Ts+miT5k/Yba1wJAF+U0A1wjRkdOzdWsmT+xcdHmuaYHgSYSoRugGtM7+unkiR33TQvbTfOnrS/q7PFPDfAFCN0A1xjtu7tzzun/idzrr8uv+zqyNplC4t93125OD/+1rIaVgfAhfj/I8BV8EXX6U6ufKWTJNl24PU8+e32tDbekN6tHVXfYjnyyacZGh7T7QaYQnS6Aa5B/3L4eO7+x5dy4OhQhobH0r64Ia2NN+Tld4fz0M9ezcmRs7UuEYAJ6sbHx8drXQRQe3XdPbUuAaaM8R3ral0CMM3odAMAQMmEbgAAKJnQDQAAJRO6AQCgZEI3AACUTOgGAICSCd0AAFAyoRsAAEomdAMAQMmEbgAAKJnQDQAAJRO6AQCgZEI3AACUTOgGAICSCd0AAFAyoRsAAEomdAMAQMmEbgAAKJnQDQAAJRO6Ac6zdtnCjO9Yl2PbV9e6lK+s8h7WLltY61IAiNANMMm2+29J3/HRtC9uSFd
nS63LAWAamFXrAgCmmvtuW5CnX/ogSVM2r1qS3b2DVfuPbV+d9sUNSZIDR4eyYUVzOvccycE3TqersyW7Ni1PkgwNj6XvxGiS5J6dh7N/y4osmV+fNW0LkiSP7OvP7t7BqusNDY9l0fd/VbzW/i0rsmFFc5Lk0MCZtC9qyNMvfZCte/uTJOM71lXVduDoUB5/7u30bu1IkvRu7cgTBwezdW9/VW1Jiu1JcuKxe9N3YjRr2hZMqgGAK6fTDTDBngfPhdKte/vz7Ksni4Bc8cKjK9M0Z1bquntS192T+277fP/aZQuza9PyPHFwMHXdPXn6pQ8mnb+mbUEe2defuu6eInAPjYwV13v+zTM58di9RS2VQF/X3ZMkaZ5bX1zrxGP35sDRoeLcyh8ASdK550jxc2Lgrrz2I/v68/DalqpOfvuihtR19wjcACUQugEmWH9HU55/80ySFF3gShBPzoXmv//3geL5xMeVsZTKeVv39qfv+GjV9YeGx4rO+dplC9O+uCH37Dxc7H/gJ0fTPLc+XZ0tWX9HUw4cHcrBN04nSdVxSbLo+7/KAz85Wjz/ed+HF31fm1ctyaGBM8Vr7+4dzKGBM3n4jz8P3ZX3DcDVZ7wE4DOVELzlZ68V2w4NnMn6O5qK/Uly+J2Piv0THy+ZX5+hkbGqa57//OTI2eLxdzoWJ5k8IpKc6zo3zZmV3374cfX1hscmHTtxPOVimufU55X3R6q2/cdvf5fNq5YUz89/LQCuHqEb4DPb7r8lSYp56Im6OluqAvbVcqn56YmB+EIqYbvv+GjqunsmzWwDMHUI3UCSpLFhVk6Nnr38gdPYfbctqLq5sOLEY/dW3VC5snVeMfKxsnVecdwHvxvLH/7enKpzm+dM7n5X9J0YTfPc+qxdtrC43kQnR87m1htnV1/vs5nuSle+cgPn5QyNjGXJ/Pqqbd+8dX5V5x2A8pjpBpKcC90z2Z4Hl6d5bv2kwJ2cm3Wu3BB5aOBM/vZP24p9Ex8//tzbaV/cUMyA73lw+SXHPnb3Dqbv+Gj2PfT1YltXZ0vGd6xLV2dLnn31ZDasaC7GWl54dOWka0wM/Zfqcldu6qzcONnV2ZI1bQvyxC8HL3rOTDXTPwtAOfxmAZIkS5tm562TM3emd/0dTTk0cOEbCR9/7u1sWNGc/VtW5J6dh3Ns++piDnviiiEH3zidR/b1Z9em5Xl4bUuGhscues2Kr/3oxarrJanqXt964+xi3OXQwJlipvvgG6fzxMHB7Nq0vAjbnXuOpHdrR77Tsbi4iXPikoFJqo6vLFlItaVNsy9/EMCXVDc+Pj5e6yKA2tvZ8066n3m91mVccypz1JUl/c53bPvqvPL+SNUqI1difMc6YblkP1jflr/7s6W1LgOYZoyXAEmSv1z9+7Uu4ZowvmNd9m9ZUTzfvGpJsSzg/i0rqjrWlbnrSy3ldynHtq+u+ir6ytiKwF0unwWgDDrdQOF7T7+Wp158r9ZlTGnnrxDSd3w0X/vRi8Xz85fvu9Ku9PnLCV6so87V8ei61uzYeHutywCmIaEbKJwaPZuVj//njJ7tZuZa2jQ7h7fd7UZKoBTGS4BCY8OsqpU0YKZobJiVHRtvF7iB0gjdQJWOlnk5vO1uKzgwYzQ2zMo/b/6DbLxzUa1LAaYx4yXARZnxZjprbJiVjXcuyg/Wt/kjEyid0A1c0lsnP84zvzmR/UeH8ov+U7UuB65IY8OsdLTMy7rbF2bjnYvS0TLv8icBXAVCNwAAlMxMNwAAlEzoBgCAkgndAABQMqEbAABKJnQDAEDJhG4AACiZ0A0AACUTugEAoGRCNwAAlEzoBgCAkgndAABQMqEbAABKJnQDAEDJhG4AACiZ0A0AACUTugEAoGRCNwAAlEzoBgCAkgndAABQMqEbAABKJnQDAEDJhG4AACiZ0A0AACUTugEAoGRCNwAAlEzoBgCAkgndAABQMqEbAABKJnQDAEDJhG4AACiZ0A0AACUTugEAoGRCNwAAlEzoBgCAkgndAABQMqEbAABKJnQDAEDJhG4AACjZ/wG/aKvqTLZkfAAAAABJRU5ErkJggg==" + } + }, + "cell_type": "markdown", + "id": "8e406db6", + "metadata": { + "scrolled": true + }, + "source": [ + "Now we come to the flow definition. The OpenFL Workflow Interface adopts the conventions set by Metaflow, that every workflow begins with `start` and concludes with the `end` task. The aggregator begins with an optionally passed in model and optimizer. The aggregator begins the flow with the `start` task, where the list of collaborators is extracted from the runtime (`self.collaborators = self.runtime.collaborators`) and is then used as the list of participants to run the task listed in `self.next`, `aggregated_model_validation`. The model, optimizer, and anything that is not explicitly excluded from the next function will be passed from the `start` function on the aggregator to the `aggregated_model_validation` task on the collaborator. Where the tasks run is determined by the placement decorator that precedes each task definition (`@aggregator` or `@collaborator`). Once each of the collaborators (defined in the runtime) complete the `aggregated_model_validation` task, they pass their current state onto the `train` task, from `train` to `local_model_validation`, and then finally to `join` at the aggregator. 
It is in `join` that an average is taken of the model weights, and the next round can begin.\n", + "\n", + "![image.png](attachment:image.png)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "689e3373-a782-402b-b30e-f63648127ca4", + "metadata": {}, + "outputs": [], + "source": [ + "class FederatedFlow(FLSpec):\n", + " def __init__(self, model=None, optimizer=None, rounds=3, **kwargs):\n", + " super().__init__(**kwargs)\n", + " if model is not None:\n", + " self.model = model\n", + " self.peft_params = get_peft_model_state_dict(self.model)\n", + " self.optimizer = optimizer\n", + " else:\n", + " raise ValueError(\"No model inputted\")\n", + "\n", + " self.rounds = rounds\n", + "\n", + " @aggregator\n", + " def start(self):\n", + " print(f\"Performing initialization for model\")\n", + " self.collaborators = self.runtime.collaborators\n", + " self.private = 10\n", + " self.current_round = 0\n", + " self.next(\n", + " self.aggregated_model_validation,\n", + " foreach=\"collaborators\",\n", + " exclude=[\"model\"],\n", + " )\n", + "\n", + " @collaborator\n", + " def aggregated_model_validation(self):\n", + " print(f\"Performing aggregated model validation for collaborator {self.input}\")\n", + " self.model = AutoModelForCausalLM.from_pretrained(\n", + " checkpoint_path, return_dict=True, **model_kwargs\n", + " )\n", + " self.model = get_peft_model(self.model, peft_conf)\n", + " set_peft_model_state_dict(self.model, self.peft_params)\n", + " trainer = SFTTrainer(\n", + " model=self.model,\n", + " args=train_conf,\n", + " peft_config=peft_conf,\n", + " train_dataset=self.train_dataset,\n", + " eval_dataset=self.eval_dataset,\n", + " max_seq_length=sequence_max_length,\n", + " dataset_text_field=\"text\",\n", + " tokenizer=tokenizer,\n", + " packing=True,\n", + " data_collator=transformers.DataCollatorForSeq2Seq(\n", + " tokenizer, pad_to_multiple_of=8, return_tensors=\"pt\", padding=True\n", + " ),\n", + " )\n", + "\n", + " trainer.remove_callback(PrinterCallback)\n", + " out = trainer.evaluate()\n", + " self.agg_validation_score = out[\"eval_loss\"]\n", + " print(f\"{self.input} value of {self.agg_validation_score}\")\n", + " self.next(self.train)\n", + "\n", + " @collaborator\n", + " def train(self):\n", + " trainer = SFTTrainer(\n", + " model=self.model,\n", + " args=train_conf,\n", + " peft_config=peft_conf,\n", + " train_dataset=self.train_dataset,\n", + " eval_dataset=self.eval_dataset,\n", + " max_seq_length=sequence_max_length,\n", + " dataset_text_field=\"text\",\n", + " tokenizer=tokenizer,\n", + " packing=True,\n", + " data_collator=transformers.DataCollatorForSeq2Seq(\n", + " tokenizer, pad_to_multiple_of=8, return_tensors=\"pt\", padding=True\n", + " ),\n", + " )\n", + "\n", + " out = trainer.train()\n", + " self.loss = out.training_loss\n", + " trainer.save_model()\n", + " self.training_completed = True\n", + " self.next(self.local_model_validation)\n", + "\n", + " @collaborator\n", + " def local_model_validation(self):\n", + " trainer = SFTTrainer(\n", + " model=self.model,\n", + " args=train_conf,\n", + " peft_config=peft_conf,\n", + " train_dataset=processed_train_dataset,\n", + " eval_dataset=processed_test_dataset,\n", + " max_seq_length=sequence_max_length,\n", + " dataset_text_field=\"text\",\n", + " tokenizer=tokenizer,\n", + " packing=True,\n", + " data_collator=transformers.DataCollatorForSeq2Seq(\n", + " tokenizer, pad_to_multiple_of=8, return_tensors=\"pt\", padding=True\n", + " ),\n", + " )\n", + " out = trainer.evaluate()\n", + " 
self.local_validation_score = out[\"eval_loss\"]\n", + " self.peft_params = get_peft_model_state_dict(self.model)\n", + " print(f\"Doing local model validation for collaborator {self.input}\")\n", + " self.next(self.join, exclude=[\"training_completed\", \"model\"])\n", + "\n", + " @aggregator\n", + " def join(self, inputs):\n", + " self.average_loss = sum(input.loss for input in inputs) / len(inputs)\n", + " self.aggregated_model_accuracy = sum(\n", + " input.agg_validation_score for input in inputs\n", + " ) / len(inputs)\n", + " self.local_model_accuracy = sum(\n", + " input.local_validation_score for input in inputs\n", + " ) / len(inputs)\n", + " print(\n", + " f\"Average aggregated model validation values = {self.aggregated_model_accuracy}\"\n", + " )\n", + " print(f\"Average training loss = {self.average_loss}\")\n", + " print(f\"Average local model validation values = {self.local_model_accuracy}\")\n", + "\n", + " self.model = FedAvg([input.peft_params for input in inputs], self.model)\n", + "\n", + " self.model.save_pretrained(\"./aggregated/model\")\n", + " tokenizer.save_pretrained(\"./aggregated/tokenizer\")\n", + " self.current_round += 1\n", + " if self.current_round < self.rounds:\n", + " self.next(\n", + " self.aggregated_model_validation,\n", + " foreach=\"collaborators\",\n", + " exclude=[\"private\"],\n", + " )\n", + " else:\n", + " self.next(self.end)\n", + "\n", + " @aggregator\n", + " def end(self):\n", + " print(f\"This is the end of the flow\")" + ] + }, + { + "cell_type": "markdown", + "id": "4376c157-9f1f-412b-a3d4-adb6e8b39425", + "metadata": {}, + "source": [ + "You'll notice in the `FederatedFlow` definition above that there were certain attributes that the flow was not initialized with, namely the `train_dataset` and `eval_dataset` for each of the collaborators. These are **private_attributes** that are exposed only throught the runtime. Each participant has it's own set of private attributes: a dictionary where the key is the attribute name, and the value is the object that will be made accessible through that participant's task. \n", + "\n", + "Below, we segment shards of the MedQuAD dataset for **three collaborators**: Portland, Seattle, and Chandler. Each has their own slice of the dataset that's accessible via the `train_dataset` or `eval_dataset` attribute. Note that the private attributes are flexible, and you can choose to pass in a completely different type of object to any of the collaborators or aggregator (with an arbitrary name). These private attributes will always be filtered out of the current state when transfering from collaborator to aggregator, or vice versa. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4b53fb67-5b44-4bdf-a9ea-b6d10b20aad1", + "metadata": {}, + "outputs": [], + "source": [ + "# Setup participants\n", + "aggregator = Aggregator()\n", + "aggregator.private_attributes = {}\n", + "\n", + "# Setup collaborators with private attributes\n", + "collaborator_names = [\n", + " \"Portland\",\n", + " \"Seattle\",\n", + "]\n", + "collaborators = [Collaborator(name=name) for name in collaborator_names]\n", + "\n", + "for idx, current_collaborator in enumerate(collaborators):\n", + " # Set the private attributes of the Collaborator to include their specific training and testing data loaders\n", + " current_collaborator.private_attributes = {\n", + " \"train_dataset\": processed_train_dataset.shard(\n", + " num_shards=len(collaborators), index=idx\n", + " ),\n", + " \"eval_dataset\": processed_test_dataset.shard(\n", + " num_shards=len(collaborators), index=idx\n", + " ),\n", + " }\n", + "\n", + "local_runtime = LocalRuntime(\n", + " aggregator=aggregator, collaborators=collaborators, backend=\"single_process\"\n", + ")\n", + "print(f\"Local runtime collaborators = {local_runtime.collaborators}\")" + ] + }, + { + "cell_type": "markdown", + "id": "0bc693d1-1e16-43ad-aeb8-3af50fca14f2", + "metadata": {}, + "source": [ + "Now that we have our flow and runtime defined, let's run the experiment! " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "674def50-5e31-4abb-ad2d-ac74cb5d093d", + "metadata": {}, + "outputs": [], + "source": [ + "flflow = FederatedFlow(model, rounds=2)\n", + "flflow.runtime = local_runtime\n", + "flflow.run()" + ] + }, + { + "cell_type": "markdown", + "id": "c32e0844", + "metadata": {}, + "source": [ + "Now that the flow has completed, let's get the final model accuracy:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "863761fe", + "metadata": {}, + "outputs": [], + "source": [ + "print(f'\\nFinal aggregated model accuracy for {flflow.rounds} rounds of training: {flflow.aggregated_model_accuracy}')" + ] + }, + { + "cell_type": "markdown", + "id": "426f2395", + "metadata": {}, + "source": [ + "# Congratulations!\n", + "Now that you've completed this notebook, check out our [other tutorials](https://github.com/securefederatedai/openfl/tree/886704508b8b3b0638372003d72e0bcf7f2e7114/openfl-tutorials/experimental), including:\n", + "\n", + "- Using the LocalRuntime Ray Backend for dedicated GPU access\n", + "- Vertical Federated Learning\n", + "- Model Watermarking\n", + "- Differential Privacy\n", + "- And More!" 
+ ] + }, + { + "cell_type": "markdown", + "id": "6d29bbc9-ccc2-4185-b36a-bc6dfc1a9753", + "metadata": {}, + "source": [ + "# Reference\n", + "\n", + " @ARTICLE{hu2023llm, \n", + " author = {Zhiqiang Hu and Yihuai Lan and Lei Wang and Wanyu Xu and Ee-Peng Lim and Roy Ka-Wei Lee and Lidong Bing and Soujanya Poria},\n", + " title = {LLM-Adapters: An Adapter Family for Parameter-Efficient Fine-Tuning of Large Language Models},\n", + " journal = {arXiv preprint arXiv:2304.01933},\n", + " year = {2023}\n", + "}\n", + "\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.0" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 0a2ce1796e0699711d55b3756460fe8e00b7f837 Mon Sep 17 00:00:00 2001 From: porteratzo Date: Wed, 5 Jun 2024 08:26:24 -0700 Subject: [PATCH 6/7] change variables --- .../Phi3/Workflow_Interface_Phi3.ipynb | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/openfl-tutorials/experimental/Phi3/Workflow_Interface_Phi3.ipynb b/openfl-tutorials/experimental/Phi3/Workflow_Interface_Phi3.ipynb index ebba043085..613ca2fc42 100644 --- a/openfl-tutorials/experimental/Phi3/Workflow_Interface_Phi3.ipynb +++ b/openfl-tutorials/experimental/Phi3/Workflow_Interface_Phi3.ipynb @@ -307,8 +307,8 @@ " test_size=val_set_size, shuffle=True, seed=42\n", ")\n", "\n", - "processed_train_dataset = train_val[\"train\"].shuffle().map(generate_and_tokenize_prompt)\n", - "processed_test_dataset = train_val[\"test\"].shuffle().map(generate_and_tokenize_prompt)\n" + "processed_train_dataset = train_val[\"train\"].shuffle().map(generate_and_tokenize_prompt).select(range(3))\n", + "processed_test_dataset = train_val[\"test\"].shuffle().map(generate_and_tokenize_prompt).select(range(3))\n" ] }, { @@ -482,6 +482,7 @@ " print(f\"Average local model validation values = {self.local_model_accuracy}\")\n", "\n", " self.model = FedAvg([input.peft_params for input in inputs], self.model)\n", + " self.peft_params = get_peft_model_state_dict(self.model)\n", "\n", " self.model.save_pretrained(\"./aggregated/model\")\n", " tokenizer.save_pretrained(\"./aggregated/tokenizer\")\n", @@ -490,7 +491,7 @@ " self.next(\n", " self.aggregated_model_validation,\n", " foreach=\"collaborators\",\n", - " exclude=[\"private\"],\n", + " exclude=[\"model\"],\n", " )\n", " else:\n", " self.next(self.end)\n", @@ -518,29 +519,29 @@ "outputs": [], "source": [ "# Setup participants\n", - "aggregator = Aggregator()\n", - "aggregator.private_attributes = {}\n", + "_aggregator = Aggregator()\n", + "_aggregator.private_attributes = {}\n", "\n", "# Setup collaborators with private attributes\n", "collaborator_names = [\n", " \"Portland\",\n", " \"Seattle\",\n", "]\n", - "collaborators = [Collaborator(name=name) for name in collaborator_names]\n", + "_collaborators = [Collaborator(name=name) for name in collaborator_names]\n", "\n", - "for idx, current_collaborator in enumerate(collaborators):\n", + "for idx, current_collaborator in enumerate(_collaborators):\n", " # Set the private attributes of the Collaborator to include their specific training and testing data loaders\n", " current_collaborator.private_attributes = {\n", " \"train_dataset\": processed_train_dataset.shard(\n", - " 
num_shards=len(collaborators), index=idx\n", + " num_shards=len(_collaborators), index=idx\n", " ),\n", " \"eval_dataset\": processed_test_dataset.shard(\n", - " num_shards=len(collaborators), index=idx\n", + " num_shards=len(_collaborators), index=idx\n", " ),\n", " }\n", "\n", "local_runtime = LocalRuntime(\n", - " aggregator=aggregator, collaborators=collaborators, backend=\"single_process\"\n", + " aggregator=_aggregator, collaborators=_collaborators, backend=\"single_process\"\n", ")\n", "print(f\"Local runtime collaborators = {local_runtime.collaborators}\")" ] From 1f96a4b57a9d1adbaefafc53fbcc5ec10975ef95 Mon Sep 17 00:00:00 2001 From: porteratzo Date: Mon, 10 Jun 2024 15:30:37 -0700 Subject: [PATCH 7/7] deleted unnecesary file --- openfl-tutorials/Federated_PyTorch_LLM.ipynb | 510 ------------------- 1 file changed, 510 deletions(-) delete mode 100644 openfl-tutorials/Federated_PyTorch_LLM.ipynb diff --git a/openfl-tutorials/Federated_PyTorch_LLM.ipynb b/openfl-tutorials/Federated_PyTorch_LLM.ipynb deleted file mode 100644 index ba5b2bede9..0000000000 --- a/openfl-tutorials/Federated_PyTorch_LLM.ipynb +++ /dev/null @@ -1,510 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Federated PyTorch LLM Tutorial" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This notebook is an example of LLM fine-tuning\n", - "\n", - "Custom DataLoader is used with OpenFL Python API" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#Install dependencies if not already installed\n", - "!pip install torch torchvision peft transformers sentencepiece huggingface_hub accelerate datasets evaluate seqeval\n", - "%load_ext autoreload\n", - "%autoreload 2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from typing import Any, Mapping\n", - "import numpy as np\n", - "import openfl.native as fx\n", - "import torch\n", - "import torch as pt\n", - "from datasets import Dataset, load_dataset, load_metric\n", - "from openfl.federated import PyTorchTaskRunner\n", - "from openfl.federated.task.runner_pt import change_tags\n", - "from openfl.utilities import Metric, TensorKey\n", - "from openfl.utilities.data_splitters import EqualNumPyDataSplitter\n", - "from peft import LoraConfig, TaskType, get_peft_model\n", - "from peft.utils import get_peft_model_state_dict, set_peft_model_state_dict\n", - "from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n", - "from torch.optim import AdamW\n", - "from torch.utils.data import DataLoader\n", - "from tqdm import tqdm\n", - "import torch.nn as nn\n", - "from transformers.trainer_pt_utils import get_parameter_names\n", - "from transformers import (AutoModelForSequenceClassification,\n", - " AutoTokenizer, DataCollatorWithPadding, get_scheduler)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "After importing the required packages, the next step is setting up our openfl workspace. To do this, simply run the `fx.init()` command as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#Setup default workspace, logging, etc.\n", - "fx.init('torch_cnn_mnist')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we are ready to define our dataset and model to perform federated learning on. 
The dataset should be composed of a numpy arrayWe start with a simple Roberta model that is trained on the glue mrpc dataset. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Download the data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def get_glue_mrpc_dataset(tokenizer):\n", - " dataset = load_dataset(\"glue\", \"mrpc\")\n", - "\n", - " def tokenize_function(examples):\n", - " # max_length=None => use the model max length (it's actually the default)\n", - " outputs = tokenizer(\n", - " examples[\"sentence1\"],\n", - " examples[\"sentence2\"],\n", - " truncation=True,\n", - " max_length=None,\n", - " )\n", - " return outputs\n", - "\n", - " tokenized_datasets = dataset.map(\n", - " tokenize_function,\n", - " batched=True,\n", - " remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n", - " )\n", - " tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n", - " tokenized_datasets.set_format(\"torch\")\n", - " data_collator = DataCollatorWithPadding(tokenizer=tokenizer, padding=\"longest\")\n", - " return data_collator, tokenized_datasets\n", - "\n", - "base_model_name = \"roberta-base\"\n", - "padding_side = \"right\"\n", - "tokenizer = AutoTokenizer.from_pretrained(base_model_name, padding_side=padding_side)\n", - "if getattr(tokenizer, \"pad_token_id\") is None:\n", - " tokenizer.pad_token_id = tokenizer.eos_token_id\n", - "data_collator, tokenized_datasets = get_glue_mrpc_dataset(tokenizer)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Describe the dataset" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "class GlueMrpc(Dataset):\n", - " \"\"\"\n", - " Has 5.8k pairs of sentences with annotations if the two sentences are equivalent\n", - " \"\"\" \n", - " def get_shape(self):\n", - " \n", - " if not hasattr(self, 'saved_shape'):\n", - " self.saved_shape = max([len(i) for i in self.data['input_ids']])\n", - " return self.saved_shape\n", - "\n", - "train_set = GlueMrpc.from_dict(tokenized_datasets['train'].to_dict())\n", - "valid_set = GlueMrpc.from_dict(tokenized_datasets['test'].to_dict())\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Implement Federated dataset\n", - "We have to implement `split` method" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "class GlueMrpcFederatedDataset(DataLoader):\n", - " def __init__(self, train_set, valid_set, batch_size, data_collator=None):\n", - " self.data_splitter = EqualNumPyDataSplitter(shuffle=True)\n", - " if isinstance(train_set,Dataset):\n", - " self.train_set = GlueMrpc.from_dict(train_set.to_dict())\n", - " else:\n", - " self.train_set = train_set\n", - " \n", - " if isinstance(valid_set,Dataset):\n", - " self.valid_set = GlueMrpc.from_dict(valid_set.to_dict())\n", - " else:\n", - " self.valid_set = valid_set \n", - " \n", - " self.batch_size = batch_size\n", - " self.data_collator = data_collator\n", - " \n", - " def split(self, num_collaborators):\n", - " train_split = self.data_splitter.split(self.train_set, num_collaborators)\n", - " valid_split = self.data_splitter.split(self.valid_set, num_collaborators)\n", - " return [\n", - " GlueMrpcFederatedDataset(\n", - " self.train_set.select(train_split[i]),\n", - " self.valid_set.select(valid_split[i]),\n", - " self.batch_size\n", - " )\n", - " for i in 
range(num_collaborators)\n", - " ]\n", - " \n", - " def get_feature_shape(self):\n", - " return self.train_set.get_shape()\n", - " \n", - " def get_train_loader(self, num_batches=None):\n", - " return DataLoader(self.train_set, batch_size=self.batch_size, collate_fn=data_collator)\n", - " \n", - " def get_valid_loader(self):\n", - " return DataLoader(self.valid_set, batch_size=self.batch_size, collate_fn=data_collator)\n", - " \n", - " def get_train_data_size(self):\n", - " return len(self.train_set)\n", - " \n", - " def get_valid_data_size(self):\n", - " return len(self.valid_set)\n", - " \n", - "fl_data = GlueMrpcFederatedDataset(train_set, valid_set, batch_size=32)\n", - "metric = load_metric('glue', \"mrpc\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Define model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "class LLMTaskRunner(PyTorchTaskRunner):\n", - " def __init__(\n", - " self, base_model_name, data_loader, device=None, metric=None, **kwargs\n", - " ):\n", - " kwargs[\"data_loader\"] = data_loader\n", - " super().__init__(device, **kwargs)\n", - " self.base_model_name = base_model_name\n", - " self.metric = metric\n", - " self._init_model()\n", - " self._init_optimizer()\n", - " self.save_models = []\n", - "\n", - " def _init_model(self):\n", - " model = AutoModelForSequenceClassification.from_pretrained(\n", - " self.base_model_name, return_dict=True\n", - " )\n", - " peft_config = LoraConfig(\n", - " task_type=TaskType.SEQ_CLS,\n", - " inference_mode=False,\n", - " r=16,\n", - " lora_alpha=16,\n", - " lora_dropout=0.1,\n", - " bias=\"lora_only\",\n", - " )\n", - " self.model = get_peft_model(model, peft_config)\n", - "\n", - " def _init_optimizer(self):\n", - " ALL_LAYERNORM_LAYERS = [nn.LayerNorm]\n", - " decay_parameters = get_parameter_names(self.model, ALL_LAYERNORM_LAYERS)\n", - " decay_parameters = [name for name in decay_parameters if \"bias\" not in name]\n", - "\n", - " optimizer_grouped_parameters = [\n", - " {\n", - " \"params\": [\n", - " p\n", - " for n, p in self.model.named_parameters()\n", - " if (n in decay_parameters and p.requires_grad)\n", - " ],\n", - " \"weight_decay\": 0.01,\n", - " },\n", - " {\n", - " \"params\": [\n", - " p\n", - " for n, p in self.model.named_parameters()\n", - " if (n not in decay_parameters and p.requires_grad)\n", - " ],\n", - " \"weight_decay\": 0.0,\n", - " },\n", - " ]\n", - " self.optimizer = AdamW(optimizer_grouped_parameters, lr=0.001)\n", - " self.lr_scheduler = get_scheduler(\n", - " name=\"linear\",\n", - " optimizer=self.optimizer,\n", - " num_warmup_steps=0,\n", - " num_training_steps=len(self.data_loader.train_set) * 5,\n", - " )\n", - "\n", - " self.training_round_completed = False\n", - " self.initialize_tensorkeys_for_functions()\n", - "\n", - " def train(self):\n", - " return self.model.train()\n", - "\n", - " def state_dict(self):\n", - " return get_peft_model_state_dict(self.model)\n", - "\n", - " def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True):\n", - " return set_peft_model_state_dict(self.model, state_dict)\n", - "\n", - " def validate(\n", - " self, col_name, round_num, input_tensor_dict, use_tqdm=False, **kwargs\n", - " ):\n", - " \"\"\"Validate.\n", - "\n", - " Run validation of the model on the local data.\n", - "\n", - " Args:\n", - " col_name: Name of the collaborator\n", - " round_num: What round is it\n", - " input_tensor_dict: Required input tensors (for model)\n", - 
" use_tqdm (bool): Use tqdm to print a progress bar (Default=True)\n", - "\n", - " Returns:\n", - " global_output_dict: Tensors to send back to the aggregator\n", - " local_output_dict: Tensors to maintain in the local TensorDB\n", - "\n", - " \"\"\"\n", - " self.save_models.append(input_tensor_dict.copy())\n", - " self.rebuild_model(round_num, input_tensor_dict, validation=True)\n", - " self.model.eval()\n", - " \n", - "\n", - " self.model.to(self.device)\n", - " val_score = 0\n", - " total_samples = 0\n", - "\n", - " loader = self.data_loader.get_valid_loader()\n", - " if use_tqdm:\n", - " loader = tqdm(loader, desc=\"validate\")\n", - "\n", - " with pt.no_grad():\n", - " for sample in loader:\n", - " samples = sample[\"input_ids\"].shape[0]\n", - " total_samples += samples\n", - " output = self.model(**sample)\n", - " # get the index of the max log-probability\n", - " logits = output.logits\n", - " predictions = torch.argmax(logits, dim=-1)\n", - " metric.add_batch(predictions=predictions, references=sample[\"labels\"])\n", - " val_score = metric.compute()[\"accuracy\"]\n", - "\n", - " origin = col_name\n", - " suffix = \"validate\"\n", - " if kwargs[\"apply\"] == \"local\":\n", - " suffix += \"_local\"\n", - " else:\n", - " suffix += \"_agg\"\n", - " tags = (\"metric\",)\n", - " tags = change_tags(tags, add_field=suffix)\n", - " # TODO figure out a better way to pass in metric for this pytorch\n", - " # validate function\n", - " output_tensor_dict = {\n", - " TensorKey(\"acc\", origin, round_num, True, tags): np.array(val_score)\n", - " }\n", - "\n", - " # Empty list represents metrics that should only be stored locally\n", - " return output_tensor_dict, {}\n", - "\n", - " def train_epoch(self, batch_generator) -> Metric:\n", - " \"\"\"Train single epoch.\n", - "\n", - " Override this function in order to use custom training.\n", - "\n", - " Args:\n", - " batch_generator: Train dataset batch generator. Yields (samples, targets) tuples of\n", - " size = `self.data_loader.batch_size`.\n", - " Returns:\n", - " Metric: An object containing name and np.ndarray value.\n", - " \"\"\"\n", - " losses = []\n", - " for sample in batch_generator:\n", - " self.model.zero_grad()\n", - " output = self.model(**sample)\n", - " loss = output.loss\n", - " loss.backward()\n", - " torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)\n", - " self.optimizer.step()\n", - " self.lr_scheduler.step()\n", - " losses.append(loss.detach().cpu().numpy())\n", - " loss = np.mean(losses)\n", - " if self.model.config.problem_type == \"regression\":\n", - " loss_fct = MSELoss()\n", - " elif self.model.config.problem_type == \"single_label_classification\":\n", - " loss_fct = CrossEntropyLoss()\n", - " elif self.model.config.problem_type == \"multi_label_classification\":\n", - " loss_fct = BCEWithLogitsLoss()\n", - " return Metric(name=loss_fct._get_name(), value=np.array(loss))\n", - "\n", - " def save_native(\n", - " self,\n", - " filepath,\n", - " model_state_dict_key=\"model_state_dict\",\n", - " optimizer_state_dict_key=\"optimizer_state_dict\",\n", - " **kwargs,\n", - " ):\n", - " \"\"\"\n", - " Save model and optimizer states in a picked file specified by the \\\n", - " filepath. model_/optimizer_state_dicts are stored in the keys provided. 
\\\n", - " Uses pt.save().\n", - "\n", - " Args:\n", - " filepath (string) : Path to pickle file to be\n", - " created by pt.save().\n", - " model_state_dict_key (string) : key for model state dict\n", - " in pickled file.\n", - " optimizer_state_dict_key (string) : key for optimizer state\n", - " dict in picked file.\n", - " kwargs : unused\n", - "\n", - " Returns:\n", - " None\n", - " \"\"\"\n", - " pickle_dict = {\n", - " model_state_dict_key: get_peft_model_state_dict(self.model),\n", - " optimizer_state_dict_key: self.optimizer.state_dict(),\n", - " }\n", - " pt.save(pickle_dict, filepath)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "num_collaborators = 2\n", - "collaborator_models = [\n", - " LLMTaskRunner(\n", - " base_model_name,\n", - " data_loader=data_slice,\n", - " metric=metric\n", - " )\n", - " for data_slice in fl_data.split(num_collaborators)]\n", - "collaborators = {'one':collaborator_models[0],'two':collaborator_models[1]}#, 'three':collaborator_models[2]}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#Original TinyImageNet dataset\n", - "print(f'Original training data size: {len(fl_data.train_set)}')\n", - "print(f'Original validation data size: {len(fl_data.valid_set)}\\n')\n", - "\n", - "#Collaborator one's data\n", - "for i, model in enumerate(collaborator_models):\n", - " print(f'Collaborator {i}\\'s training data size: {len(model.data_loader.train_set)}')\n", - " print(f'Collaborator {i}\\'s validation data size: {len(model.data_loader.valid_set)}\\n')\n", - "\n", - "#Collaborator three's data\n", - "#print(f'Collaborator three\\'s training data size: {len(collaborator_models[2].data_loader.X_train)}')\n", - "#print(f'Collaborator three\\'s validation data size: {len(collaborator_models[2].data_loader.X_valid)}')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#Run experiment, return trained FederatedModel\n", - "final_fl_model = fx.run_experiment(collaborators,{'aggregator.settings.rounds_to_train':10,\"tasks.train.kwargs.epochs\":2})" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#Save final model\n", - "final_fl_model.save_native('final_model.pth')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "llama-env", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.0" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -}