From 460ff9b29f6ad05d6703382e28a4b4a53da5ac77 Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Wed, 28 Feb 2024 14:28:20 -0800 Subject: [PATCH 01/25] Export AnomalyDetector --- src/cupbearer/detectors/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/cupbearer/detectors/__init__.py b/src/cupbearer/detectors/__init__.py index 93df129f..04ea74ab 100644 --- a/src/cupbearer/detectors/__init__.py +++ b/src/cupbearer/detectors/__init__.py @@ -1,5 +1,6 @@ # ruff: noqa: F401 from .abstraction import AbstractionDetectorConfig +from .anomaly_detector import AnomalyDetector from .config import DetectorConfig, StoredDetector from .finetuning import FinetuningConfig from .statistical import ( From dbae3bfcb75510cb88935971b3ad57184a204dba Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Wed, 28 Feb 2024 17:31:46 -0800 Subject: [PATCH 02/25] Make tasks more flexible --- src/cupbearer/data/__init__.py | 5 +- src/cupbearer/data/_shared.py | 49 +++-- src/cupbearer/data/adversarial.py | 44 ++-- src/cupbearer/data/backdoor_data.py | 5 + src/cupbearer/data/pytorch.py | 9 + .../abstraction/abstraction_detector.py | 7 +- src/cupbearer/detectors/anomaly_detector.py | 30 +-- src/cupbearer/detectors/finetuning.py | 13 +- .../statistical/mahalanobis_detector.py | 4 +- .../detectors/statistical/que_detector.py | 4 +- .../statistical/spectral_detector.py | 4 +- .../detectors/statistical/statistical.py | 32 ++- .../scripts/conf/eval_detector_conf.py | 4 +- .../scripts/conf/train_detector_conf.py | 4 +- src/cupbearer/scripts/eval_detector.py | 4 +- src/cupbearer/scripts/train_detector.py | 36 ++-- src/cupbearer/tasks/__init__.py | 2 +- src/cupbearer/tasks/_config.py | 202 ++++++++---------- src/cupbearer/tasks/adversarial_examples.py | 21 +- src/cupbearer/tasks/backdoor_detection.py | 32 +-- src/cupbearer/utils/train.py | 4 +- tests/test_data.py | 11 +- tests/test_detectors.py | 4 +- tests/test_pipeline.py | 28 +-- 24 files changed, 285 insertions(+), 273 deletions(-) diff --git 
a/src/cupbearer/data/__init__.py b/src/cupbearer/data/__init__.py index a92441b9..1cf8f1a7 100644 --- a/src/cupbearer/data/__init__.py +++ b/src/cupbearer/data/__init__.py @@ -1,9 +1,8 @@ # ruff: noqa: F401 from ._shared import ( DatasetConfig, - RemoveMixLabelDataset, - TestDataConfig, - TestDataMix, + MixedData, + MixedDataConfig, TrainDataFromRun, ) from .adversarial import AdversarialExampleConfig diff --git a/src/cupbearer/data/_shared.py b/src/cupbearer/data/_shared.py index 8d5df10d..e6a9a65e 100644 --- a/src/cupbearer/data/_shared.py +++ b/src/cupbearer/data/_shared.py @@ -24,6 +24,10 @@ class DatasetConfig(BaseConfig, ABC): def num_classes(self) -> int: # type: ignore pass + def get_test_split(self) -> "DatasetConfig": + # Not every dataset will define this + raise NotImplementedError + def get_transforms(self) -> list[Transform]: """Return a list of transforms that should be applied to this dataset. @@ -68,6 +72,9 @@ def __getitem__(self, index): class TrainDataFromRun(DatasetConfig): path: Path + def get_test_split(self) -> DatasetConfig: + return self.cfg.get_test_split() + def __post_init__(self): self._cfg = None @@ -95,16 +102,18 @@ def get_transforms(self) -> list[Transform]: return transforms -class TestDataMix(Dataset): +class MixedData(Dataset): def __init__( self, normal: Dataset, anomalous: Dataset, normal_weight: float = 0.5, + return_anomaly_labels: bool = True, ): self.normal_data = normal self.anomalous_data = anomalous self.normal_weight = normal_weight + self.return_anomaly_labels = return_anomaly_labels self._length = min( int(len(normal) / normal_weight), int(len(anomalous) / (1 - normal_weight)) ) @@ -116,23 +125,36 @@ def __len__(self): def __getitem__(self, index): if index < self.normal_len: - return self.normal_data[index], 0 + if self.return_anomaly_labels: + return self.normal_data[index], 0 + return self.normal_data[index] else: - return self.anomalous_data[index - self.normal_len], 1 + if self.return_anomaly_labels: + return 
self.anomalous_data[index - self.normal_len], 1 + return self.anomalous_data[index - self.normal_len] @dataclass -class TestDataConfig(DatasetConfig): +class MixedDataConfig(DatasetConfig): normal: DatasetConfig anomalous: DatasetConfig normal_weight: float = 0.5 + return_anomaly_labels: bool = True + + def get_test_split(self) -> "MixedDataConfig": + return MixedDataConfig( + normal=self.normal.get_test_split(), + anomalous=self.anomalous.get_test_split(), + normal_weight=self.normal_weight, + return_anomaly_labels=self.return_anomaly_labels, + ) @property def num_classes(self): assert (n := self.normal.num_classes) == self.anomalous.num_classes return n - def build(self) -> TestDataMix: + def build(self) -> MixedData: # We need to override this method because max_size needs to be applied in a # different way: TestDataMix just has normal data first and then anomalous data, # if we just used a Subset with indices 1...n, we'd get an incorrect ratio. @@ -145,22 +167,11 @@ def build(self) -> TestDataMix: anomalous_size = self.max_size - normal_size assert anomalous_size <= len(anomalous) anomalous = Subset(anomalous, range(anomalous_size)) - dataset = TestDataMix(normal, anomalous, self.normal_weight) + dataset = MixedData( + normal, anomalous, self.normal_weight, self.return_anomaly_labels + ) # We don't want to return a TransformDataset here. Transforms should be applied # directly to the normal and anomalous data. 
if self.transforms: raise ValueError("Transforms are not supported for TestDataConfig.") return dataset - - -class RemoveMixLabelDataset(Dataset): - """Help class to only return the first element of each item""" - - def __init__(self, dataset: Dataset): - self._dataset = dataset - - def __len__(self): - return len(self._dataset) - - def __getitem__(self, index): - return self._dataset[index][0] diff --git a/src/cupbearer/data/adversarial.py b/src/cupbearer/data/adversarial.py index 75fbb3c3..efad5001 100644 --- a/src/cupbearer/data/adversarial.py +++ b/src/cupbearer/data/adversarial.py @@ -17,19 +17,27 @@ def make_adversarial_example( path: Path, + filename: str, batch_size: int = 128, eps: float = 8 / 255, max_examples: Optional[int] = None, success_threshold: float = 0.1, steps: int = 40, + use_test_data: bool = False, ): - save_path = path / "adv_examples.pt" + save_path = path / f"{filename}.pt" if os.path.exists(save_path): logger.info("Adversarial examples already exist, skipping attack") return + else: + logger.info( + "Adversarial examples not found, running attack with default settings" + ) model_cfg = StoredModel(path=path) data_cfg = TrainDataFromRun(path=path) + if use_test_data: + data_cfg = data_cfg.get_test_split() dataset = data_cfg.build() if max_examples: @@ -76,22 +84,24 @@ class AdversarialExampleConfig(DatasetConfig): success_threshold: float = 0.1 steps: int = 40 eps: float = 8 / 255 + use_test_data: bool = False def _build(self) -> Dataset: - if not (self.path / "adv_examples").exists(): - logger.info( - "Adversarial examples not found, running attack with default settings" - ) - make_adversarial_example( - path=self.path, - batch_size=self.attack_batch_size, - eps=self.eps, - max_examples=self.max_size, - success_threshold=self.success_threshold, - steps=self.steps, - ) + filename = f"adv_examples_{'test' if self.use_test_data else 'train'}" + make_adversarial_example( + path=self.path, + filename=filename, + 
batch_size=self.attack_batch_size, + eps=self.eps, + max_examples=self.max_size, + success_threshold=self.success_threshold, + steps=self.steps, + use_test_data=self.use_test_data, + ) - return AdversarialExampleDataset(base_run=self.path, num_examples=self.max_size) + return AdversarialExampleDataset( + filepath=self.path / filename, num_examples=self.max_size + ) @property def num_classes(self): @@ -100,10 +110,8 @@ def num_classes(self): class AdversarialExampleDataset(Dataset): - def __init__(self, base_run: Path, num_examples=None): - self.base_run = base_run - - data = utils.load(base_run / "adv_examples") + def __init__(self, filepath: Path, num_examples=None): + data = utils.load(filepath) assert isinstance(data, dict) self.examples = data["adv_inputs"] self.labels = data["labels"] diff --git a/src/cupbearer/data/backdoor_data.py b/src/cupbearer/data/backdoor_data.py index a75f370d..44e79c4b 100644 --- a/src/cupbearer/data/backdoor_data.py +++ b/src/cupbearer/data/backdoor_data.py @@ -12,6 +12,11 @@ class BackdoorData(DatasetConfig): original: DatasetConfig backdoor: Backdoor + def get_test_split(self) -> DatasetConfig: + return BackdoorData( + original=self.original.get_test_split(), backdoor=self.backdoor + ) + @property def num_classes(self): return self.original.num_classes diff --git a/src/cupbearer/data/pytorch.py b/src/cupbearer/data/pytorch.py index 95725f09..804ce80e 100644 --- a/src/cupbearer/data/pytorch.py +++ b/src/cupbearer/data/pytorch.py @@ -1,3 +1,4 @@ +import dataclasses from dataclasses import dataclass from torch.utils.data import Dataset @@ -25,6 +26,14 @@ class PytorchConfig(DatasetConfig): transforms: dict[str, Transform] = mutable_field({"to_tensor": ToTensor()}) default_augmentations: bool = True + def get_test_split(self) -> DatasetConfig: + if self.train: + # TODO: this will keep the augmentations around, + # which we probably don't want? 
+ return dataclasses.replace(self, train=False) + else: + raise ValueError("This dataset is already a test split.") + def __post_init__(self): super().__post_init__() if self.default_augmentations and self.train: diff --git a/src/cupbearer/detectors/abstraction/abstraction_detector.py b/src/cupbearer/detectors/abstraction/abstraction_detector.py index 34062256..df49ddea 100644 --- a/src/cupbearer/detectors/abstraction/abstraction_detector.py +++ b/src/cupbearer/detectors/abstraction/abstraction_detector.py @@ -146,11 +146,14 @@ def should_train_on_clean_data(self) -> bool: def train( self, - dataset, + trusted_data, + untrusted_data, *, num_classes: int, train_config: TrainConfig, ): + if trusted_data is None: + raise ValueError("Abstraction detector requires trusted training data.") # Possibly we should store this as a submodule to save optimizers and continue # training later. But as long as we don't actually make use of that, # this is easiest. @@ -160,7 +163,7 @@ def train( optim_cfg=train_config.optimizer, ) - train_loader = train_config.get_dataloader(dataset) + train_loader = train_config.get_dataloader(trusted_data) # TODO: implement validation data # val_loaders = { diff --git a/src/cupbearer/detectors/anomaly_detector.py b/src/cupbearer/detectors/anomaly_detector.py index e70e5d50..4fbb8724 100644 --- a/src/cupbearer/detectors/anomaly_detector.py +++ b/src/cupbearer/detectors/anomaly_detector.py @@ -13,7 +13,7 @@ from torch.utils.data import DataLoader, Dataset from tqdm.auto import tqdm -from cupbearer.data import TestDataMix +from cupbearer.data import MixedData from cupbearer.models.models import HookedModel from cupbearer.utils import utils @@ -33,18 +33,19 @@ def __init__( self.trained = False - @property @abstractmethod - def should_train_on_clean_data(self) -> bool: - pass - - @property - def should_train_on_poisoned_data(self) -> bool: - return not self.should_train_on_clean_data + def train( + self, + trusted_data: Dataset | None, + 
untrusted_data: Dataset | None, + *, + num_classes: int, + train_config: utils.BaseConfig, + ): + """Train the anomaly detector with the given datasets. - @abstractmethod - def train(self, dataset, *, num_classes: int, train_config: utils.BaseConfig): - """Train the anomaly detector with the given dataset as "normal" data.""" + At least one of trusted_data or untrusted_data must be provided. + """ @contextmanager def finetune(self, **kwargs): @@ -91,15 +92,18 @@ def eval( self, # Don't need train_dataset here, but e.g. adversarial abstractions need it, # and in general there's no reason to deny detectors access to it during eval. + # TODO: I think we can/should remove this and require detectors to handle + # anything involving training data during training (now that they get access + # to untrusted data then). train_dataset: Dataset, - test_dataset: TestDataMix, + test_dataset: MixedData, histogram_percentile: float = 95, num_bins: int = 100, pbar: bool = False, ): # Check this explicitly because otherwise things can break in weird ways # when we assume that anomaly labels are included. - assert isinstance(test_dataset, TestDataMix), type(test_dataset) + assert isinstance(test_dataset, MixedData), type(test_dataset) test_loader = DataLoader( test_dataset, diff --git a/src/cupbearer/detectors/finetuning.py b/src/cupbearer/detectors/finetuning.py index d76f512b..24ff4bbd 100644 --- a/src/cupbearer/detectors/finetuning.py +++ b/src/cupbearer/detectors/finetuning.py @@ -19,26 +19,25 @@ def __init__(self, model, max_batch_size, save_path): # detector or load weights for inference, we'll need to copy in both cases. 
self.finetuned_model = copy.deepcopy(self.model) - @property - def should_train_on_clean_data(self) -> bool: - return True - def train( self, - clean_dataset, + trusted_data, + untrusted_data, *, num_classes: int, train_config: TrainConfig, ): + if trusted_data is None: + raise ValueError("Finetuning detector requires trusted training data.") classifier = Classifier( self.finetuned_model, num_classes=num_classes, - optim_cfg=train_config, + optim_cfg=train_config.optimizer, save_hparams=False, ) # Create a DataLoader for the clean dataset - clean_loader = train_config.get_dataloader(clean_dataset) + clean_loader = train_config.get_dataloader(trusted_data) # Finetune the model on the clean dataset trainer = train_config.get_trainer(path=self.save_path) diff --git a/src/cupbearer/detectors/statistical/mahalanobis_detector.py b/src/cupbearer/detectors/statistical/mahalanobis_detector.py index d18e8835..b91d4287 100644 --- a/src/cupbearer/detectors/statistical/mahalanobis_detector.py +++ b/src/cupbearer/detectors/statistical/mahalanobis_detector.py @@ -8,9 +8,7 @@ class MahalanobisDetector(ActivationCovarianceBasedDetector): - @property - def should_train_on_clean_data(self) -> bool: - return True + use_trusted: bool = True def post_covariance_training(self, train_config: MahalanobisTrainConfig): self.inv_covariances = { diff --git a/src/cupbearer/detectors/statistical/que_detector.py b/src/cupbearer/detectors/statistical/que_detector.py index a3d4cf2f..7bc8dd15 100644 --- a/src/cupbearer/detectors/statistical/que_detector.py +++ b/src/cupbearer/detectors/statistical/que_detector.py @@ -8,9 +8,7 @@ class QuantumEntropyDetector(ActivationCovarianceBasedDetector): - @property - def should_train_on_clean_data(self) -> bool: - return True + use_trusted: bool = True def post_covariance_training(self, train_config: ActivationCovarianceTrainConfig): whitening_matrices = {} diff --git a/src/cupbearer/detectors/statistical/spectral_detector.py 
b/src/cupbearer/detectors/statistical/spectral_detector.py index eaf74a3a..7774721d 100644 --- a/src/cupbearer/detectors/statistical/spectral_detector.py +++ b/src/cupbearer/detectors/statistical/spectral_detector.py @@ -13,9 +13,7 @@ class SpectralSignatureDetector(ActivationCovarianceBasedDetector): Neural Information Processing Systems (2018). """ - @property - def should_train_on_clean_data(self) -> bool: - return False + use_trusted: bool = False def post_covariance_training(self, train_config: ActivationCovarianceTrainConfig): # Calculate top right singular vectors from covariance matrices diff --git a/src/cupbearer/detectors/statistical/statistical.py b/src/cupbearer/detectors/statistical/statistical.py index 80914c8d..2e686777 100644 --- a/src/cupbearer/detectors/statistical/statistical.py +++ b/src/cupbearer/detectors/statistical/statistical.py @@ -2,7 +2,7 @@ from dataclasses import dataclass import torch -from torch.utils.data import DataLoader +from torch.utils.data import DataLoader, Dataset from tqdm import tqdm from cupbearer.detectors.anomaly_detector import ActivationBasedDetector @@ -20,7 +20,7 @@ class StatisticalTrainConfig(BaseConfig, ABC): # robust: bool = False # TODO spectre uses # https://www.semanticscholar.org/paper/Being-Robust-(in-High-Dimensions)-Can-Be-Practical-Diakonikolas-Kamath/2a6de51d86f13e9eb7efa85491682dad0ccd65e8?utm_source=direct_link - def get_dataloader(self, dataset, train=True): + def get_dataloader(self, dataset: Dataset, train=True): if train: return DataLoader( dataset, @@ -39,7 +39,7 @@ def get_dataloader(self, dataset, train=True): @dataclass class DebugStatisticalTrainConfig(StatisticalTrainConfig): - max_batchs: int = 3 + max_batches: int = 3 batch_size: int = 5 max_batch_size: int = 5 @@ -67,6 +67,8 @@ class DebugMahalanobisTrainConfig(DebugStatisticalTrainConfig, MahalanobisTrainC class StatisticalDetector(ActivationBasedDetector, ABC): + use_trusted: bool + @abstractmethod def init_variables(self, 
activation_sizes: dict[str, torch.Size]): pass @@ -77,7 +79,8 @@ def batch_update(self, activations: dict[str, torch.Tensor]): def train( self, - dataset, + trusted_data, + untrusted_data, *, num_classes: int, train_config: StatisticalTrainConfig, @@ -85,7 +88,20 @@ def train( # Common for statistical methods is that the training does not require # gradients, but instead computes summary statistics or similar with torch.inference_mode(): - data_loader = train_config.get_dataloader(dataset) + if self.use_trusted: + if trusted_data is None: + raise ValueError( + f"{self.__class__.__name__} requires trusted training data." + ) + data = trusted_data + else: + if untrusted_data is None: + raise ValueError( + f"{self.__class__.__name__} requires untrusted training data." + ) + data = untrusted_data + + data_loader = train_config.get_dataloader(data) example_batch = next(iter(data_loader)) _, example_activations = self.get_activations(example_batch) @@ -131,13 +147,15 @@ def post_covariance_training(self, train_config: ActivationCovarianceTrainConfig def train( self, - dataset, + trusted_data, + untrusted_data, *, num_classes: int, train_config: ActivationCovarianceTrainConfig, ): super().train( - dataset, + trusted_data=trusted_data, + untrusted_data=untrusted_data, num_classes=num_classes, train_config=train_config, ) diff --git a/src/cupbearer/scripts/conf/eval_detector_conf.py b/src/cupbearer/scripts/conf/eval_detector_conf.py index 980c768a..ca5f7d2c 100644 --- a/src/cupbearer/scripts/conf/eval_detector_conf.py +++ b/src/cupbearer/scripts/conf/eval_detector_conf.py @@ -1,13 +1,13 @@ from dataclasses import dataclass from cupbearer.detectors import DetectorConfig, StoredDetector -from cupbearer.tasks import TaskConfigBase +from cupbearer.tasks import TaskConfig from cupbearer.utils.scripts import ScriptConfig @dataclass(kw_only=True) class Config(ScriptConfig): - task: TaskConfigBase + task: TaskConfig detector: DetectorConfig | None = None save_config: bool = False 
pbar: bool = False diff --git a/src/cupbearer/scripts/conf/train_detector_conf.py b/src/cupbearer/scripts/conf/train_detector_conf.py index ed854e32..0b51379c 100644 --- a/src/cupbearer/scripts/conf/train_detector_conf.py +++ b/src/cupbearer/scripts/conf/train_detector_conf.py @@ -1,11 +1,11 @@ from dataclasses import dataclass from cupbearer.detectors import DetectorConfig -from cupbearer.tasks import TaskConfigBase +from cupbearer.tasks import TaskConfig from cupbearer.utils.scripts import ScriptConfig @dataclass(kw_only=True) class Config(ScriptConfig): - task: TaskConfigBase + task: TaskConfig detector: DetectorConfig diff --git a/src/cupbearer/scripts/eval_detector.py b/src/cupbearer/scripts/eval_detector.py index f9e87b74..fe20b245 100644 --- a/src/cupbearer/scripts/eval_detector.py +++ b/src/cupbearer/scripts/eval_detector.py @@ -6,8 +6,8 @@ def main(cfg: Config): assert cfg.detector is not None # make type checker happy # Init - train_data = cfg.task.build_train_data() - test_data = cfg.task.build_test_data() + train_data = cfg.task.trusted_data.build() + test_data = cfg.task.test_data.build() # train_data[0] is the first sample, which is (input, ...), so we need another [0] example_input = train_data[0][0] model = cfg.task.build_model(input_shape=example_input.shape) diff --git a/src/cupbearer/scripts/train_detector.py b/src/cupbearer/scripts/train_detector.py index 3f0dd720..a0e099c7 100644 --- a/src/cupbearer/scripts/train_detector.py +++ b/src/cupbearer/scripts/train_detector.py @@ -1,5 +1,3 @@ -import warnings - from cupbearer.utils.scripts import script from . 
import EvalDetectorConfig, eval_detector @@ -8,29 +6,27 @@ @script def main(cfg: Config): - reference_data = cfg.task.build_train_data() - # reference_data[0] is the first sample, which is (input, ...), so we need another + trusted_data = untrusted_data = None + + if cfg.task.allow_trusted: + trusted_data = cfg.task.trusted_data.build() + if cfg.task.allow_untrusted: + untrusted_data = cfg.task.untrusted_data.build() + + example_data = trusted_data or untrusted_data + if example_data is None: + raise ValueError( + f"{type(cfg.task).__name__} does not allow trusted nor untrusted data." + ) + # example_data[0] is the first sample, which is (input, ...), so we need another # [0] index - example_input = reference_data[0][0] + example_input = example_data[0][0] model = cfg.task.build_model(input_shape=example_input.shape) detector = cfg.detector.build(model=model, save_dir=cfg.path) - if cfg.task.normal_weight_when_training < 1.0: - if not detector.should_train_on_poisoned_data: - warnings.warn( - f"Detector of type {type(detector).__name__} is not meant" - + " to be trained on poisoned samples." - ) - else: - if not detector.should_train_on_clean_data: - warnings.warn( - f"Detector of type {type(detector).__name__} is not meant" - + " to be trained without poisoned samples." - ) - - # We want to convert the train dataclass to a dict, but *not* recursively. 
detector.train( - reference_data, + trusted_data=trusted_data, + untrusted_data=untrusted_data, num_classes=cfg.task.num_classes, train_config=cfg.detector.train, ) diff --git a/src/cupbearer/tasks/__init__.py b/src/cupbearer/tasks/__init__.py index 60328543..09baff94 100644 --- a/src/cupbearer/tasks/__init__.py +++ b/src/cupbearer/tasks/__init__.py @@ -1,5 +1,5 @@ # ruff: noqa: F401 -from ._config import CustomTask, TaskConfig, TaskConfigBase +from ._config import CustomTask, TaskConfig from .adversarial_examples import AdversarialExampleTask from .backdoor_detection import BackdoorDetection from .toy_features import ToyFeaturesTask diff --git a/src/cupbearer/tasks/_config.py b/src/cupbearer/tasks/_config.py index 54dae1b9..525e688f 100644 --- a/src/cupbearer/tasks/_config.py +++ b/src/cupbearer/tasks/_config.py @@ -1,150 +1,130 @@ -from abc import ABC, abstractmethod, abstractproperty -from copy import deepcopy +from abc import ABC from dataclasses import dataclass from typing import Optional -from torch.utils.data import Dataset - from cupbearer.data import ( DatasetConfig, - RemoveMixLabelDataset, - TestDataConfig, - TestDataMix, + MixedDataConfig, ) from cupbearer.models import ModelConfig from cupbearer.models.models import HookedModel -from cupbearer.utils.utils import BaseConfig @dataclass(kw_only=True) -class TaskConfigBase(BaseConfig, ABC): - @abstractmethod - def build_train_data(self) -> Dataset: - pass - - @abstractmethod - def build_model(self, input_shape: list[int] | tuple[int]) -> HookedModel: - pass - - @abstractmethod - def build_test_data(self) -> TestDataMix: - pass - - @abstractproperty - def num_classes(self) -> int: # type: ignore - pass - +class TaskConfig(ABC): + # Proportion of clean data in untrusted datasets: + clean_test_weight: float = 0.5 + clean_train_weight: float = 0.5 + # Whether to allow using trusted and untrusted data for training: + allow_trusted: bool = True + allow_untrusted: bool = True -@dataclass(kw_only=True) -class 
TaskConfig(TaskConfigBase, ABC): - normal_weight: float = 0.5 - normal_weight_when_training: float = 1.0 max_train_size: Optional[int] = None max_test_size: Optional[int] = None def __post_init__(self): # We'll only actually instantiate these when we need them, in case relevant # attributes get changed after initialization. - self._train_data: Optional[DatasetConfig] = None - self._test_data: Optional[DatasetConfig] = None + + # TODO: I think this is no longer necessary after the config refactor. + self._trusted_data: Optional[DatasetConfig] = None + self._untrusted_data: Optional[DatasetConfig] = None + self._test_data: Optional[MixedDataConfig] = None self._model: Optional[ModelConfig] = None - @abstractmethod - def _init_train_data(self): - pass - - def _get_normal_test_data(self) -> DatasetConfig: - # Default implementation: just use the training data, but the test split - # if possible. May be overridden, e.g. if normal test data is meant to be - # harder or otherwise out-of-distribution. - if not self._train_data: - self._init_train_data() - assert self._train_data is not None, "init_train_data must set _train_data" - normal = deepcopy(self._train_data) - if hasattr(normal, "train"): - # TODO: this is a bit of a hack, maybe there should be a nicer interface - # for this. - normal.train = False # type: ignore - - return normal - - @abstractmethod - def _get_anomalous_test_data(self) -> DatasetConfig: - pass - - @abstractmethod - def _init_model(self): - pass - - def build_train_data(self) -> Dataset: - if not self._train_data: - self._init_train_data() - assert self._train_data is not None, "init_train_data must set _train_data" - self._train_data.max_size = self.max_train_size - - if self.normal_weight_when_training == 1.0: - return self._train_data.build() - else: - # E.g. 
SpectralDetector should use poisoned data when training - anomalous = self._get_anomalous_test_data() - - # As TestDataMix adds a label for poisoned or not, we remove this here - train_data = RemoveMixLabelDataset( - TestDataConfig( - normal=self._train_data, - anomalous=anomalous, - normal_weight=self.normal_weight_when_training, - ).build() + def _get_clean_data(self, train: bool) -> DatasetConfig: + raise NotImplementedError + + def _get_anomalous_data(self, train: bool) -> DatasetConfig: + raise NotImplementedError + + def _get_model(self) -> ModelConfig: + raise NotImplementedError + + @property + def trusted_data(self) -> DatasetConfig: + """Clean data that may be used for training.""" + if not self.allow_trusted: + raise ValueError( + "Using trusted training data is not allowed for this task." ) - return train_data + if not self._trusted_data: + self._trusted_data = self._get_clean_data(train=True) + self._trusted_data.max_size = self.max_train_size + return self._trusted_data + + @property + def untrusted_data(self) -> DatasetConfig: + """A mix of clean and anomalous data that may be used for training.""" + if not self.allow_untrusted: + raise ValueError( + "Using untrusted training data is not allowed for this task." 
+ ) + if not self._untrusted_data: + anomalous_data = self._get_anomalous_data(train=True) + clean_data = self._get_clean_data(train=True) + self._untrusted_data = MixedDataConfig( + normal=clean_data, + anomalous=anomalous_data, + normal_weight=self.clean_train_weight, + max_size=self.max_train_size, + return_anomaly_labels=False, + ) + return self._untrusted_data def build_model(self, input_shape: list[int] | tuple[int]) -> HookedModel: if not self._model: - self._init_model() - assert self._model is not None, "init_model must set _model" + self._model = self._get_model() return self._model.build_model(input_shape) - def build_test_data(self) -> TestDataMix: - normal = self._get_normal_test_data() - anomalous = self._get_anomalous_test_data() - self._test_data = TestDataConfig( - normal=normal, - anomalous=anomalous, - normal_weight=self.normal_weight, - max_size=self.max_test_size, - ) - return self._test_data.build() + @property + def test_data(self) -> MixedDataConfig: + if not self._test_data: + normal = self._get_clean_data(train=False) + anomalous = self._get_anomalous_data(train=False) + self._test_data = MixedDataConfig( + normal=normal, + anomalous=anomalous, + normal_weight=self.clean_test_weight, + max_size=self.max_test_size, + ) + return self._test_data @property def num_classes(self): - if not self._train_data: - self._init_train_data() - assert self._train_data is not None, "init_train_data must set _train_data" - return self._train_data.num_classes + try: + return self.trusted_data.num_classes + except ValueError: + return self.untrusted_data.num_classes -@dataclass(kw_only=True) +@dataclass class CustomTask(TaskConfig): """A fully customizable task config, where all datasets are specified directly.""" - train_data: DatasetConfig - anomalous_data: DatasetConfig - normal_test_data: Optional[DatasetConfig] = None + clean_test_data: DatasetConfig + anomalous_test_data: DatasetConfig model: ModelConfig + clean_train_data: Optional[DatasetConfig] = 
None + anomalous_train_data: Optional[DatasetConfig] = None - def _init_train_data(self): - self._train_data = self.train_data - - def _get_anomalous_test_data(self) -> DatasetConfig: - return self.anomalous_data - - def _get_normal_test_data(self) -> DatasetConfig: - if self.normal_test_data: - return self.normal_test_data - return super()._get_normal_test_data() - - def _init_model(self): - self._model = self.model + def __post_init__(self): + super(CustomTask, self).__post_init__() + self.allow_trusted = self.clean_train_data is not None + self.allow_untrusted = self.anomalous_train_data is not None + + def _get_clean_data(self, train: bool) -> DatasetConfig: + # This is a bit of a hack because it might return `None`, but that only + # becomes important if illegal training data is used. + return self.clean_train_data if train else self.clean_test_data + + def _get_anomalous_data(self, train: bool) -> DatasetConfig: + # This is a bit of a hack because it might return `None`, but that only + # becomes important if illegal training data is used. 
+ return self.anomalous_train_data if train else self.anomalous_test_data + + def _get_model(self) -> ModelConfig: + return self.model @dataclass(kw_only=True) diff --git a/src/cupbearer/tasks/adversarial_examples.py b/src/cupbearer/tasks/adversarial_examples.py index f8196d76..907967d2 100644 --- a/src/cupbearer/tasks/adversarial_examples.py +++ b/src/cupbearer/tasks/adversarial_examples.py @@ -2,9 +2,8 @@ from dataclasses import dataclass from pathlib import Path -from cupbearer.data._shared import TrainDataFromRun -from cupbearer.data.adversarial import AdversarialExampleConfig -from cupbearer.models import StoredModel +from cupbearer.data import AdversarialExampleConfig, DatasetConfig, TrainDataFromRun +from cupbearer.models import ModelConfig, StoredModel from ._config import DebugTaskConfig, TaskConfig @@ -17,15 +16,18 @@ class AdversarialExampleTask(TaskConfig): steps: int = 40 eps: float = 8 / 255 - def _init_train_data(self): - self._train_data = TrainDataFromRun(path=self.path) + def _get_clean_data(self, train: bool) -> DatasetConfig: + if train: + return TrainDataFromRun(path=self.path) + else: + return TrainDataFromRun(path=self.path).get_test_split() - def _get_anomalous_test_data(self): + def _get_anomalous_data(self, train: bool) -> DatasetConfig: max_size = None if self.max_test_size: # This isn't strictly necessary, but it lets us avoid generating more # adversarial examples than needed. 
- max_size = math.ceil(self.max_test_size * (1 - self.normal_weight)) + max_size = math.ceil(self.max_test_size * (1 - self.clean_test_weight)) return AdversarialExampleConfig( path=self.path, max_size=max_size, @@ -33,10 +35,11 @@ def _get_anomalous_test_data(self): success_threshold=self.success_threshold, steps=self.steps, eps=self.eps, + use_test_data=not train, ) - def _init_model(self): - self._model = StoredModel(path=self.path) + def _get_model(self) -> ModelConfig: + return StoredModel(path=self.path) @dataclass(kw_only=True) diff --git a/src/cupbearer/tasks/backdoor_detection.py b/src/cupbearer/tasks/backdoor_detection.py index b1d05762..0291e847 100644 --- a/src/cupbearer/tasks/backdoor_detection.py +++ b/src/cupbearer/tasks/backdoor_detection.py @@ -1,10 +1,9 @@ -from copy import deepcopy from dataclasses import dataclass from pathlib import Path -from cupbearer.data import Backdoor +from cupbearer.data import Backdoor, DatasetConfig from cupbearer.data.backdoor_data import BackdoorData -from cupbearer.models import StoredModel +from cupbearer.models import ModelConfig, StoredModel from cupbearer.utils.scripts import load_config from ._config import DebugTaskConfig, TaskConfig @@ -16,19 +15,28 @@ class BackdoorDetection(TaskConfig): backdoor: Backdoor no_load: bool = False - def _init_train_data(self): - data_cfg = load_config(self.path, "train_data", BackdoorData) - # Remove the backdoor - self._train_data = data_cfg.original + def __post_init__(self): + super().__post_init__() + self.backdoored_train_data = load_config(self.path, "train_data", BackdoorData) - def _get_anomalous_test_data(self): - copy = deepcopy(self._train_data) + def _get_clean_data(self, train: bool) -> DatasetConfig: + if train: + return self.backdoored_train_data.original + else: + return self.backdoored_train_data.original.get_test_split() + + def _get_anomalous_data(self, train: bool) -> DatasetConfig: if not self.no_load: self.backdoor.load(self.path) - return 
BackdoorData(original=copy, backdoor=self.backdoor) - def _init_model(self): - self._model = StoredModel(path=self.path) + # TODO: should we get rid of `self.backdoor` and just use the existing one + # from the training run? + return BackdoorData( + original=self._get_clean_data(train), backdoor=self.backdoor + ) + + def _get_model(self) -> ModelConfig: + return StoredModel(path=self.path) @dataclass diff --git a/src/cupbearer/utils/train.py b/src/cupbearer/utils/train.py index 7f827b7e..a87b9b6d 100644 --- a/src/cupbearer/utils/train.py +++ b/src/cupbearer/utils/train.py @@ -4,7 +4,7 @@ import lightning as L from lightning.pytorch import callbacks, loggers -from torch.utils.data import DataLoader +from torch.utils.data import DataLoader, Dataset from cupbearer.utils.optimizers import OptimizerConfig from cupbearer.utils.utils import BaseConfig @@ -37,7 +37,7 @@ def callbacks(self): return callback_list - def get_dataloader(self, dataset, train=True): + def get_dataloader(self, dataset: Dataset, train=True): if train: return DataLoader( dataset, diff --git a/tests/test_data.py b/tests/test_data.py index fcc05d78..66a31b5f 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -3,9 +3,6 @@ import numpy as np import pytest import torch - -# We shouldn't import TestDataMix directly because that will make pytest think -# it's a test. 
from cupbearer import data from torch.utils.data import DataLoader, Dataset from torchvision.transforms.functional import InterpolationMode @@ -87,7 +84,7 @@ def anomalous_dataset(): @pytest.fixture def mixed_dataset(clean_dataset, anomalous_dataset): - return data.TestDataMix(clean_dataset, anomalous_dataset) + return data.MixedData(clean_dataset, anomalous_dataset) @pytest.fixture @@ -102,7 +99,7 @@ def anomalous_config(): @pytest.fixture def mixed_config(clean_config, anomalous_config): - return data.TestDataConfig(clean_config, anomalous_config) + return data.MixedDataConfig(clean_config, anomalous_config) def test_len(mixed_dataset): @@ -118,7 +115,7 @@ def test_contents(mixed_dataset): def test_uneven_weight(clean_dataset, anomalous_dataset): - mixed_data = data.TestDataMix(clean_dataset, anomalous_dataset, normal_weight=0.3) + mixed_data = data.MixedData(clean_dataset, anomalous_dataset, normal_weight=0.3) # The 7 anomalous datapoints should be 70% of the dataset, so total length should # be 10. assert len(mixed_data) == 10 @@ -149,7 +146,7 @@ def test_mixed_max_size(clean_config, anomalous_config): anomalous_config.max_size = 23 # The actual mixed dataset we build now is the same as before: 10 datapoints, # 3 normal and 7 anomalous. 
- mixed_config = data.TestDataConfig(clean_config, anomalous_config) + mixed_config = data.MixedDataConfig(clean_config, anomalous_config) mixed_config.max_size = 10 mixed_config.normal_weight = 0.3 mixed_data = mixed_config.build() diff --git a/tests/test_detectors.py b/tests/test_detectors.py index 66964f52..5d53a2b4 100644 --- a/tests/test_detectors.py +++ b/tests/test_detectors.py @@ -51,7 +51,9 @@ def train_detector(self, dataset, Model, Detector, **kwargs): detector = Detector(model=model) detector.train( - dataset=dataset, + # Just make sure all detectors get the data they need: + trusted_data=dataset, + untrusted_data=dataset, num_classes=7, train_config=self.train_config, ) diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index f8be68c1..2efb3ced 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -1,9 +1,6 @@ -import warnings - import pytest import torch from cupbearer import data, detectors, models, tasks -from cupbearer.data import RemoveMixLabelDataset from cupbearer.scripts import ( eval_classifier, train_classifier, @@ -109,7 +106,7 @@ def test_train_mahalanobis_advex(backdoor_classifier_path, tmp_path): path=tmp_path, ) train_detector(cfg) - assert (backdoor_classifier_path / "adv_examples.pt").is_file() + assert (backdoor_classifier_path / "adv_examples_train.pt").is_file() assert (backdoor_classifier_path / "adv_examples.pdf").is_file() assert (tmp_path / "config.yaml").is_file() assert (tmp_path / "detector.pt").is_file() @@ -127,39 +124,18 @@ def test_train_mahalanobis_advex(backdoor_classifier_path, tmp_path): detectors.DebugQuantumEntropyConfig, ], ) -@pytest.mark.parametrize("train_on_clean", [False, True]) -def test_train_statistical_backdoor( - backdoor_classifier_path, tmp_path, detector_type, train_on_clean -): +def test_train_statistical_backdoor(backdoor_classifier_path, tmp_path, detector_type): cfg = train_detector_conf.Config( task=tasks.backdoor_detection.DebugBackdoorDetection( 
path=backdoor_classifier_path, backdoor=data.CornerPixelBackdoor(), - normal_weight_when_training=1.0 if train_on_clean else 0.9, ), detector=detector_type(), path=tmp_path, ) train_detector(cfg) - # Check that data is mixed when it should be - assert train_on_clean ^ isinstance( - cfg.task.build_train_data(), RemoveMixLabelDataset - ) - # Train detector - warning_message = ( - r".*Detector of type \w+ is not meant to be trained \w+ poisoned samples[.].*" - ) - if train_on_clean ^ (detector_type != detectors.DebugSpectralSignatureConfig): - # Should warn for incompatibility - with pytest.warns(match=warning_message): - train_detector(cfg) - else: - # Should not warn for incompatibility - with warnings.catch_warnings(): - warnings.filterwarnings(action="error", message=warning_message) - train_detector(cfg) assert (tmp_path / "config.yaml").is_file() assert (tmp_path / "detector.pt").is_file() # Eval outputs: From f16b9ca264d92d2dc0a338ad10d7a3bbf637eb40 Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Thu, 29 Feb 2024 00:03:42 -0800 Subject: [PATCH 03/25] Iterating on tasks --- src/cupbearer/data/__init__.py | 2 + src/cupbearer/data/_shared.py | 68 ++++++++++- src/cupbearer/scripts/train_detector.py | 4 + src/cupbearer/tasks/_config.py | 130 +++++++++++++++++----- src/cupbearer/tasks/backdoor_detection.py | 35 +++--- src/cupbearer/utils/__init__.py | 2 +- 6 files changed, 191 insertions(+), 50 deletions(-) diff --git a/src/cupbearer/data/__init__.py b/src/cupbearer/data/__init__.py index 1cf8f1a7..479558ef 100644 --- a/src/cupbearer/data/__init__.py +++ b/src/cupbearer/data/__init__.py @@ -3,7 +3,9 @@ DatasetConfig, MixedData, MixedDataConfig, + SubsetConfig, TrainDataFromRun, + split_dataset_cfg, ) from .adversarial import AdversarialExampleConfig from .backdoor_data import BackdoorData diff --git a/src/cupbearer/data/_shared.py b/src/cupbearer/data/_shared.py index e6a9a65e..d004f22c 100644 --- a/src/cupbearer/data/_shared.py +++ b/src/cupbearer/data/_shared.py 
@@ -53,6 +53,70 @@ def _build(self) -> Dataset: raise NotImplementedError +@dataclass +class SubsetConfig(DatasetConfig): + full_dataset: DatasetConfig + start_fraction: float = 0.0 + end_fraction: float = 1.0 + + def __post_init__(self): + super().__post_init__() + if self.max_size: + raise ValueError( + "max_size should be set on the full dataset, not the subset." + ) + if self.start_fraction > self.end_fraction: + raise ValueError( + f"{self.start_fraction=} must be less than or equal " + f"to {self.end_fraction=}." + ) + if self.start_fraction < 0 or self.end_fraction > 1: + raise ValueError( + "Fractions must be between 0 and 1, " + f"got {self.start_fraction} and {self.end_fraction}." + ) + if self.transforms: + raise ValueError( + "Transforms should be applied to the full dataset, not the subset." + ) + + def _build(self) -> Dataset: + full = self.full_dataset.build() + start = int(self.start_fraction * len(full)) + end = int(self.end_fraction * len(full)) + return Subset(full, range(start, end)) + + def num_classes(self) -> int: # type: ignore + return self.full_dataset.num_classes + + def get_test_split(self) -> "DatasetConfig": + return SubsetConfig( + full_dataset=self.full_dataset.get_test_split(), + start_fraction=self.start_fraction, + end_fraction=self.end_fraction, + ) + + # Mustn't inherit get_transforms() from full_dataset, they're already applied + # to the full dataset on build. 
+ + +def split_dataset_cfg(cfg: DatasetConfig, *fractions: float) -> list[SubsetConfig]: + if not fractions: + raise ValueError("At least one fraction must be provided.") + if not all(0 <= f <= 1 for f in fractions): + raise ValueError("Fractions must be between 0 and 1.") + if not sum(fractions) == 1: + fractions = fractions + (1 - sum(fractions),) + + subsets = [] + current_start = 0.0 + for fraction in fractions: + subsets.append(SubsetConfig(cfg, current_start, current_start + fraction)) + current_start += fraction + assert current_start == 1.0 + return subsets + + class TransformDataset(Dataset): """Dataset that applies a transform to another dataset.""" @@ -162,10 +226,10 @@ def build(self) -> MixedData: anomalous = self.anomalous.build() if self.max_size: normal_size = int(self.max_size * self.normal_weight) - assert normal_size <= len(normal) + normal_size = min(len(normal), normal_size) normal = Subset(normal, range(normal_size)) anomalous_size = self.max_size - normal_size - assert anomalous_size <= len(anomalous) + anomalous_size = min(len(anomalous), anomalous_size) anomalous = Subset(anomalous, range(anomalous_size)) dataset = MixedData( normal, anomalous, self.normal_weight, self.return_anomaly_labels diff --git a/src/cupbearer/scripts/train_detector.py b/src/cupbearer/scripts/train_detector.py index a0e099c7..f8641e3e 100644 --- a/src/cupbearer/scripts/train_detector.py +++ b/src/cupbearer/scripts/train_detector.py @@ -10,8 +10,12 @@ def main(cfg: Config): if cfg.task.allow_trusted: trusted_data = cfg.task.trusted_data.build() + if len(trusted_data) == 0: + trusted_data = None if cfg.task.allow_untrusted: untrusted_data = cfg.task.untrusted_data.build() + if len(untrusted_data) == 0: + untrusted_data = None example_data = trusted_data or untrusted_data if example_data is None: diff --git a/src/cupbearer/tasks/_config.py b/src/cupbearer/tasks/_config.py index 525e688f..b6309f2d 100644 --- a/src/cupbearer/tasks/_config.py +++ 
b/src/cupbearer/tasks/_config.py @@ -1,10 +1,12 @@ -from abc import ABC +from abc import ABC, abstractmethod +from copy import deepcopy from dataclasses import dataclass from typing import Optional from cupbearer.data import ( DatasetConfig, MixedDataConfig, + split_dataset_cfg, ) from cupbearer.models import ModelConfig from cupbearer.models.models import HookedModel @@ -32,10 +34,21 @@ def __post_init__(self): self._test_data: Optional[MixedDataConfig] = None self._model: Optional[ModelConfig] = None - def _get_clean_data(self, train: bool) -> DatasetConfig: + def _get_trusted_data(self) -> DatasetConfig: raise NotImplementedError - def _get_anomalous_data(self, train: bool) -> DatasetConfig: + def _get_clean_untrusted_data(self) -> DatasetConfig: + raise NotImplementedError + + def _get_anomalous_data(self) -> DatasetConfig: + raise NotImplementedError + + # The following two methods don't need to be implemented, the task will use + # get_test_split() on the untrusted data by default. + def _get_clean_test_data(self) -> DatasetConfig: + raise NotImplementedError + + def _get_anomalous_test_data(self) -> DatasetConfig: raise NotImplementedError def _get_model(self) -> ModelConfig: @@ -49,7 +62,7 @@ def trusted_data(self) -> DatasetConfig: "Using trusted training data is not allowed for this task." ) if not self._trusted_data: - self._trusted_data = self._get_clean_data(train=True) + self._trusted_data = deepcopy(self._get_trusted_data()) self._trusted_data.max_size = self.max_train_size return self._trusted_data @@ -61,8 +74,8 @@ def untrusted_data(self) -> DatasetConfig: "Using untrusted training data is not allowed for this task." 
) if not self._untrusted_data: - anomalous_data = self._get_anomalous_data(train=True) - clean_data = self._get_clean_data(train=True) + anomalous_data = self._get_anomalous_data() + clean_data = self._get_clean_untrusted_data() self._untrusted_data = MixedDataConfig( normal=clean_data, anomalous=anomalous_data, @@ -80,11 +93,15 @@ def build_model(self, input_shape: list[int] | tuple[int]) -> HookedModel: @property def test_data(self) -> MixedDataConfig: if not self._test_data: - normal = self._get_clean_data(train=False) - anomalous = self._get_anomalous_data(train=False) + try: + anomalous_data = self._get_anomalous_test_data() + clean_data = self._get_clean_test_data() + except NotImplementedError: + anomalous_data = self._get_anomalous_data().get_test_split() + clean_data = self._get_clean_untrusted_data().get_test_split() self._test_data = MixedDataConfig( - normal=normal, - anomalous=anomalous, + normal=clean_data, + anomalous=anomalous_data, normal_weight=self.clean_test_weight, max_size=self.max_test_size, ) @@ -99,29 +116,86 @@ def num_classes(self): @dataclass +class FuzzedTask(TaskConfig): + """A task where the anomalous inputs are some modified version of clean ones.""" + + trusted_fraction: float = 1.0 + + def __post_init__(self): + super().__post_init__() + + # First we get the base (unmodified) data and its test split. + train_data = self._get_base_data() + test_data = train_data.get_test_split() + + # We split the training data up into three parts: + # 1. A `trusted_fraction` part will be used as trusted data. + # 2. Out of the remaining part, a `clean_untrusted_fraction` part will be used + # as clean untrusted data. + # 3. The rest will be used as anomalous training data. + ( + self._trusted_data, + self._clean_untrusted_data, + _anomalous_base, + ) = split_dataset_cfg( + train_data, + self.trusted_fraction, + # Using clean_train_weight here means we'll end up using all our data, + # since this is also what's used later in the MixedDataConfig. 
+ (1 - self.trusted_fraction) * self.clean_train_weight, + (1 - self.trusted_fraction) * (1 - self.clean_train_weight), + ) + + # Similarly, we plit up the test data, except there is no trusted subset. + self._clean_test_data, _anomalous_test_base = split_dataset_cfg( + test_data, + self.clean_test_weight, + ) + + self._anomalous_data = self.fuzz(_anomalous_base) + self._anomalous_test_data = self.fuzz(_anomalous_test_base) + + @abstractmethod + def fuzz(self, data: DatasetConfig) -> DatasetConfig: + pass + + @abstractmethod + def _get_base_data(self) -> DatasetConfig: + pass + + def _get_trusted_data(self) -> DatasetConfig: + return self._trusted_data + + def _get_clean_untrusted_data(self) -> DatasetConfig: + return self._clean_untrusted_data + + def _get_anomalous_data(self) -> DatasetConfig: + return self._anomalous_data + + def _get_clean_test_data(self) -> DatasetConfig: + return self._clean_test_data + + def _get_anomalous_test_data(self) -> DatasetConfig: + return self._anomalous_test_data + + +@dataclass(kw_only=True) class CustomTask(TaskConfig): """A fully customizable task config, where all datasets are specified directly.""" - clean_test_data: DatasetConfig - anomalous_test_data: DatasetConfig + trusted_data: DatasetConfig + clean_untrusted_data: DatasetConfig + anomalous_data: DatasetConfig model: ModelConfig - clean_train_data: Optional[DatasetConfig] = None - anomalous_train_data: Optional[DatasetConfig] = None - def __post_init__(self): - super(CustomTask, self).__post_init__() - self.allow_trusted = self.clean_train_data is not None - self.allow_untrusted = self.anomalous_train_data is not None - - def _get_clean_data(self, train: bool) -> DatasetConfig: - # This is a bit of a hack because it might return `None`, but that only - # becomes important if illegal training data is used. 
- return self.clean_train_data if train else self.clean_test_data - - def _get_anomalous_data(self, train: bool) -> DatasetConfig: - # This is a bit of a hack because it might return `None`, but that only - # becomes important if illegal training data is used. - return self.anomalous_train_data if train else self.anomalous_test_data + def _get_clean_untrusted_data(self) -> DatasetConfig: + return self.clean_untrusted_data + + def _get_trusted_data(self) -> DatasetConfig: + return self.trusted_data + + def _get_anomalous_data(self) -> DatasetConfig: + return self.anomalous_data def _get_model(self) -> ModelConfig: return self.model diff --git a/src/cupbearer/tasks/backdoor_detection.py b/src/cupbearer/tasks/backdoor_detection.py index 0291e847..cec9fdcc 100644 --- a/src/cupbearer/tasks/backdoor_detection.py +++ b/src/cupbearer/tasks/backdoor_detection.py @@ -1,39 +1,36 @@ from dataclasses import dataclass from pathlib import Path -from cupbearer.data import Backdoor, DatasetConfig +from cupbearer.data import DatasetConfig from cupbearer.data.backdoor_data import BackdoorData from cupbearer.models import ModelConfig, StoredModel from cupbearer.utils.scripts import load_config -from ._config import DebugTaskConfig, TaskConfig +from ._config import DebugTaskConfig, FuzzedTask @dataclass(kw_only=True) -class BackdoorDetection(TaskConfig): +class BackdoorDetection(FuzzedTask): path: Path - backdoor: Backdoor no_load: bool = False def __post_init__(self): - super().__post_init__() - self.backdoored_train_data = load_config(self.path, "train_data", BackdoorData) - - def _get_clean_data(self, train: bool) -> DatasetConfig: - if train: - return self.backdoored_train_data.original - else: - return self.backdoored_train_data.original.get_test_split() + backdoor_data = load_config(self.path, "train_data", BackdoorData) + self._original = backdoor_data.original + self._backdoor = backdoor_data.backdoor + self._backdoor.p_backdoor = 1.0 - def _get_anomalous_data(self, train: 
bool) -> DatasetConfig: if not self.no_load: - self.backdoor.load(self.path) + self._backdoor.load(self.path) + + # Call this only now that _original and _backdoor are set. + super().__post_init__() + + def _get_base_data(self) -> DatasetConfig: + return self._original - # TODO: should we get rid of `self.backdoor` and just use the existing one - # from the training run? - return BackdoorData( - original=self._get_clean_data(train), backdoor=self.backdoor - ) + def fuzz(self, data: DatasetConfig) -> DatasetConfig: + return BackdoorData(original=data, backdoor=self._backdoor) def _get_model(self) -> ModelConfig: return StoredModel(path=self.path) diff --git a/src/cupbearer/utils/__init__.py b/src/cupbearer/utils/__init__.py index 27d7d21c..5ca825cb 100644 --- a/src/cupbearer/utils/__init__.py +++ b/src/cupbearer/utils/__init__.py @@ -1,4 +1,4 @@ # ruff: noqa: F401 from .optimizers import OptimizerConfig from .train import DebugTrainConfig, TrainConfig -from .utils import load, save +from .utils import inputs_from_batch, load, save From 9073a8564a444041a11bc2701593af1214f2642a Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Thu, 29 Feb 2024 15:05:43 -0800 Subject: [PATCH 04/25] Mostly fix tests Adversarial examples are broken, I think they might be easier to fix after some bigger changes --- src/cupbearer/data/_shared.py | 1 + tests/test_pipeline.py | 23 ++++++++--------------- 2 files changed, 9 insertions(+), 15 deletions(-) diff --git a/src/cupbearer/data/_shared.py b/src/cupbearer/data/_shared.py index d004f22c..911a77c6 100644 --- a/src/cupbearer/data/_shared.py +++ b/src/cupbearer/data/_shared.py @@ -86,6 +86,7 @@ def _build(self) -> Dataset: end = int(self.end_fraction * len(full)) return Subset(full, range(start, end)) + @property def num_classes(self) -> int: # type: ignore return self.full_dataset.num_classes diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index 2efb3ced..4fdad0ea 100644 --- a/tests/test_pipeline.py +++ 
b/tests/test_pipeline.py @@ -56,9 +56,7 @@ def test_eval_classifier(backdoor_classifier_path): @pytest.mark.slow def test_train_abstraction_corner_backdoor(backdoor_classifier_path, tmp_path): cfg = train_detector_conf.Config( - task=tasks.BackdoorDetection( - path=backdoor_classifier_path, backdoor=data.CornerPixelBackdoor() - ), + task=tasks.BackdoorDetection(path=backdoor_classifier_path), detector=detectors.AbstractionDetectorConfig(train=DebugTrainConfig()), path=tmp_path, ) @@ -75,9 +73,7 @@ def test_train_abstraction_corner_backdoor(backdoor_classifier_path, tmp_path): @pytest.mark.slow def test_train_autoencoder_corner_backdoor(backdoor_classifier_path, tmp_path): cfg = train_detector_conf.Config( - task=tasks.BackdoorDetection( - path=backdoor_classifier_path, backdoor=data.CornerPixelBackdoor() - ), + task=tasks.BackdoorDetection(path=backdoor_classifier_path), detector=detectors.AbstractionDetectorConfig( train=DebugTrainConfig(), abstraction=detectors.abstraction.AutoencoderAbstractionConfig(), @@ -127,8 +123,9 @@ def test_train_mahalanobis_advex(backdoor_classifier_path, tmp_path): def test_train_statistical_backdoor(backdoor_classifier_path, tmp_path, detector_type): cfg = train_detector_conf.Config( task=tasks.backdoor_detection.DebugBackdoorDetection( + # Need some untrusted data for SpectralSignatureConfig path=backdoor_classifier_path, - backdoor=data.CornerPixelBackdoor(), + trusted_fraction=0.5, ), detector=detector_type(), path=tmp_path, @@ -146,9 +143,7 @@ def test_train_statistical_backdoor(backdoor_classifier_path, tmp_path, detector @pytest.mark.slow def test_finetuning_detector(backdoor_classifier_path, tmp_path): cfg = train_detector_conf.Config( - task=tasks.BackdoorDetection( - path=backdoor_classifier_path, backdoor=data.CornerPixelBackdoor() - ), + task=tasks.BackdoorDetection(path=backdoor_classifier_path), detector=detectors.finetuning.FinetuningConfig(train=DebugTrainConfig()), path=tmp_path, ) @@ -195,16 +190,14 @@ def 
test_wanet(tmp_path): # Check that from_run can load WanetBackdoor properly train_detector_cfg = train_detector_conf.Config( - task=tasks.backdoor_detection.DebugBackdoorDetection( - path=tmp_path / "wanet", backdoor=data.WanetBackdoor() - ), + task=tasks.backdoor_detection.DebugBackdoorDetection(path=tmp_path / "wanet"), detector=detectors.DebugMahalanobisConfig(), path=tmp_path / "wanet-mahalanobis", ) train_detector(train_detector_cfg) assert isinstance(train_detector_cfg.task, tasks.BackdoorDetection) - assert isinstance(train_detector_cfg.task.backdoor, data.WanetBackdoor) + assert isinstance(train_detector_cfg.task._backdoor, data.WanetBackdoor) assert torch.allclose( - train_detector_cfg.task.backdoor.control_grid, + train_detector_cfg.task._backdoor.control_grid, cfg.train_data.backdoor.control_grid, ) From 54c34a682c6518a19958c3251d551d5edf74155f Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Thu, 29 Feb 2024 23:46:51 -0800 Subject: [PATCH 05/25] [WIP] Remove configs --- demo.ipynb | 505 ----------------- notebooks/simple_demo.ipynb | 514 ++++++++++++++++++ src/cupbearer/data/__init__.py | 17 +- src/cupbearer/data/_shared.py | 75 ++- src/cupbearer/data/adversarial.py | 131 ++--- src/cupbearer/data/backdoor_data.py | 35 -- src/cupbearer/data/backdoors.py | 12 +- src/cupbearer/data/pytorch.py | 51 +- src/cupbearer/data/toy_ambiguous_features.py | 15 - src/cupbearer/data/transforms.py | 8 +- src/cupbearer/detectors/__init__.py | 14 +- .../detectors/abstraction/__init__.py | 3 +- src/cupbearer/detectors/anomaly_detector.py | 42 +- src/cupbearer/detectors/config.py | 57 -- src/cupbearer/detectors/finetuning.py | 19 +- .../detectors/statistical/__init__.py | 67 +-- src/cupbearer/models/__init__.py | 100 +--- src/cupbearer/scripts/_shared.py | 22 +- .../scripts/conf/eval_classifier_conf.py | 13 +- .../scripts/conf/eval_detector_conf.py | 15 +- .../scripts/conf/train_classifier_conf.py | 23 +- .../scripts/conf/train_detector_conf.py | 12 +- 
src/cupbearer/scripts/eval_classifier.py | 20 +- src/cupbearer/scripts/eval_detector.py | 14 +- src/cupbearer/scripts/train_classifier.py | 20 +- src/cupbearer/scripts/train_detector.py | 39 +- src/cupbearer/tasks/__init__.py | 7 +- src/cupbearer/tasks/_config.py | 283 +++------- src/cupbearer/tasks/adversarial_examples.py | 79 ++- src/cupbearer/tasks/backdoor_detection.py | 79 ++- src/cupbearer/tasks/toy_features.py | 27 - src/cupbearer/utils/scripts.py | 55 +- 32 files changed, 927 insertions(+), 1446 deletions(-) delete mode 100644 demo.ipynb create mode 100644 notebooks/simple_demo.ipynb delete mode 100644 src/cupbearer/data/backdoor_data.py delete mode 100644 src/cupbearer/detectors/config.py delete mode 100644 src/cupbearer/tasks/toy_features.py diff --git a/demo.ipynb b/demo.ipynb deleted file mode 100644 index fa2a4e44..00000000 --- a/demo.ipynb +++ /dev/null @@ -1,505 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "%load_ext autoreload\n", - "%autoreload 2" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "from pathlib import Path\n", - "\n", - "from cupbearer import data, detectors, models, scripts, tasks, utils\n", - "from tensorboard import notebook" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Training a backdoored classifier\n", - "First, we train a classifier on poisoned data:" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "GPU available: True (mps), used: True\n", - "TPU available: False, using: 0 TPU cores\n", - "IPU available: False, using: 0 IPUs\n", - "HPU available: False, using: 0 HPUs\n", - "/Users/erik/.pyenv/versions/3.10.9/envs/cupbearer/lib/python3.10/site-packages/lightning/pytorch/callbacks/model_checkpoint.py:639: Checkpoint directory 
logs/demo/classifier/checkpoints exists and is not empty.\n", - "\n", - " | Name | Type | Params\n", - "------------------------------------------------------\n", - "0 | model | MLP | 118 K \n", - "1 | train_accuracy | MulticlassAccuracy | 0 \n", - "2 | val_accuracy | ModuleList | 0 \n", - "3 | test_accuracy | ModuleList | 0 \n", - "------------------------------------------------------\n", - "118 K Trainable params\n", - "0 Non-trainable params\n", - "118 K Total params\n", - "0.473 Total estimated model params size (MB)\n" - ] - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "2478edb97e774ca097a31f195f31c032", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Sanity Checking: | | 0/? [00:00\n", - " \n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "notebook.display(port=6006, height=1000)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can also explicitly evaluate the trained model (right now this is pretty limited and doesn't support multiple datasets at once):" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\u001b[32m2024-02-14 14:33:59.006\u001b[0m | \u001b[34m\u001b[1mDEBUG \u001b[0m | \u001b[36mcupbearer.scripts.eval_classifier\u001b[0m:\u001b[36mmain\u001b[0m:\u001b[36m18\u001b[0m - \u001b[34m\u001b[1mLoading transform: ToTensor()\u001b[0m\n", - "\u001b[32m2024-02-14 14:33:59.006\u001b[0m | \u001b[34m\u001b[1mDEBUG \u001b[0m | \u001b[36mcupbearer.scripts.eval_classifier\u001b[0m:\u001b[36mmain\u001b[0m:\u001b[36m18\u001b[0m - \u001b[34m\u001b[1mLoading transform: RandomCrop(p=0.8, padding=5, fill=0, padding_mode='constant')\u001b[0m\n", - "\u001b[32m2024-02-14 14:33:59.007\u001b[0m | \u001b[34m\u001b[1mDEBUG \u001b[0m | 
\u001b[36mcupbearer.scripts.eval_classifier\u001b[0m:\u001b[36mmain\u001b[0m:\u001b[36m18\u001b[0m - \u001b[34m\u001b[1mLoading transform: RandomRotation(p=0.5, degrees=10, interpolation=, expand=False, center=None, fill=0)\u001b[0m\n", - "GPU available: True (mps), used: True\n", - "TPU available: False, using: 0 TPU cores\n", - "IPU available: False, using: 0 IPUs\n", - "HPU available: False, using: 0 HPUs\n" - ] - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "531198feafe142feb9c826b7c9886331", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Testing: | | 0/? [00:00┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n", - "┃ Test metric DataLoader 0 ┃\n", - "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n", - "│ test/acc_epoch 0.9509999752044678 │\n", - "│ test/acc_step 0.9509999752044678 │\n", - "│ test/loss 0.15841300785541534 │\n", - "└───────────────────────────┴───────────────────────────┘\n", - "\n" - ], - "text/plain": [ - "┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n", - "┃\u001b[1m \u001b[0m\u001b[1m Test metric \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m DataLoader 0 \u001b[0m\u001b[1m \u001b[0m┃\n", - "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n", - "│\u001b[36m \u001b[0m\u001b[36m test/acc_epoch \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.9509999752044678 \u001b[0m\u001b[35m \u001b[0m│\n", - "│\u001b[36m \u001b[0m\u001b[36m test/acc_step \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.9509999752044678 \u001b[0m\u001b[35m \u001b[0m│\n", - "│\u001b[36m \u001b[0m\u001b[36m test/loss \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.15841300785541534 \u001b[0m\u001b[35m \u001b[0m│\n", - "└───────────────────────────┴───────────────────────────┘\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "scripts.eval_classifier(\n", - " 
scripts.EvalClassifierConfig(\n", - " path=Path(\"logs/demo/classifier\"), data=data.MNIST(train=False)\n", - " )\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "These results will also have been stored to `logs/demo/classifier/metrics.json` if we want to process them further (e.g. to compare many runs):" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[{'test/loss': 0.15841300785541534, 'test/acc_step': 0.9509999752044678, 'test/acc_epoch': 0.9509999752044678}]\n" - ] - } - ], - "source": [ - "with open(\"logs/demo/classifier/eval.json\") as f:\n", - " print(json.load(f))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Training a backdoor detector\n", - "We'll train a very simple detector using the Mahalanobis distance:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\u001b[32m2024-02-14 14:39:19.332\u001b[0m | \u001b[34m\u001b[1mDEBUG \u001b[0m | \u001b[36mcupbearer.utils.scripts\u001b[0m:\u001b[36mload_config\u001b[0m:\u001b[36m55\u001b[0m - \u001b[34m\u001b[1mLoading config 'train_data' from logs/demo/classifier\u001b[0m\n", - "\u001b[32m2024-02-14 14:39:19.356\u001b[0m | \u001b[34m\u001b[1mDEBUG \u001b[0m | \u001b[36mcupbearer.utils.scripts\u001b[0m:\u001b[36mload_config\u001b[0m:\u001b[36m55\u001b[0m - \u001b[34m\u001b[1mLoading config 'model' from logs/demo/classifier\u001b[0m\n", - "100%|██████████| 15/15 [00:06<00:00, 2.32it/s]\n", - "\u001b[32m2024-02-14 14:39:26.325\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36msave_weights\u001b[0m:\u001b[36m205\u001b[0m - \u001b[1mSaving detector to logs/demo/detector/detector\u001b[0m\n", - "\u001b[32m2024-02-14 14:39:26.360\u001b[0m | \u001b[34m\u001b[1mDEBUG \u001b[0m | 
\u001b[36mcupbearer.utils.scripts\u001b[0m:\u001b[36mload_config\u001b[0m:\u001b[36m55\u001b[0m - \u001b[34m\u001b[1mLoading config 'model' from logs/demo/classifier\u001b[0m\n", - "\u001b[32m2024-02-14 14:39:26.375\u001b[0m | \u001b[34m\u001b[1mDEBUG \u001b[0m | \u001b[36mcupbearer.utils.scripts\u001b[0m:\u001b[36mload_config\u001b[0m:\u001b[36m55\u001b[0m - \u001b[34m\u001b[1mLoading config 'detector' from logs/demo/detector\u001b[0m\n", - "\u001b[32m2024-02-14 14:39:26.378\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36mload_weights\u001b[0m:\u001b[36m209\u001b[0m - \u001b[1mLoading detector from logs/demo/detector/detector\u001b[0m\n", - "\u001b[32m2024-02-14 14:39:28.636\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36meval\u001b[0m:\u001b[36m128\u001b[0m - \u001b[1mAUC_ROC: 1.0000\u001b[0m\n", - "\u001b[32m2024-02-14 14:39:28.636\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36meval\u001b[0m:\u001b[36m129\u001b[0m - \u001b[1mAP: 1.0000\u001b[0m\n" - ] - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAkQAAAHHCAYAAABeLEexAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAABWv0lEQVR4nO3deVhU5f8//uewb86wKCCJQOYCSmrigvtCoqK55oaJSloJ7pnyLU1Tw33Nrd4kWri0qKWWSYhSiigo4oK4oVgKWAgjmqz37w9/nI9HQBGBQc/zcV1zXc593+ec17mB5tnZRiWEECAiIiJSMD1dF0BERESkawxEREREpHgMRERERKR4DERERESkeAxEREREpHgMRERERKR4DERERESkeAxEREREpHgMRERERKR4DEREVC4qlQpz5szRdRkvhNDQUKhUKly7dk1q69y5Mzp37lwl23/8ZzVnzhyoVCr8888/VbJ9Z2dnjBo1qkq2RVReDERElWTdunVQqVRo3bq1rkuhl8TRo0cxZ84cZGZm6rqUYqpzbURlYaDrAoheVmFhYXB2dsbx48dx+fJlvPbaa7ouiaqRAwcOPPMyR48exdy5czFq1ChYWlqWebn//vsPBgaV+5/7J9WWlJQEPT3+/zdVb/wNJaoEycnJOHr0KJYvX45atWohLCxM1yXRU9y7d69Kt2dkZAQjI6NKW39hYSEePHgAADAxMan0QPQkxsbGMDQ01Nn2icqCgYioEoSFhcHKygo+Pj4YNGhQiYHo2rVrUKlUWLp0Kb788kvUq1cPxsbGaNmyJU6cOFFs/MGDB9GhQweYm5vD0tISffv2RWJiomxM0bUhFy9exIgRI6DRaFCrVi3MmjULQgjcuHEDffv2hVqthr29PZYtWyZbPjc3F7Nnz0aLFi2g0Whgbm6ODh06IDIy8on7GxkZCZVKhV27dhXr27p1K1QqFaKjo0tdPi8vD3PnzkX9+vVhYmICGxsbtG/fHuHh4bJxFy5cwODBg1GrVi2YmpqiYcOG+Pjjj2VjTp06hZ49e0KtVsPCwgLdunXDsWPHZGOKruk5fPgwxo8fD1tbW9SpU0fq//XXX6W5rlGjBnx8fHDu3LknzkGRc+fOoWvXrjA1NUWdOnUwf/58FBYWFhtX0jVEa9asQePGjWFmZgYrKyt4eHhg69atAB7+bKdPnw4AcHFxgUqlkl2XpFKpEBgYiLCwMDRu3BjGxsbYv3+/1FfS9V7//PMPBg8eDLVaDRsbG0yaNEkKUcD//Y6GhoYWW/bRdT6ttpKuIbp69SrefvttWFtbw8zMDG3atMG+fftkYw4dOgSVSoXvvvsOCxYsQJ06dWBiYoJu3brh8uXLxWoieh48ZUZUCcLCwjBgwAAYGRlh2LBhWL9+PU6cOIGWLVsWG7t161bcvXsX7733HlQqFRYvXowBAwbg6tWr0v9V//777+jZsydeffVVzJkzB//99x/WrFmDdu3a4eTJk3B2dpatc8iQIXB1dcXChQuxb98+zJ8/H9bW1ti4cSO6du2KRYsWISwsDB9++CFatmyJjh07AgC0Wi3+97//YdiwYRg7dizu3r2LkJAQeHt74/jx42jWrFmJ+9u5c2c4OjoiLCwM/fv3LzYX9erVg6enZ6nzNWfOHAQHB+Pdd99Fq1atoNVqERsbi5MnT+LNN98EACQkJKBDhw4wNDTEuHHj4OzsjCtXrmDPnj1YsGABgIdhpEOHDlCr1fjoo49gaGiIjRs3onPnzjh8+HCx67nGjx+PWrVqYfbs2dIRom+++QZ+fn7w9vbGokWLcP/+faxfvx7t27fHqVOnis31o1JTU9GlSxfk5+dj5syZMDc3x5dffglTU9NSlyny1VdfYeLEiRg0aJAUTBISEhATE4Phw4djwIABuHjxIrZt24YVK1agZs2aAIBatWpJ6zh48CC+++47BAYGombNmk+sFQAGDx4MZ2dnBAcH49ixY1i9ejXu3LmDLVu2PLX
eR5WltkelpaWhbdu2uH//PiZOnAgbGxts3rwZb731Fn744Ydiv0MLFy6Enp4ePvzwQ2RlZWHx4sXw9fVFTEzMM9VJ9ESCiCpUbGysACDCw8OFEEIUFhaKOnXqiEmTJsnGJScnCwDCxsZGZGRkSO0//fSTACD27NkjtTVr1kzY2tqKf//9V2o7ffq00NPTEyNHjpTaPv30UwFAjBs3TmrLz88XderUESqVSixcuFBqv3PnjjA1NRV+fn6ysTk5ObI679y5I+zs7MSYMWNk7QDEp59+Kr0PCgoSxsbGIjMzU2pLT08XBgYGsnEladq0qfDx8XnimI4dO4oaNWqI69evy9oLCwulf/fr108YGRmJK1euSG03b94UNWrUEB07dpTaNm3aJACI9u3bi/z8fKn97t27wtLSUowdO1a2jdTUVKHRaIq1P27y5MkCgIiJiZHa0tPThUajEQBEcnKy1N6pUyfRqVMn6X3fvn1F48aNn7j+JUuWFFtPEQBCT09PnDt3rsS+R38GRb8nb731lmzc+PHjBQBx+vRpIcT//Y5u2rTpqet8Um1OTk6y37Oiefrjjz+ktrt37woXFxfh7OwsCgoKhBBCREZGCgDC1dVV9nu5atUqAUCcOXOm2LaIyounzIgqWFhYGOzs7NClSxcAD08tDBkyBNu3b0dBQUGx8UOGDIGVlZX0vkOHDgAenlIAgFu3biE+Ph6jRo2CtbW1NO7111/Hm2++iV9++aXYOt99913p3/r6+vDw8IAQAv7+/lK7paUlGjZsKG2naGzRdS2FhYXIyMhAfn4+PDw8cPLkySfu98iRI5GTk4MffvhBatuxYwfy8/MxYsSIJy5raWmJc+fO4dKlSyX23759G1FRURgzZgzq1q0r61OpVACAgoICHDhwAP369cOrr74q9deuXRvDhw/Hn3/+Ca1WK1t27Nix0NfXl96Hh4cjMzMTw4YNwz///CO99PX10bp166eeOvzll1/Qpk0btGrVSmqrVasWfH19n7hc0Rz89ddfJZ4uLatOnTrBzc2tzOMDAgJk7ydMmAAAJf5OVaRffvkFrVq1Qvv27aU2CwsLjBs3DteuXcP58+dl40ePHi273urxvxGiisBARFSBCgoKsH37dnTp0gXJycm4fPkyLl++jNatWyMtLQ0RERHFlnn8A74oHN25cwcAcP36dQBAw4YNiy3r6uqKf/75p9gFwY+vU6PRwMTERDqV8Wh70XaKbN68Ga+//rp0LU+tWrWwb98+ZGVlPXHfGzVqhJYtW8qulwoLC0ObNm2eeofdZ599hszMTDRo0ADu7u6YPn06EhISpP6iD74mTZqUuo7bt2/j/v37pc5TYWEhbty4IWt3cXGRvS8KZF27dkWtWrVkrwMHDiA9Pf2J+3H9+nXUr1+/WHtJNT1uxowZsLCwQKtWrVC/fn0EBATgyJEjT13uUY/vz9M8Xmu9evWgp6cne15SZbh+/XqpP6ei/kc97W+EqCLwGiKiCnTw4EHcunUL27dvx/bt24v1h4WFoXv37rK2R49QPEoIUe46SlpnWbbz7bffYtSoUejXrx+mT58OW1tb6OvrIzg4GFeuXHnqdkeOHIlJkybhr7/+Qk5ODo4dO4Yvvvjiqct17NgRV65cwU8//YQDBw7gf//7H1asWIENGzbIjnZVtMev7Sm6+Pmbb76Bvb19sfGVeaeWq6srkpKSsHfvXuzfvx8//vgj1q1bh9mzZ2Pu3LllWkdZrlV6kqKjbaW9L1LSkc7KVBl/I0SPYyAiqkBhYWGwtbXF2rVri/Xt3LkTu3btwoYNG57pg8vJyQnAw2e5PO7ChQuoWbMmzM3Ny1/0I3744Qe8+uqr2Llzp+zD8NNPPy3T8kOHDsXUqVOxbds2/PfffzA0NMSQIUPKtKy1tTVGjx6N0aNHIzs7Gx07dsScOXPw7rvvSqfAzp49W+rytWrVgpmZWanzpKenB0dHxyfWUK9ePQCAra0tvLy8ylT3o5y
cnEo87VdSTSUxNzfHkCFDMGTIEOTm5mLAgAFYsGABgoKCYGJiUmpAKa9Lly7JjipdvnwZhYWF0sXYRUdiHn/Y4uNHcIDSw1NJnJycSv05FfUTVTWeMiOqIP/99x927tyJ3r17Y9CgQcVegYGBuHv3Ln7++ednWm/t2rXRrFkzbN68WfbBdPbsWRw4cAC9evWqsH0o+j/xR//POyYm5om3zD+qZs2a6NmzJ7799luEhYWhR48exU7TleTff/+VvbewsMBrr72GnJwcAA/DTseOHfH1118jJSVFNraoVn19fXTv3h0//fST7JRPWloatm7divbt20OtVj+xDm9vb6jVanz++efIy8sr1n/79u0nLt+rVy8cO3YMx48fly1TludQPT4HRkZGcHNzgxBCqqUo+FbU06AfD+5r1qwBAPTs2RMAoFarUbNmTURFRcnGrVu3rti6nqW2Xr164fjx47Lfq3v37uHLL7+Es7PzM10HRVRReISIqIL8/PPPuHv3Lt56660S+9u0aSM9pLGsR02KLFmyBD179oSnpyf8/f2l2+41Gk2Ffp9Y7969sXPnTvTv3x8+Pj5ITk7Ghg0b4Obmhuzs7DKtY+TIkRg0aBAAYN68eWVaxs3NDZ07d0aLFi1gbW2N2NhY/PDDDwgMDJTGrF69Gu3bt8cbb7yBcePGwcXFBdeuXcO+ffsQHx8PAJg/fz7Cw8PRvn17jB8/HgYGBti4cSNycnKwePHip9ahVquxfv16vPPOO3jjjTcwdOhQ1KpVCykpKdi3bx/atWv3xFOAH330Eb755hv06NEDkyZNkm67d3Jykl0TVZLu3bvD3t4e7dq1g52dHRITE/HFF1/Ax8cHNWrUAAC0aNECAPDxxx9j6NChMDQ0RJ8+fcp9hDA5ORlvvfUWevTogejoaHz77bcYPnw4mjZtKo159913sXDhQrz77rvw8PBAVFQULl68WGxdz1LbzJkzsW3bNvTs2RMTJ06EtbU1Nm/ejOTkZPz44498qjXphg7vcCN6qfTp00eYmJiIe/fulTpm1KhRwtDQUPzzzz/SLc1LliwpNg6P3dIshBC///67aNeunTA1NRVqtVr06dNHnD9/Xjam6Hbq27dvy9r9/PyEubl5se106tRJdqt3YWGh+Pzzz4WTk5MwNjYWzZs3F3v37hV+fn7CycnpqTUKIUROTo6wsrISGo1G/Pfff6XOxaPmz58vWrVqJSwtLYWpqalo1KiRWLBggcjNzZWNO3v2rOjfv7+wtLQUJiYmomHDhmLWrFmyMSdPnhTe3t7CwsJCmJmZiS5duoijR4/KxhTddn/ixIkS64mMjBTe3t5Co9EIExMTUa9ePTFq1CgRGxv71H1JSEgQnTp1EiYmJuKVV14R8+bNEyEhIU+97X7jxo2iY8eOwsbGRhgbG4t69eqJ6dOni6ysLNn6582bJ1555RWhp6cnWycAERAQUGJNj/+sin5Pzp8/LwYNGiRq1KghrKysRGBgYLGf2f3794W/v7/QaDSiRo0aYvDgwSI9Pb3En39ptT1+270QQly5ckUMGjRI+lm2atVK7N27Vzam6Lb777//Xtb+pMcBEJWXSghelUZEFSc/Px8ODg7o06cPQkJCdF0OEVGZ8LgkEVWo3bt34/bt2xg5cqSuSyEiKjMeISKiChETE4OEhATMmzcPNWvWfOqDHImIqhMeISKiCrF+/Xp88MEHsLW1febvwiIi0jUeISIiIiLF4xEiIiIiUjwGIiIiIlI8PpixjAoLC3Hz5k3UqFGjwh+fT0RERJVDCIG7d+/CwcHhiQ/9ZCAqo5s3bz71e5CIiIioerpx4wbq1KlTaj8DURkVPTr/xo0bT/0+JCIiIqoetFotHB0dpc/x0jAQlVHRaTK1Ws1ARERE9IJ52uUuvKiaiIiIFI+BiIiIiBSPgYiIiIgUj9cQERHRS6WgoAB5eXm6LoOqiKGhIfT19Z97PQxERET0UhBCIDU1FZmZmbouhaq
YpaUl7O3tn+s5gQxERET0UigKQ7a2tjAzM+NDdBVACIH79+8jPT0dAFC7du1yr4uBiIiIXngFBQVSGLKxsdF1OVSFTE1NAQDp6emwtbUt9+kzXlRNREQvvKJrhszMzHRcCelC0c/9ea4dYyAiIqKXBk+TKVNF/NwZiIiIiEjxGIiIiIjomRw6dAgqleqluqNPpxdVR0VFYcmSJYiLi8OtW7ewa9cu9OvXTzYmMTERM2bMwOHDh5Gfnw83Nzf8+OOPqFu3LgDgwYMHmDZtGrZv346cnBx4e3tj3bp1sLOzk9aRkpKCDz74AJGRkbCwsICfnx+Cg4NhYMBryomIXnYrwi9W6famvNngmcaPGjUKmzdvRnBwMGbOnCm17969G/3794cQoqJLpBLo9AjRvXv30LRpU6xdu7bE/itXrqB9+/Zo1KgRDh06hISEBMyaNQsmJibSmClTpmDPnj34/vvvcfjwYdy8eRMDBgyQ+gsKCuDj44Pc3FwcPXoUmzdvRmhoKGbPnl3p+0dERFQWJiYmWLRoEe7cuVNh68zNza2wdSmBTgNRz549MX/+fPTv37/E/o8//hi9evXC4sWL0bx5c9SrVw9vvfUWbG1tAQBZWVkICQnB8uXL0bVrV7Ro0QKbNm3C0aNHcezYMQDAgQMHcP78eXz77bdo1qwZevbsiXnz5mHt2rX8ZSEiomrBy8sL9vb2CA4OLnXMjz/+iMaNG8PY2BjOzs5YtmyZrN/Z2Rnz5s3DyJEjoVarMW7cOISGhsLS0hJ79+5Fw4YNYWZmhkGDBuH+/fvYvHkznJ2dYWVlhYkTJ6KgoEBa1zfffAMPDw/UqFED9vb2GD58uPSsn5dVtb2GqLCwEPv27UODBg3g7e0NW1tbtG7dGrt375bGxMXFIS8vD15eXlJbo0aNULduXURHRwMAoqOj4e7uLjuF5u3tDa1Wi3PnzpW6/ZycHGi1WtmLiIioMujr6+Pzzz/HmjVr8NdffxXrj4uLw+DBgzF06FCcOXMGc+bMwaxZsxAaGiobt3TpUjRt2hSnTp3CrFmzAAD379/H6tWrsX37duzfvx+HDh1C//798csvv+CXX37BN998g40bN+KHH36Q1pOXl4d58+bh9OnT2L17N65du4ZRo0ZV5hToXLW9iCY9PR3Z2dlYuHAh5s+fj0WLFmH//v0YMGAAIiMj0alTJ6SmpsLIyAiWlpayZe3s7JCamgrg4ZNLHw1DRf1FfaUJDg7G3LlzK3anKllJ58mf9Vw2ERHpRv/+/dGsWTN8+umnCAkJkfUtX74c3bp1k0JOgwYNcP78eSxZskQWVLp27Ypp06ZJ7//44w/k5eVh/fr1qFevHgBg0KBB+Oabb5CWlgYLCwu4ubmhS5cuiIyMxJAhQwAAY8aMkdbx6quvYvXq1WjZsiWys7NhYWFRWVOgU9X6CBEA9O3bF1OmTEGzZs0wc+ZM9O7dGxs2bKj07QcFBSErK0t63bhxo9K3SUREyrZo0SJs3rwZiYmJsvbExES0a9dO1tauXTtcunRJdqrLw8Oj2DrNzMykMAQ8PCjg7OwsCzZ2dnayU2JxcXHo06cP6tatixo1aqBTp04AHt6k9LKqtoGoZs2aMDAwgJubm6zd1dVV+oHY29sjNze32G1/aWlpsLe3l8akpaUV6y/qK42xsTHUarXsRUREVJk6duwIb29vBAUFlWt5c3PzYm2Ghoay9yqVqsS2ogMR9+7dg7e3N9RqNcLCwnDixAns2rULwMt9oXa1DURGRkZo2bIlkpKSZO0XL16Ek5MTAKBFixYwNDRERESE1J+UlISUlBR4enoCADw9PXHmzBlZ8g0PD4darS4WtoiIiHRt4cKF2LNnj3QtLPDwYMCRI0dk444cOYIGDRqU+7u7SnPhwgX8+++/WLhwITp06IBGjRq99BdUAzq+hig7OxuXL1+W3icnJyM+Ph7W1taoW7cupk+fjiFDhqBjx47o0qUL9u/fjz179uDQoUM
AAI1GA39/f0ydOhXW1tZQq9WYMGECPD090aZNGwBA9+7d4ebmhnfeeQeLFy9GamoqPvnkEwQEBMDY2FgXu01ERFQqd3d3+Pr6YvXq1VLbtGnT0LJlS8ybNw9DhgxBdHQ0vvjiC6xbt67Ct1+3bl0YGRlhzZo1eP/993H27FnMmzevwrdT3ej0CFFsbCyaN2+O5s2bAwCmTp2K5s2bS88I6t+/PzZs2IDFixfD3d0d//vf//Djjz+iffv20jpWrFiB3r17Y+DAgejYsSPs7e2xc+dOqV9fXx979+6Fvr4+PD09MWLECIwcORKfffZZ1e4sERFRGX322WfSKSwAeOONN/Ddd99h+/btaNKkCWbPno3PPvusUu78qlWrFkJDQ/H999/Dzc0NCxcuxNKlSyt8O9WNSvARmGWi1Wqh0WiQlZVVba8n4l1mRKRUDx48QHJyMlxcXGQP7yVleNLPv6yf39X2GiIiIiKiqsJARERERIrHQERERESKx0BEREREisdARERERIrHQERERESKx0BEREREisdARERERIrHQERERESKx0BERERE5ebs7IyVK1fquoznptMvd6XnU9JXdRAR0WMig6t2e12CyrVYdHQ02rdvjx49emDfvn0VXBQ9DY8QERERVQMhISGYMGECoqKicPPmTV2XozgMRERERDqWnZ2NHTt24IMPPoCPjw9CQ0OlvkOHDkGlUiEiIgIeHh4wMzND27ZtkZSUJFvH+vXrUa9ePRgZGaFhw4b45ptvZP0qlQobN25E7969YWZmBldXV0RHR+Py5cvo3LkzzM3N0bZtW1y5ckVa5sqVK+jbty/s7OxgYWGBli1b4vfff3/ivqSkpKBv376wsLCAWq3G4MGDkZaWJvWPGjUK/fr1ky0zefJkdO7cWXr/ww8/wN3dHaamprCxsYGXlxfu3btXxtksHwYiIiIiHfvuu+/QqFEjNGzYECNGjMDXX38NIYRszMcff4xly5YhNjYWBgYGGDNmjNS3a9cuTJo0CdOmTcPZs2fx3nvvYfTo0YiMjJStY968eRg5ciTi4+PRqFEjDB8+HO+99x6CgoIQGxsLIQQCAwOl8dnZ2ejVqxciIiJw6tQp9OjRA3369EFKSkqJ+1FYWIi+ffsiIyMDhw8fRnh4OK5evYohQ4aUeS5u3bqFYcOGYcyYMUhMTMShQ4cwYMCAYvNR0XgNERERkY6FhIRgxIgRAIAePXogKysLhw8flh01WbBgATp16gQAmDlzJnx8fPDgwQOYmJhg6dKlGDVqFMaPHw8AmDp1Ko4dO4alS5eiS5cu0jpGjx6NwYMHAwBmzJgBT09PzJo1C97e3gCASZMmYfTo0dL4pk2bomnTptL7efPmYdeuXfj5559lwalIREQEzpw5g+TkZDg6OgIAtmzZgsaNG+PEiRNo2bLlU+fi1q1byM/Px4ABA+Dk5AQAcHd3f/okPiceISIiItKhpKQkHD9+HMOGDQMAGBgYYMiQIQgJCZGNe/3116V/165dGwCQnp4OAEhMTES7du1k49u1a4fExMRS12FnZwdAHjbs7Ozw4MEDaLVaAA+PEH344YdwdXWFpaUlLCwskJiYWOoRosTERDg6OkphCADc3NxgaWlZrJbSNG3aFN26dYO7uzvefvttfPXVV7hz506Zln0eDEREREQ6FBISgvz8fDg4OMDAwAAGBgZYv349fvzxR2RlZUnjDA0NpX+rVCoAD09RPYuS1vGk9X744YfYtWsXPv/8c/zxxx+Ij4+Hu7s7cnNzn3Ev/4+enl6x0195eXnSv/X19REeHo5ff/0Vbm5uWLNmDRo2bIjk5ORyb7NMdVXq2omIiKhU+fn52LJlC5YtW4b4+Hjpdfr0aTg4OGDbtm1lWo+rqyuOHDkiazty5Ajc3Nyeq74jR45g1KhR6N+/P9zd3WFvb49r1649sY4bN27gxo0bUtv58+eRmZkp1VKrVi3cunVLtlx8fLzsvUqlQrt27TB37ly
cOnUKRkZG2LVr13Pty9PwGiIiIiId2bt3L+7cuQN/f39oNBpZ38CBAxESEoIlS5Y8dT3Tp0/H4MGD0bx5c3h5eWHPnj3YuXPnU+8Ie5r69etj586d6NOnD1QqFWbNmvXEo1JeXl5wd3eHr68vVq5cifz8fIwfPx6dOnWCh4cHAKBr165YsmQJtmzZAk9PT3z77bc4e/YsmjdvDgCIiYlBREQEunfvDltbW8TExOD27dtwdXV9rn15Gh4hIiIi0pGQkBB4eXkVC0PAw0AUGxuLhISEp66nX79+WLVqFZYuXYrGjRtj48aN2LRpk+yi7PJYvnw5rKys0LZtW/Tp0wfe3t544403Sh2vUqnw008/wcrKCh07doSXlxdeffVV7NixQxrj7e2NWbNm4aOPPkLLli1x9+5djBw5UupXq9WIiopCr1690KBBA3zyySdYtmwZevbs+Vz78jQqUdn3sb0ktFotNBoNsrKyoFardV0OgLI9qXrKmw2qoBIiIt168OABkpOT4eLiAhMTE12XQ1XsST//sn5+8wgRERERKR4DERERESkeAxEREREpHgMRERERKR4DERERvTR4n5AyVcTPnYGIiIheeEVPW75//76OKyFdKPq5P/rU7WfFBzMSEdELT19fH5aWltJ3e5mZmUlfQ0EvLyEE7t+/j/T0dFhaWkJfX7/c62IgIiKil4K9vT2A//vCU1IOS0tL6edfXgxERET0UlCpVKhduzZsbW1lXxZKLzdDQ8PnOjJUhIGIiIheKvr6+hXyAUnKwouqiYiISPEYiIiIiEjxdBqIoqKi0KdPHzg4OEClUmH37t2ljn3//fehUqmwcuVKWXtGRgZ8fX2hVqthaWkJf39/ZGdny8YkJCSgQ4cOMDExgaOjIxYvXlwJe0NEREQvKp0Gonv37qFp06ZYu3btE8ft2rULx44dg4ODQ7E+X19fnDt3DuHh4di7dy+ioqIwbtw4qV+r1aJ79+5wcnJCXFwclixZgjlz5uDLL7+s8P0hIiKiF5NOL6ru2bMnevbs+cQxf//9NyZMmIDffvsNPj4+sr7ExETs378fJ06cgIeHBwBgzZo16NWrF5YuXQoHBweEhYUhNzcXX3/9NYyMjNC4cWPEx8dj+fLlsuBEREREylWtryEqLCzEO++8g+nTp6Nx48bF+qOjo2FpaSmFIQDw8vKCnp4eYmJipDEdO3aEkZGRNMbb2xtJSUm4c+dOqdvOycmBVquVvYiIiOjlVK0D0aJFi2BgYICJEyeW2J+amgpbW1tZm4GBAaytrZGamiqNsbOzk40pel80piTBwcHQaDTSy9HR8Xl2hYiIiKqxahuI4uLisGrVKoSGhurk8etBQUHIysqSXjdu3KjyGoiIiKhqVNtA9McffyA9PR1169aFgYEBDAwMcP36dUybNg3Ozs4AHj6m/fFHtOfn5yMjI0N6hLe9vT3S0tJkY4reP+kx38bGxlCr1bIXERERvZyqbSB65513kJCQgPj4eOnl4OCA6dOn47fffgMAeHp6IjMzE3FxcdJyBw8eRGFhIVq3bi2NiYqKkj3GPTw8HA0bNoSVlVXV7hQRERFVSzq9yyw7OxuXL1+W3icnJyM+Ph7W1taoW7cubGxsZOMNDQ1hb2+Phg0bAgBcXV3Ro0cPjB07Fhs2bEBeXh4CAwMxdOhQ6Rb94cOHY+7cufD398eMGTNw9uxZrFq1CitWrKi6HSUiIqJqTaeBKDY2Fl26dJHeT506FQDg5+eH0NDQMq0jLCwMgYGB6NatG/T09DBw4ECsXr1a6tdoNDhw4AACAgLQokUL1KxZE7Nnz+Yt90RERCRRCSGErot4EWi1Wmg0GmRlZVWb64lWhF986pgpbzaogkqIiIiqp7J+flfba4iIiIiIqgoDERERESkeAxEREREpHgMRERERKR4DERERESkeAxEREREpHgMRERERKR4DERERESkeAxEREREpHgMRERERKR4DERERESkeAxERERE
pHgMRERERKR4DERERESmega4LoMq1Ivyi7P2UNxvoqBIiIqLqi0eIiIiISPEYiIiIiEjxGIiIiIhI8RiIiIiISPEYiIiIiEjxGIiIiIhI8RiIiIiISPEYiIiIiEjxGIiIiIhI8RiIiIiISPEYiIiIiEjxGIiIiIhI8RiIiIiISPEYiIiIiEjxGIiIiIhI8RiIiIiISPEYiIiIiEjxGIiIiIhI8XQaiKKiotCnTx84ODhApVJh9+7dUl9eXh5mzJgBd3d3mJubw8HBASNHjsTNmzdl68jIyICvry/UajUsLS3h7++P7Oxs2ZiEhAR06NABJiYmcHR0xOLFi6ti94iIiOgFodNAdO/ePTRt2hRr164t1nf//n2cPHkSs2bNwsmTJ7Fz504kJSXhrbfeko3z9fXFuXPnEB4ejr179yIqKgrjxo2T+rVaLbp37w4nJyfExcVhyZIlmDNnDr788stK3z8iIiJ6MaiEEELXRQCASqXCrl270K9fv1LHnDhxAq1atcL169dRt25dJCYmws3NDSdOnICHhwcAYP/+/ejVqxf++usvODg4YP369fj444+RmpoKIyMjAMDMmTOxe/duXLhwocz1abVaaDQaZGVlQa1WP9e+VpQV4RefeZkpbzaohEqIiIiqp7J+fr9Q1xBlZWVBpVLB0tISABAdHQ1LS0spDAGAl5cX9PT0EBMTI43p2LGjFIYAwNvbG0lJSbhz506p28rJyYFWq5W9iIiI6OX0wgSiBw8eYMaMGRg2bJiU8FJTU2FraysbZ2BgAGtra6Smpkpj7OzsZGOK3heNKUlwcDA0Go30cnR0rMjdISIiomrkhQhEeXl5GDx4MIQQWL9+fZVsMygoCFlZWdLrxo0bVbJdIiIiqnoGui7gaYrC0PXr13Hw4EHZ+T97e3ukp6fLxufn5yMjIwP29vbSmLS0NNmYovdFY0pibGwMY2PjitqN51ae64WIiIiobKr1EaKiMHTp0iX8/vvvsLGxkfV7enoiMzMTcXFxUtvBgwdRWFiI1q1bS2OioqKQl5cnjQkPD0fDhg1hZWVVNTtCRERE1ZpOA1F2djbi4+MRHx8PAEhOTkZ8fDxSUlKQl5eHQYMGITY2FmFhYSgoKEBqaipSU1ORm5sLAHB1dUWPHj0wduxYHD9+HEeOHEFgYCCGDh0KBwcHAMDw4cNhZGQEf39/nDt3Djt27MCqVaswdepUXe02ERERVTM6ve3+0KFD6NKlS7F2Pz8/zJkzBy4uLiUuFxkZic6dOwN4+GDGwMBA7NmzB3p6ehg4cCBWr14NCwsLaXxCQgICAgJw4sQJ1KxZExMmTMCMGTOeqVZd33ZfUafMeNs9EREpSVk/v6vNc4iqOwYiIiKiF89L+RwiIiIiosrAQERERESKx0BEREREisdARERERIrHQERERESKx0BEREREisdARERERIrHQERERESKx0BEREREisdARERERIrHQERERESKx0BEREREisdARERERIpnoOsCqGqtCL9YrG3Kmw10UAkREVH1wSNEREREpHgMRERERKR4DERERESkeAxEREREpHgMRERERKR4DERERESkeAxEREREpHgMRERERKR4DERERESkeAxEREREpHgMRERERKR4DERERESkeAxEREREpHj8tnsiIiICIoOLt3UJqvo6dIRHiIiIiEjxGIiIiIhI8RiIiIiISPEYiIiIiEjxGIiIiIhI8XQaiKKiotCnTx84ODhApVJh9+7dsn4hBGbPno3atWvD1NQUXl5euHTpkmxMRkYGfH19oVarYWlpCX9/f2RnZ8vGJCQkoEOHDjAxMYGjoyMWL15c2btGRERELxCdBqJ79+6hadOmWLt2bYn9ixcvxurVq7FhwwbExMTA3Nwc3t7eePDggTTG19cX586dQ3h4OPbu3YuoqCiMGzdO6tdqtejevTucnJwQFxeHJUuWYM6cOfjyyy8rff+IiIiqrchg+UvhdPocop49e6Jnz54
l9gkhsHLlSnzyySfo27cvAGDLli2ws7PD7t27MXToUCQmJmL//v04ceIEPDw8AABr1qxBr169sHTpUjg4OCAsLAy5ubn4+uuvYWRkhMaNGyM+Ph7Lly+XBSciIiJSrmp7DVFycjJSU1Ph5eUltWk0GrRu3RrR0dEAgOjoaFhaWkphCAC8vLygp6eHmJgYaUzHjh1hZGQkjfH29kZSUhLu3LlT6vZzcnKg1WplLyIiIno5VdtAlJqaCgCws7OTtdvZ2Ul9qampsLW1lfUbGBjA2tpaNqakdTy6jZIEBwdDo9FIL0dHx+fbISIiIqq2qm0g0rWgoCBkZWVJrxs3bui6JCIiIqok5QpEV69ereg6irG3twcApKWlydrT0tKkPnt7e6Snp8v68/PzkZGRIRtT0joe3UZJjI2NoVarZS8iIiJ6OZUrEL322mvo0qULvv32W9kdXxXJxcUF9vb2iIiIkNq0Wi1iYmLg6ekJAPD09ERmZibi4uKkMQcPHkRhYSFat24tjYmKikJeXp40Jjw8HA0bNoSVlVWl1E5EREQvlnIFopMnT+L111/H1KlTYW9vj/feew/Hjx9/5vVkZ2cjPj4e8fHxAB5eSB0fH4+UlBSoVCpMnjwZ8+fPx88//4wzZ85g5MiRcHBwQL9+/QAArq6u6NGjB8aOHYvjx4/jyJEjCAwMxNChQ+Hg4AAAGD58OIyMjODv749z585hx44dWLVqFaZOnVqeXSciIqKXULkCUbNmzbBq1SrcvHkTX3/9NW7duoX27dujSZMmWL58OW7fvl2m9cTGxqJ58+Zo3rw5AGDq1Klo3rw5Zs+eDQD46KOPMGHCBIwbNw4tW7ZEdnY29u/fDxMTE2kdYWFhaNSoEbp164ZevXqhffv2smcMaTQaHDhwAMnJyWjRogWmTZuG2bNn85Z7IiIikqiEEOJ5V5KTk4N169YhKCgIubm5MDIywuDBg7Fo0SLUrl27IurUOa1WC41Gg6ysLJ1cT7Qi/GKlrXvKmw0qbd1ERFRNledhjF2CKr6OSlbWz+/nusssNjYW48ePR+3atbF8+XJ8+OGHuHLlCsLDw3Hz5k3pgYpERERE1Vm5nlS9fPlybNq0CUlJSejVqxe2bNmCXr16QU/vYb5ycXFBaGgonJ2dK7JWIiIiokpRrkC0fv16jBkzBqNGjSr1lJitrS1CQkKeqzgiIiKiqlCuQPT4N86XxMjICH5+fuVZPREREVGVKtc1RJs2bcL3339frP3777/H5s2bn7soIiIioqpUrkAUHByMmjVrFmu3tbXF559//txFEREREVWlcgWilJQUuLi4FGt3cnJCSkrKcxdFREREVJXKFYhsbW2RkJBQrP306dOwsbF57qKIiIiIqlK5AtGwYcMwceJEREZGoqCgAAUFBTh48CAmTZqEoUOHVnSNRERERJWqXHeZzZs3D9euXUO3bt1gYPBwFYWFhRg5ciSvISIiIqIXTrkCkZGREXbs2IF58+bh9OnTMDU1hbu7O5ycnCq6PiIiIqJKV65AVKRBgwZo0IDfg0VEREQvtnIFooKCAoSGhiIiIgLp6ekoLCyU9R88eLBCiiMiIiKqCuUKRJMmTUJoaCh8fHzQpEkTqFSqiq6LiIiIqMqUKxBt374d3333HXr16lXR9RARERFVuXLddm9kZITXXnutomshIiIi0olyBaJp06Zh1apVEEJUdD1EREREVa5cp8z+/PNPREZG4tdff0Xjxo1haGgo69+5c2eFFEdERERUFcoViCwtLdG/f/+KroWIiIhIJ8oViDZt2lTRdRARERHpTLkfzJifn49Dhw7hypUrGD58OGrUqIGbN29CrVbDwsKiImskIiKi6iAyuHhbl6Cqr6MSlCsQXb9+HT169EBKSgpycnLw5ptvokaNGli0aBFycnKwYcOGiq6TiIiIyqukIEMy5brLbNKkSfDw8MCdO3dgamoqtffv3x8REREVVhwRERFRVSjXEaI//vgDR48
ehZGRkazd2dkZf//9d4UURkRERFRVynWEqLCwEAUFBcXa//rrL9SoUeO5iyIiIiKqSuUKRN27d8fKlSul9yqVCtnZ2fj000/5dR5ERET0winXKbNly5bB29sbbm5uePDgAYYPH45Lly6hZs2a2LZtW0XXSERERFSpyhWI6tSpg9OnT2P79u1ISEhAdnY2/P394evrK7vImoiIiOhFUO7nEBkYGGDEiBEVWQsRERGRTpQrEG3ZsuWJ/SNHjixXMURERES6UK5ANGnSJNn7vLw83L9/H0ZGRjAzM2MgIiIiohdKue4yu3PnjuyVnZ2NpKQktG/fnhdVExER0QunXIGoJPXr18fChQuLHT0iIiIiqu4qLBABDy+0vnnzZoWtr6CgALNmzYKLiwtMTU1Rr149zJs3D0IIaYwQArNnz0bt2rVhamoKLy8vXLp0SbaejIwM+Pr6Qq1Ww9LSEv7+/sjOzq6wOomIiOjFVq5riH7++WfZeyEEbt26hS+++ALt2rWrkMIAYNGiRVi/fj02b96Mxo0bIzY2FqNHj4ZGo8HEiRMBAIsXL8bq1auxefNmuLi4YNasWfD29sb58+dhYmICAPD19cWtW7cQHh6OvLw8jB49GuPGjcPWrVsrrFYiIiJ6cZUrEPXr10/2XqVSoVatWujatSuWLVtWEXUBAI4ePYq+ffvCx8cHwMPvStu2bRuOHz8O4GEQW7lyJT755BP07dsXwMM74Ozs7LB7924MHToUiYmJ2L9/P06cOAEPDw8AwJo1a9CrVy8sXboUDg4OFVYvERERvZjK/V1mj74KCgqQmpqKrVu3onbt2hVWXNu2bREREYGLFy8CAE6fPo0///wTPXv2BAAkJycjNTUVXl5e0jIajQatW7dGdHQ0ACA6OhqWlpZSGAIALy8v6OnpISYmptRt5+TkQKvVyl5ERET0cir3gxmrwsyZM6HVatGoUSPo6+ujoKAACxYsgK+vLwAgNTUVAGBnZydbzs7OTupLTU2Fra2trN/AwADW1tbSmJIEBwdj7ty5Fbk7REREVE2VKxBNnTq1zGOXL19enk0AAL777juEhYVh69ataNy4MeLj4zF58mQ4ODjAz8+v3Osti6CgINl+arVaODo6Vuo2iYiISDfKFYhOnTqFU6dOIS8vDw0bNgQAXLx4Efr6+njjjTekcSqV6rmKmz59OmbOnImhQ4cCANzd3XH9+nUEBwfDz88P9vb2AIC0tDTZqbq0tDQ0a9YMAGBvb4/09HTZevPz85GRkSEtXxJjY2MYGxs/V/1ERET0YijXNUR9+vRBx44d8ddff+HkyZM4efIkbty4gS5duqB3796IjIxEZGQkDh48+FzF3b9/H3p68hL19fVRWFgIAHBxcYG9vT0iIiKkfq1Wi5iYGHh6egIAPD09kZmZibi4OGnMwYMHUVhYiNatWz9XfURERPRyKNcRomXLluHAgQOwsrKS2qysrDB//nx0794d06ZNq5Di+vTpgwULFqBu3bpo3LgxTp06heXLl2PMmDEAHh6Bmjx5MubPn4/69etLt907ODhId8K5urqiR48eGDt2LDZs2IC8vDwEBgZi6NChvMOMiIiIAJQzEGm1Wty+fbtY++3bt3H37t3nLqrImjVrMGvWLIwfPx7p6elwcHDAe++9h9mzZ0tjPvroI9y7dw/jxo1DZmYm2rdvj/3790vPIAKAsLAwBAYGolu3btDT08PAgQOxevXqCquTiIiIXmwq8ehjn8to5MiR+OOPP7Bs2TK0atUKABATE4Pp06ejQ4cO2Lx5c4UXqmtarRYajQZZWVlQq9VVvv0V4RerdHtT3mxQpdsjIqJKFBlceevuElR5664AZf38LtcRog0bNuDDDz/E8OHDkZeX93BFBgbw9/fHkiVLylcxERERkY6UKxCZmZlh3bp1WLJkCa5cuQIAqFevHszNzSu0OCIiInpGlXk06CX2XF/ueuvWLdy6dQv169eHubk5ynH2jYiIiEj
nyhWI/v33X3Tr1g0NGjRAr169cOvWLQCAv79/hd1hRkRERFRVyhWIpkyZAkNDQ6SkpMDMzExqHzJkCPbv319hxRERERFVhXJdQ3TgwAH89ttvqFOnjqy9fv36uH79eoUURkRERFRVyhWI7t27JzsyVCQjI4Nfd0FERKQkj1/EXc1vwy9NuU6ZdejQAVu2bJHeq1QqFBYWYvHixejSpUuFFUdERERUFcp1hGjx4sXo1q0bYmNjkZubi48++gjnzp1DRkYGjhw5UtE1EhEREVWqch0hatKkCS5evIj27dujb9++uHfvHgYMGIBTp06hXr16FV0jERERUaV65iNEeXl56NGjBzZs2ICPP/64MmoiIiIiqlLPfITI0NAQCQkJlVELERERkU6U65TZiBEjEBISUtG1EBEREelEuS6qzs/Px9dff43ff/8dLVq0KPYdZsuXL6+Q4oiIiIiqwjMFoqtXr8LZ2Rlnz57FG2+8AQC4ePGibIxKpaq46hRsRfjFpw8iIiKiCvFMgah+/fq4desWIiMjATz8qo7Vq1fDzs6uUoojIiIiqgrPdA3R499m/+uvv+LevXsVWhARERFRVSvXRdVFHg9IRERERC+iZwpEKpWq2DVCvGaIiIiIXnTPdA2REAKjRo2SvsD1wYMHeP/994vdZbZz586Kq5CIiIiokj1TIPLz85O9HzFiRIUWQ0RERKQLzxSINm3aVFl1EBEREenMc11UTURERPQyYCAiIiIixWMgIiIiIsVjICIiIiLFYyAiIiIixWMgIiIiIsVjICIiIiLFYyAiIiIixWMgIiIiIsVjICIiIiLFYyAiIiIixav2gejvv//GiBEjYGNjA1NTU7i7uyM2NlbqF0Jg9uzZqF27NkxNTeHl5YVLly7J1pGRkQFfX1+o1WpYWlrC398f2dnZVb0rREREVE1V60B0584dtGvXDoaGhvj1119x/vx5LFu2DFZWVtKYxYsXY/Xq1diwYQNiYmJgbm4Ob29vPHjwQBrj6+uLc+fOITw8HHv37kVUVBTGjRuni10iIiKiauiZvu2+qi1atAiOjo7YtGmT1Obi4iL9WwiBlStX4pNPPkHfvn0BAFu2bIGdnR12796NoUOHIjExEfv378eJEyfg4eEBAFizZg169eqFpUuXwsHBoWp3ioiIiKqdan2E6Oeff4aHhwfefvtt2Nraonnz5vjqq6+k/uTkZKSmpsLLy0tq02g0aN26NaKjowEA0dHRsLS0lMIQAHh5eUFPTw8xMTGlbjsnJwdarVb2IiIiopdTtQ5EV69exfr161G/fn389ttv+OCDDzBx4kRs3rwZAJCamgoAsLOzky1nZ2cn9aWmpsLW1lbWb2BgAGtra2lMSYKDg6HRaKSXo6NjRe4aERERVSPVOhAVFhbijTfewOeff47mzZtj3LhxGDt2LDZs2FDp2w4KCkJWVpb0unHjRqVvk4iIiHSjWgei2rVrw83NTdbm6uqKlJQUAIC9vT0AIC0tTTYmLS1N6rO3t0d6erqsPz8/HxkZGdKYkhgbG0OtVsteRERE9HKq1oGoXbt2SEpKkrVdvHgRTk5OAB5eYG1vb4+IiAipX6vVIiYmBp6engAAT09PZGZmIi4uThpz8OBBFBYWonXr1lWwF0RERFTdVeu7zKZMmYK2bdvi888/x+DBg3H8+HF8+eWX+PLLLwEAKpUKkydPxvz581G/fn24uLhg1qxZcHBwQL9+/QA8PKLUo0cP6VRbXl4eAgMDMXToUN5h9gQrwi/K3k95s4GOKiEiIqp81ToQtWzZErt27UJQUBA+++wzuLi4YOXKlfD19ZXGfPTRR7h37x7GjRuHzMxMtG/fHvv374eJiYk0JiwsDIGBgejWrRv09PQwcOBArF69Whe7RERERNWQSgghdF3Ei0Cr1UKj0SArK6tKrid6/AiNrvEIERHRCyIyWLfb7xKk2+0/pqyf39X6CBERERG9YEoKZNUsJJWkWl9UTURERFQ
VGIiIiIhI8RiIiIiISPEYiIiIiEjxGIiIiIhI8RiIiIiISPEYiIiIiEjxGIiIiIhI8RiIiIiISPEYiIiIiEjxGIiIiIhI8RiIiIiISPEYiIiIiEjxGIiIiIhI8Qx0XQARERE9h8hgXVfwUuARIiIiIlI8BiIiIiJSPAYiIiIiUjwGIiIiIlI8XlRNRERElevxC7+7BOmmjifgESIiIiJSPAYiIiIiUjwGIiIiIlI8BiIiIiJSPAYiIiIiUjwGIiIiIlI8BiIiIiJSPAYiIiIiUjwGIiIiIlI8BiIiIiJSPAYiIiIiUjwGIiIiIlK8FyoQLVy4ECqVCpMnT5baHjx4gICAANjY2MDCwgIDBw5EWlqabLmUlBT4+PjAzMwMtra2mD59OvLz86u4eiIiIqquXphAdOLECWzcuBGvv/66rH3KlCnYs2cPvv/+exw+fBg3b97EgAEDpP6CggL4+PggNzcXR48exebNmxEaGorZs2dX9S4QERFRNfVCBKLs7Gz4+vriq6++gpWVldSelZWFkJAQLF++HF27dkWLFi2wadMmHD16FMeOHQMAHDhwAOfPn8e3336LZs2aoWfPnpg3bx7Wrl2L3NxcXe0SERERVSMvRCAKCAiAj48PvLy8ZO1xcXHIy8uTtTdq1Ah169ZFdHQ0ACA6Ohru7u6ws7OTxnh7e0Or1eLcuXOlbjMnJwdarVb2IiIiopeTga4LeJrt27fj5MmTOHHiRLG+1NRUGBkZwdLSUtZuZ2eH1NRUacyjYaiov6ivNMHBwZg7d+5zVk9EREQvgmp9hOjGjRuYNGkSwsLCYGJiUqXbDgoKQlZWlvS6ceNGlW6fiIiIqk61DkRxcXFIT0/HG2+8AQMDAxgYGODw4cNYvXo1DAwMYGdnh9zcXGRmZsqWS0tLg729PQDA3t6+2F1nRe+LxpTE2NgYarVa9iIiIqKXU7UORN26dcOZM2cQHx8vvTw8PODr6yv929DQEBEREdIySUlJSElJgaenJwDA09MTZ86cQXp6ujQmPDwcarUabm5uVb5PREREVP1U62uIatSogSZNmsjazM3NYWNjI7X7+/tj6tSpsLa2hlqtxoQJE+Dp6Yk2bdoAALp37w43Nze88847WLx4MVJTU/HJJ58gICAAxsbGVb5PREREVP1U60BUFitWrICenh4GDhyInJwceHt7Y926dVK/vr4+9u7diw8++ACenp4wNzeHn58fPvvsMx1WTURERNWJSgghdF3Ei0Cr1UKj0SArK6tKridaEX6x0rfxLKa82UDXJRARUUkig3VdwbPrElRlmyrr53e1voaIiIiIqCq88KfMiIiI6AVT0lGtKjxqVBIGIiIiohfFi3h67AXBU2ZERESkeAxEREREpHgMRERERKR4DERERESkeAxEREREpHgMRERERKR4DERERESkeHwOEZVJSV8lwq/zICKilwWPEBEREZHiMRARERGR4jEQERERkeIxEBEREZHiMRARERGR4jEQERERkeIxEBEREZHiMRARERGR4jEQERERkeIxEBEREZHiMRARERGR4jEQERERkeIxEBEREZHiMRARERGR4jEQERERkeIxEBEREZHiMRARERGR4jEQERERkeIxEBEREZHiGei6ACIiIipFZLCuK1AMBqJqYEX4RV2XQEREpGg8ZUZERESKV+0DUXBwMFq2bIkaNWrA1tYW/fr1Q1JSkmzMgwcPEBAQABsbG1hYWGDgwIFIS0uTjUlJSYGPjw/MzMxga2uL6dOnIz8/vyp35aWzIvyi7EVERPSiqvaB6PDhwwgICMCxY8cQHh6OvLw8dO/eHffu3ZPGTJkyBXv27MH333+Pw4cP4+bNmxgwYIDUX1BQAB8fH+Tm5uLo0aPYvHkzQkNDMXv2bF3sEhEREVUzKiGE0HURz+L27duwtbXF4cOH0bFjR2RlZaFWrVrYunUrBg0aBAC4cOECXF1dER0djTZt2uDXX39F7969cfPmTdjZ2QE
ANmzYgBkzZuD27dswMjJ66na1Wi00Gg2ysrKgVqsrdJ9elqMrU95soOsSiIheLkq6qLpLUKWstqyf39X+CNHjsrKyAADW1tYAgLi4OOTl5cHLy0sa06hRI9StWxfR0dEAgOjoaLi7u0thCAC8vb2h1Wpx7ty5EreTk5MDrVYrexEREdHL6YUKRIWFhZg8eTLatWuHJk2aAABSU1NhZGQES0tL2Vg7OzukpqZKYx4NQ0X9RX0lCQ4OhkajkV6Ojo4VvDdERERUXbxQgSggIABnz57F9u3bK31bQUFByMrKkl43btyo9G0SERGRbrwwzyEKDAzE3r17ERUVhTp16kjt9vb2yM3NRWZmpuwoUVpaGuzt7aUxx48fl62v6C60ojGPMzY2hrGxcQXvBREREVVH1f4IkRACgYGB2LVrFw4ePAgXFxdZf4sWLWBoaIiIiAipLSkpCSkpKfD09AQAeHp64syZM0hPT5fGhIeHQ61Ww83NrWp2hIiIiKqtan+EKCAgAFu3bsVPP/2EGjVqSNf8aDQamJqaQqPRwN/fH1OnToW1tTXUajUmTJgAT09PtGnTBgDQvXt3uLm54Z133sHixYuRmpqKTz75BAEBATwKRERERNU/EK1fvx4A0LlzZ1n7pk2bMGrUKADAihUroKenh4EDByInJwfe3t5Yt26dNFZfXx979+7FBx98AE9PT5ibm8PPzw+fffZZVe0GERERVWPVPhCV5TFJJiYmWLt2LdauXVvqGCcnJ/zyyy8VWRoRERG9JKr9NUREREREla3aHyGiF0dJT9zm06uJiOhFwCNEREREpHgMRERERKR4DERERESkeAxEREREpHgMRERERKR4DERERESkeAxEREREpHgMRERERKR4DERERESkeAxEREREpHj86g4iIqLqIDJY1xUoGo8QERERkeIxEBEREZHi8ZQZVaoV4Rdl76e82UBHlRAREZWOR4iIiIhI8RiIiIiISPEYiIiIiEjxGIiIiIhI8RiIiIiISPEYiIiIiEjxGIiIiIhI8RiIiIiISPEYiIiIiEjx+KRqqlKPP7ka4NOriYhI9xiIiIiIdIHfbl+t8JQZERERKR4DERERESkeT5kRERFVNp4eq/YYiEjnHr/QmhdZExFRVWMgomqHd6IREVFVU9Q1RGvXroWzszNMTEzQunVrHD9+XNclERERUTWgmEC0Y8cOTJ06FZ9++ilOnjyJpk2bwtvbG+np6boujYiIiHRMMafMli9fjrFjx2L06NEAgA0bNmDfvn34+uuvMXPmTB1XR09T0mm0suCpNiIiKgtFBKLc3FzExcUhKChIatPT04OXlxeio6N1WBlVtrJcsM2LuonoufAOspeCIgLRP//8g4KCAtjZ2cna7ezscOHChRKXycnJQU5OjvQ+KysLAKDVaiu8vgf3sit8nVSy4N0nK2RMWQR0fa1C1kNE1dy9B7qu4OVQCZ+vD1f7cL1CiCeOU0QgKo/g4GDMnTu3WLujo6MOqqEX0f/TdQFERC+Uzyp17Xfv3oVGoym1XxGBqGbNmtDX10daWpqsPS0tDfb29iUuExQUhKlTp0rvCwsLkZGRARsbG6hUqjJvW6vVwtHRETdu3IBarS7fDigM5+zZcc6eHefs2XHOng3n69lVxpwJIXD37l04ODg8cZwiApGRkRFatGiBiIgI9OvXD8DDgBMREYHAwMASlzE2NoaxsbGszdLSstw1qNVq/kE8I87Zs+OcPTvO2bPjnD0bztezq+g5e9KRoSKKCEQAMHXqVPj5+cHDwwOtWrXCypUrce/ePemuMyIiIlIuxQSiIUOG4Pbt25g9ezZSU1PRrFkz7N+/v9iF1kRERKQ8iglEABAYGFjqKbLKYmxsjE8//bTY6TcqHefs2XHOnh3n7Nlxzp4N5+vZ6XLOVOJp96ERERERveQU89UdRERERKVhICIiIiLFYyAiIiIixWMgIiIiIsVjIKpka9euhbOzM0xMTNC6dWscP35
c1yXpRHBwMFq2bIkaNWrA1tYW/fr1Q1JSkmzMgwcPEBAQABsbG1hYWGDgwIHFni6ekpICHx8fmJmZwdbWFtOnT0d+fn5V7opOLFy4ECqVCpMnT5baOF/F/f333xgxYgRsbGxgamoKd3d3xMbGSv1CCMyePRu1a9eGqakpvLy8cOnSJdk6MjIy4OvrC7VaDUtLS/j7+yM7++X8vsGCggLMmjULLi4uMDU1Rb169TBv3jzZdz4pfc6ioqLQp08fODg4QKVSYffu3bL+ipqfhIQEdOjQASYmJnB0dMTixYsre9cqzZPmLC8vDzNmzIC7uzvMzc3h4OCAkSNH4ubNm7J16GTOBFWa7du3CyMjI/H111+Lc+fOibFjxwpLS0uRlpam69KqnLe3t9i0aZM4e/asiI+PF7169RJ169YV2dnZ0pj3339fODo6ioiICBEbGyvatGkj2rZtK/Xn5+eLJk2aCC8vL3Hq1Cnxyy+/iJo1a4qgoCBd7FKVOX78uHB2dhavv/66mDRpktTO+ZLLyMgQTk5OYtSoUSImJkZcvXpV/Pbbb+Ly5cvSmIULFwqNRiN2794tTp8+Ld566y3h4uIi/vvvP2lMjx49RNOmTcWxY8fEH3/8IV577TUxbNgwXexSpVuwYIGwsbERe/fuFcnJyeL7778XFhYWYtWqVdIYpc/ZL7/8Ij7++GOxc+dOAUDs2rVL1l8R85OVlSXs7OyEr6+vOHv2rNi2bZswNTUVGzdurKrdrFBPmrPMzEzh5eUlduzYIS5cuCCio6NFq1atRIsWLWTr0MWcMRBVolatWomAgADpfUFBgXBwcBDBwcE6rKp6SE9PFwDE4cOHhRAP/0gMDQ3F999/L41JTEwUAER0dLQQ4uEfmZ6enkhNTZXGrF+/XqjVapGTk1O1O1BF7t69K+rXry/Cw8NFp06dpEDE+SpuxowZon379qX2FxYWCnt7e7FkyRKpLTMzUxgbG4tt27YJIYQ4f/68ACBOnDghjfn111+FSqUSf//9d+UVryM+Pj5izJgxsrYBAwYIX19fIQTn7HGPf7hX1PysW7dOWFlZyf4uZ8yYIRo2bFjJe1T5SgqRjzt+/LgAIK5fvy6E0N2c8ZRZJcnNzUVcXBy8vLykNj09PXh5eSE6OlqHlVUPWVlZAABra2sAQFxcHPLy8mTz1ahRI9StW1ear+joaLi7u8ueLu7t7Q2tVotz585VYfVVJyAgAD4+PrJ5AThfJfn555/h4eGBt99+G7a2tmjevDm++uorqT85ORmpqamyOdNoNGjdurVsziwtLeHh4SGN8fLygp6eHmJiYqpuZ6pI27ZtERERgYsXLwIATp8+jT///BM9e/YEwDl7moqan+joaHTs2BFGRkbSGG9vbyQlJeHOnTtVtDe6k5WVBZVKJX1fqK7mTFFPqq5K//zzDwoKCop9NYidnR0uXLigo6qqh8LCQkyePBnt2rVDkyZNAACpqakwMjIq9gW6dnZ2SE1NlcaUNJ9FfS+b7du34+TJkzhx4kSxPs5XcVevXsX69esxdepU/L//9/9w4sQJTJw4EUZGRvDz85P2uaQ5eXTObG1tZf0GBgawtrZ+Keds5syZ0Gq1aNSoEfT19VFQUIAFCxbA19cXADhnT1FR85OamgoXF5di6yjqs7KyqpT6q4MHDx5gxowZGDZsmPRlrrqaMwYiqnIBAQE4e/Ys/vzzT12XUm3duHEDkyZNQnh4OExMTHRdzguhsLAQHh4e+PzzzwEAzZs3x9mzZ7Fhwwb4+fnpuLrq6bvvvkNYWBi2bt2Kxo0bIz4+HpMnT4aDgwPnjCpdXl4eBg8eDCEE1q9fr+tyeJdZZalZsyb09fWL3fWTlpYGe3t7HVWle4GBgdi7dy8iIyNRp04dqd3e3h65ubnIzMyUjX90vuzt7Uucz6K+l0lcXBzS09PxxhtvwMDAAAYGBjh8+DBWr14NAwMD2NnZcb4eU7t2bbi5ucnaXF1dkZKSAuD
/9vlJf5P29vZIT0+X9efn5yMjI+OlnLPp06dj5syZGDp0KNzd3fHOO+9gypQpCA4OBsA5e5qKmh+l/a0C/xeGrl+/jvDwcOnoEKC7OWMgqiRGRkZo0aIFIiIipLbCwkJERETA09NTh5XphhACgYGB2LVrFw4ePFjsUGeLFi1gaGgom6+kpCSkpKRI8+Xp6YkzZ87I/lCK/pAe/yB80XXr1g1nzpxBfHy89PLw8ICvr6/0b86XXLt27Yo9yuHixYtwcnICALi4uMDe3l42Z1qtFjExMbI5y8zMRFxcnDTm4MGDKCwsROvWratgL6rW/fv3oacn/xjQ19dHYWEhAM7Z01TU/Hh6eiIqKgp5eXnSmPDwcDRs2PClPF1WFIYuXbqE33//HTY2NrJ+nc1ZuS/Hpqfavn27MDY2FqGhoeL8+fNi3LhxwtLSUnbXj1J88MEHQqPRiEOHDolbt25Jr/v370tj3n//fVG3bl1x8OBBERsbKzw9PYWnp6fUX3Qbeffu3UV8fLzYv3+/qFWr1kt7G/njHr3LTAjO1+OOHz8uDAwMxIIFC8SlS5dEWFiYMDMzE99++600ZuHChcLS0lL89NNPIiEhQfTt27fEW6SbN28uYmJixJ9//inq16//0txC/jg/Pz/xyiuvSLfd79y5U9SsWVN89NFH0hilz9ndu3fFqVOnxKlTpwQAsXz5cnHq1CnpjqiKmJ/MzExhZ2cn3nnnHXH27Fmxfft2YWZm9sLedv+kOcvNzRVvvfWWqFOnjoiPj5d9Hjx6x5gu5oyBqJKtWbNG1K1bVxgZGYlWrVqJY8eO6boknQBQ4mvTpk3SmP/++0+MHz9eWFlZCTMzM9G/f39x69Yt2XquXbsmevbsKUxNTUXNmjXFtGnTRF5eXhXvjW48Hog4X8Xt2bNHNGnSRBgbG4tGjRqJL7/8UtZfWFgoZs2aJezs7ISxsbHo1q2bSEpKko35999/xbBhw4SFhYVQq9Vi9OjR4u7du1W5G1VGq9WKSZMmibp16woTExPx6quvio8//lj2waT0OYuMjCzxv11+fn5CiIqbn9OnT4v27dsLY2Nj8corr4iFCxdW1S5WuCfNWXJycqmfB5GRkdI6dDFnKiEeeSQpERERkQLxGiIiIiJSPAYiIiIiUjwGIiIiIlI8BiIiIiJSPAYiIiIiUjwGIiIiIlI8BiIiIiJSPAYiIlI0Z2dnrFy5UtdlEJGOMRAR0XOLjo6Gvr4+fHx8dF0KEVG5MBAR0XMLCQnBhAkTEBUVhZs3b+q6nJfao19mSUQVh4GIiJ5LdnY2duzYgQ8++AA+Pj4IDQ2V9R86dAgqlQoRERHw8PCAmZkZ2rZtW+yb6devX4969erByMgIDRs2xDfffCPrV6lU2LhxI3r37g0zMzO4uroiOjoaly9fRufOnWFubo62bdviypUr0jJXrlxB3759YWdnBwsLC7Rs2RK///57qfsyZswY9O7dW9aWl5cHW1tbhISElLjM9evX0adPH1hZWcHc3ByNGzfGL7/8IvWfO3cOvXv3hlqtRo0aNdChQwepxsLCQnz22WeoU6cOjI2N0axZM+zfv19a9tq1a1CpVNixYwc6deoEExMThIWFAQD+97//wdXVFSYmJmjUqBHWrVtX6n4RURk81zehEZHihYSECA8PDyHEwy9XrVevnigsLJT6i77osXXr1uLQoUPi3LlzokOHDqJt27bSmJ07dwpDQ0Oxdu1akZSUJJYtWyb09fXFwYMHpTEAxCuvvCJ27NghkpKSRL9+/YSzs7Po2rWr2L9/vzh//rxo06aN6NGjh7RMfHy82LBhgzhz5oy4ePGi+OSTT4SJiYn0TeVCCOHk5CRWrFghhBDiyJEjQl9fX9y8eVNWm7m5ealfVurj4yPefPNNkZCQIK5cuSL27NkjDh8+LIQQ4q+//hLW1tZiwIAB4sSJEyIpKUl8/fXX4sKFC0IIIZYvXy7UarXYtm2buHDhgvjoo4+EoaG
huHjxohBCSF+E6ezsLH788Udx9epVcfPmTfHtt9+K2rVrS20//vijsLa2FqGhoeX6GRIRv+2eiJ5T27ZtxcqVK4UQQuTl5YmaNWvKvrW6KBD9/vvvUtu+ffsEAPHff/9J6xg7dqxsvW+//bbo1auX9B6A+OSTT6T30dHRAoAICQmR2rZt2yZMTEyeWG/jxo3FmjVrpPePBiIhhHBzcxOLFi2S3vfp00eMGjWq1PW5u7uLOXPmlNgXFBQkXFxcRG5ubon9Dg4OYsGCBbK2li1bivHjxwsh/i8QFc1vkXr16omtW7fK2ubNmyc8PT1LrZOInoynzIio3JKSknD8+HEMGzYMAGBgYIAhQ4aUeHrp9ddfl/5du3ZtAEB6ejoAIDExEe3atZONb9euHRITE0tdh52dHQDA3d1d1vbgwQNotVoAD0/nffjhh3B1dYWlpSUsLCyQmJiIlJSUUvfp3XffxaZNmwAAaWlp+PXXXzFmzJhSx0+cOBHz589Hu3bt8OmnnyIhIUHqi4+PR4cOHWBoaFhsOa1Wi5s3b5Zpvz08PKR/37t3D1euXIG/vz8sLCyk1/z582WnC4no2RjougAienGFhIQgPz8fDg4OUpsQAsbGxvjiiy+g0Wik9kdDgUqlAvDwGppnUdI6nrTeDz/8EOHh4Vi6dClee+01mJqaYtCgQcjNzS11GyNHjsTMmTMRHR2No0ePwsXFBR06dCh1/Lvvvgtvb2/s27cPBw4cQHBwMJYtW4YJEybA1NT0mfavNObm5tK/s7OzAQBfffUVWrduLRunr69fIdsjUiIeISKicsnPz8eWLVuwbNkyxMfHS6/Tp0/DwcEB27ZtK/O6XF1dceTIEVnbkSNH4Obm9lw1HjlyBKNGjUL//v3h7u4Oe3t7XLt27YnL2NjYoF+/fti0aRNCQ0MxevTop27H0dER77//Pnbu3Ilp06bhq6++AvDwiNYff/xR4p1harUaDg4Oz7zfdnZ2cHBwwNWrV/Haa6/JXi4uLk+tlYhKxiNERFQue/fuxZ07d+Dv7y87EgQAAwcOREhICN5///0yrWv69OkYPHgwmjdvDi8vL+zZswc7d+584h1hZVG/fn3s3LkTffr0gUqlwqxZs8p0VOrdd99F7969UVBQAD8/vyeOnTx5Mnr27IkGDRrgzp07iIyMhKurKwAgMDAQa9aswdChQxEUFASNRoNjx46hVatWaNiwIaZPn45PP/0U9erVQ7NmzbBp0ybEx8dLd5KVZu7cuZg4cSI0Gg169OiBnJwcxMbG4s6dO5g6dWrZJ4iIJAxERFQuISEh8PLyKhaGgIeBaPHixbLraZ6kX79+WLVqFZYuXYpJkybBxcUFmzZtQufOnZ+rxuXLl2PMmDFo27YtatasiRkzZkjXFz2Jl5cXateujcaNG8tOB5akoKAAAQEB+Ouvv6BWq9GjRw+sWLECwMOjTQcPHsT06dPRqVMn6Ovro1mzZtJ1QxMnTkRWVhamTZuG9PR0uLm54eeff0b9+vWfuM13330XZmZmWLJkCaZPnw5zc3O4u7tj8uTJZZsYIipGJYQQui6CiKg6yc7OxiuvvIJNmzZhwIABui6HiKoAjxAREf3/CgsL8c8//2DZsmWwtLTEW2+9peuSiKiKMBAREf3/UlJS4OLigjp16iA0NBQGBvxPJJFS8JQZERERKR5vuyciIiLFYyAiIiIixWMgIiIiIsVjICIiIiLFYyAiIiIixWMgIiIiIsVjICIiIiLFYyAiIiIixWMgIiIiIsX7/wAOgIvBvG+GogAAAABJRU5ErkJggg==", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "scripts.train_detector(\n", - " scripts.TrainDetectorConfig(\n", - " path=Path(\"logs/demo/detector\"),\n", - " task=tasks.BackdoorDetection(\n", - " # We pass in the path of the trained classifier, as well as what backdoor\n", - " # to use. The backdoor is the same one we used for training in this case,\n", - " # we could also have stored that.\n", - " path=Path(\"logs/demo/classifier\"),\n", - " backdoor=data.CornerPixelBackdoor(),\n", - " ),\n", - " detector=detectors.MahalanobisConfig(),\n", - " )\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As we can see, this was a trivial detection task. As an ablation, we can test whether the detector specifically flags backdoored inputs as anomalous, or just anything out of distribution:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\u001b[32m2024-02-14 19:38:05.113\u001b[0m | \u001b[34m\u001b[1mDEBUG \u001b[0m | \u001b[36mcupbearer.utils.scripts\u001b[0m:\u001b[36mload_config\u001b[0m:\u001b[36m55\u001b[0m - \u001b[34m\u001b[1mLoading config 'model' from logs/demo/classifier\u001b[0m\n", - "\u001b[32m2024-02-14 19:38:05.132\u001b[0m | \u001b[34m\u001b[1mDEBUG \u001b[0m | \u001b[36mcupbearer.utils.scripts\u001b[0m:\u001b[36mload_config\u001b[0m:\u001b[36m55\u001b[0m - \u001b[34m\u001b[1mLoading config 'detector' from logs/demo/detector\u001b[0m\n", - "\u001b[32m2024-02-14 19:38:05.135\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36mload_weights\u001b[0m:\u001b[36m209\u001b[0m - \u001b[1mLoading detector from logs/demo/detector/detector\u001b[0m\n", - "\u001b[32m2024-02-14 19:38:07.769\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36meval\u001b[0m:\u001b[36m128\u001b[0m - 
\u001b[1mAUC_ROC: 0.9998\u001b[0m\n", - "\u001b[32m2024-02-14 19:38:07.770\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36meval\u001b[0m:\u001b[36m129\u001b[0m - \u001b[1mAP: 0.9994\u001b[0m\n" - ] - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAkQAAAHHCAYAAABeLEexAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAABNNUlEQVR4nO3dd1gUV/828HvpdWnCIrFAjAWU2LAgdomoaKyxYWwYE4XYG7/EFlTsGjWW5CFgwRJjif2RYEsUsWJF7KJBwEQB0YiU8/7hyzwuoCIu7OLcn+va63Jnzsx+54Du7ZkzMwohhAARERGRjOlpuwAiIiIibWMgIiIiItljICIiIiLZYyAiIiIi2WMgIiIiItljICIiIiLZYyAiIiIi2WMgIiIiItljICIiIiLZYyAiomJRKBSYNm2atssoE8LDw6FQKHD79m1pWcuWLdGyZctS+fz8P6tp06ZBoVDg77//LpXPd3Z2xsCBA0vls4iKi4GIqIQsX74cCoUCjRo10nYp9J44duwYpk2bhtTUVG2XUoAu10ZUFAbaLoDofRUREQFnZ2ecOHEC169fx0cffaTtkkiH7N+//623OXbsGKZPn46BAwfC2tq6yNv9+++/MDAo2X/uX1dbfHw89PT4/2/SbfwNJSoBt27dwrFjx7Bw4ULY29sjIiJC2yXRGzx58qRUP8/IyAhGRkYltv/c3Fw8e/YMAGBiYlLigeh1jI2NYWhoqLXPJyoKBiKiEhAREQEbGxv4+vqiR48ehQai27dvQ6FQYP78+fjxxx9RpUoVGBsbo0GDBjh58mSB9gcOHECzZs1gbm4Oa2trdO7cGXFxcWpt8uaGXL16Ff369YOVlRXs7e0xefJkCCFw9+5ddO7cGUqlEo6OjliwYIHa9s+fP8eUKVNQv359WFlZwdzcHM2aNcPBgwdfe7wHDx6EQqHAtm3bCqxbv349FAoFoqOjX7l9VlYWpk+fjqpVq8LExAR2dnZo2rQpIiMj1dpduXIFPXv2hL29PUxNTVG9enV88803am3Onj2L9u3bQ6lUwsLCAm3atMHx48fV2uTN6Tl8+DCGDx8OBwcHVKhQQVq/d+9eqa8tLS3h6+uLS5cuvbYP8ly6dAmtW7eGqakpKlSogBkzZiA3N7dAu8LmEC1duhQ1a9aEmZkZbGxs4OHhgfXr1wN48bMdP348AMDFxQUKhUJtXpJCoUBgYCAiIiJQs2ZNGBsbY9++fdK6wuZ7/f333+jZsyeUSiXs7OwwcuRIKUQB//sdDQ8PL7Dty/t8U22FzSG6efMmPvvsM9ja2sLMzAyNGzfG7t271docOnQICoUCv/zyC2bOnIkKFSrAxMQEbdq0wfXr1wvURPQueMqMqARERESgW7duMDIyQp8+fbBixQqcPHkSDRo0KNB2/fr1ePz4Mb788ksoFArMnTsX3bp1w82bN6X/Vf/+++9o3749PvzwQ0ybNg3//vsvli5dCi8vL5w5cwbOzs5q++zVqxdcXV0xe/Zs7N69GzNmzICtrS1WrVqF1q1bY86cOYiIiMC4cePQoEEDNG/eHACQnp6O//znP+jTpw+++OILPH78GKGhofDx8cGJEydQp06dQo+3ZcuWqFixIiIiItC1a9cCfVGlShV4enq+sr+mTZuGkJAQDBkyBA0bNkR6ejpOnTqFM2fO4JNPPgEAnD9/Hs2aNYOhoSGGDh0KZ2
dn3LhxAzt37sTMmTMBvAgjzZo1g1KpxIQJE2BoaIhVq1ahZcuWOHz4cIH5XMOHD4e9vT2mTJkijRCtXbsWAwYMgI+PD+bMmYOnT59ixYoVaNq0Kc6ePVugr1+WlJSEVq1aITs7G5MmTYK5uTl+/PFHmJqavnKbPD/99BNGjBiBHj16SMHk/PnziImJQd++fdGtWzdcvXoVGzZswKJFi1CuXDkAgL29vbSPAwcO4JdffkFgYCDKlSv32loBoGfPnnB2dkZISAiOHz+OJUuW4NGjR1izZs0b631ZUWp7WXJyMpo0aYKnT59ixIgRsLOzw+rVq/Hpp5/i119/LfA7NHv2bOjp6WHcuHFIS0vD3Llz4efnh5iYmLeqk+i1BBFp1KlTpwQAERkZKYQQIjc3V1SoUEGMHDlSrd2tW7cEAGFnZycePnwoLf/tt98EALFz505pWZ06dYSDg4P4559/pGXnzp0Tenp6on///tKyqVOnCgBi6NCh0rLs7GxRoUIFoVAoxOzZs6Xljx49EqampmLAgAFqbTMzM9XqfPTokVCpVGLw4MFqywGIqVOnSu+DgoKEsbGxSE1NlZalpKQIAwMDtXaFqV27tvD19X1tm+bNmwtLS0tx584dteW5ubnSn7t06SKMjIzEjRs3pGWJiYnC0tJSNG/eXFoWFhYmAIimTZuK7Oxsafnjx4+FtbW1+OKLL9Q+IykpSVhZWRVYnt+oUaMEABETEyMtS0lJEVZWVgKAuHXrlrS8RYsWokWLFtL7zp07i5o1a752//PmzSuwnzwAhJ6enrh06VKh617+GeT9nnz66adq7YYPHy4AiHPnzgkh/vc7GhYW9sZ9vq62ypUrq/2e5fXTH3/8IS17/PixcHFxEc7OziInJ0cIIcTBgwcFAOHq6qr2e/n9998LAOLChQsFPououHjKjEjDIiIioFKp0KpVKwAvTi306tULGzduRE5OToH2vXr1go2NjfS+WbNmAF6cUgCA+/fvIzY2FgMHDoStra3U7uOPP8Ynn3yCPXv2FNjnkCFDpD/r6+vDw8MDQgj4+/tLy62trVG9enXpc/La5s1ryc3NxcOHD5GdnQ0PDw+cOXPmtcfdv39/ZGZm4tdff5WWbdq0CdnZ2ejXr99rt7W2tsalS5dw7dq1Qtc/ePAAR44cweDBg1GpUiW1dQqFAgCQk5OD/fv3o0uXLvjwww+l9eXLl0ffvn3x559/Ij09XW3bL774Avr6+tL7yMhIpKamok+fPvj777+ll76+Pho1avTGU4d79uxB48aN0bBhQ2mZvb09/Pz8XrtdXh/cu3ev0NOlRdWiRQu4ubkVuX1AQIDa+6+//hoACv2d0qQ9e/agYcOGaNq0qbTMwsICQ4cOxe3bt3H58mW19oMGDVKbb5X/7wiRJjAQEWlQTk4ONm7ciFatWuHWrVu4fv06rl+/jkaNGiE5ORlRUVEFtsn/BZ8Xjh49egQAuHPnDgCgevXqBbZ1dXXF33//XWBCcP59WllZwcTERDqV8fLyvM/Js3r1anz88cfSXB57e3vs3r0baWlprz32GjVqoEGDBmrzpSIiItC4ceM3XmH33XffITU1FdWqVYO7uzvGjx+P8+fPS+vzvvhq1ar1yn08ePAAT58+fWU/5ebm4u7du2rLXVxc1N7nBbLWrVvD3t5e7bV//36kpKS89jju3LmDqlWrFlheWE35TZw4ERYWFmjYsCGqVq2KgIAAHD169I3bvSz/8bxJ/lqrVKkCPT09tfsllYQ7d+688ueUt/5lb/o7QqQJnENEpEEHDhzA/fv3sXHjRmzcuLHA+oiICLRt21Zt2csjFC8TQhS7jsL2WZTPWbduHQYOHIguXbpg/PjxcHBwgL6+PkJCQnDjxo03fm7//v0xcuRI3Lt3D5mZmTh+/DiWLVv2xu2aN2+OGzdu4LfffsP+/fvxn//8B4sWLcLKlSvVRrs0Lf/cnrzJz2vXroWjo2OB9iV5pZarqyvi4+
Oxa9cu7Nu3D1u2bMHy5csxZcoUTJ8+vUj7KMpcpdfJG2171fs8hY10lqSS+DtClB8DEZEGRUREwMHBAT/88EOBdVu3bsW2bduwcuXKt/riqly5MoAX93LJ78qVKyhXrhzMzc2LX/RLfv31V3z44YfYunWr2pfh1KlTi7R97969MWbMGGzYsAH//vsvDA0N0atXryJta2tri0GDBmHQoEHIyMhA8+bNMW3aNAwZMkQ6BXbx4sVXbm9vbw8zM7NX9pOenh4qVqz42hqqVKkCAHBwcIC3t3eR6n5Z5cqVCz3tV1hNhTE3N0evXr3Qq1cvPH/+HN26dcPMmTMRFBQEExOTVwaU4rp27ZraqNL169eRm5srTcbOG4nJf7PF/CM4wKvDU2EqV678yp9T3nqi0sZTZkQa8u+//2Lr1q3o2LEjevToUeAVGBiIx48fY8eOHW+13/Lly6NOnTpYvXq12hfTxYsXsX//fnTo0EFjx5D3P/GX/+cdExPz2kvmX1auXDm0b98e69atQ0REBNq1a1fgNF1h/vnnH7X3FhYW+Oijj5CZmQngRdhp3rw5fv75ZyQkJKi1zatVX18fbdu2xW+//aZ2yic5ORnr169H06ZNoVQqX1uHj48PlEolZs2ahaysrALrHzx48NrtO3TogOPHj+PEiRNq2xTlPlT5+8DIyAhubm4QQki15AVfTd0NOn9wX7p0KQCgffv2AAClUoly5crhyJEjau2WL19eYF9vU1uHDh1w4sQJtd+rJ0+e4Mcff4Szs/NbzYMi0hSOEBFpyI4dO/D48WN8+umnha5v3LixdJPGoo6a5Jk3bx7at28PT09P+Pv7S5fdW1lZafR5Yh07dsTWrVvRtWtX+Pr64tatW1i5ciXc3NyQkZFRpH30798fPXr0AAAEBwcXaRs3Nze0bNkS9evXh62tLU6dOoVff/0VgYGBUpslS5agadOmqFevHoYOHQoXFxfcvn0bu3fvRmxsLABgxowZiIyMRNOmTTF8+HAYGBhg1apVyMzMxNy5c99Yh1KpxIoVK/D555+jXr166N27N+zt7ZGQkIDdu3fDy8vrtacAJ0yYgLVr16Jdu3YYOXKkdNl95cqV1eZEFaZt27ZwdHSEl5cXVCoV4uLisGzZMvj6+sLS0hIAUL9+fQDAN998g969e8PQ0BCdOnUq9gjhrVu38Omnn6Jdu3aIjo7GunXr0LdvX9SuXVtqM2TIEMyePRtDhgyBh4cHjhw5gqtXrxbY19vUNmnSJGzYsAHt27fHiBEjYGtri9WrV+PWrVvYsmUL72pN2qHFK9yI3iudOnUSJiYm4smTJ69sM3DgQGFoaCj+/vtv6ZLmefPmFWiHfJc0CyHE77//Lry8vISpqalQKpWiU6dO4vLly2pt8i6nfvDggdryAQMGCHNz8wKf06JFC7VLvXNzc8WsWbNE5cqVhbGxsahbt67YtWuXGDBggKhcufIbaxRCiMzMTGFjYyOsrKzEv//++8q+eNmMGTNEw4YNhbW1tTA1NRU1atQQM2fOFM+fP1drd/HiRdG1a1dhbW0tTExMRPXq1cXkyZPV2pw5c0b4+PgICwsLYWZmJlq1aiWOHTum1ibvsvuTJ08WWs/BgweFj4+PsLKyEiYmJqJKlSpi4MCB4tSpU288lvPnz4sWLVoIExMT8cEHH4jg4GARGhr6xsvuV61aJZo3by7s7OyEsbGxqFKlihg/frxIS0tT239wcLD44IMPhJ6ento+AYiAgIBCa8r/s8r7Pbl8+bLo0aOHsLS0FDY2NiIwMLDAz+zp06fC399fWFlZCUtLS9GzZ0+RkpJS6M//VbXlv+xeCCFu3LghevToIf0sGzZsKHbt2qXWJu+y+82bN6stf93tAIiKSyEEZ6URkeZkZ2fDyckJnTp1QmhoqLbLISIqEo5LEpFGbd++HQ8ePED//v21XQoRUZFxhIiINCImJgbnz59HcHAwypUr98YbORIR6RKOEBGRRqxYsQ
LDhg2Dg4PDWz8Li4hI2zhCRERERLLHESIiIiKSPQYiIiIikj3emLEIcnNzkZiYCEtLS43fOp+IiIhKhhACjx8/hpOT0xtv+MlAVASJiYlvfAYSERER6aa7d++iQoUKr23DQFQEebfNv3v37hufhURERES6IT09HRUrVpS+x1+HgagI8k6TKZVKBiIiIqIypijTXTipmoiIiGSPgYiIiIhkj4GIiIiIZI9ziDQoJycHWVlZ2i6DSomRkdEbL+MkIqKygYFIA4QQSEpKQmpqqrZLoVKkp6cHFxcXGBkZabsUIiJ6RwxEGpAXhhwcHGBmZsabN8pA3s0679+/j0qVKvFnTkRUxjEQvaOcnBwpDNnZ2Wm7HCpF9vb2SExMRHZ2NgwNDbVdDhERvQNOgHhHeXOGzMzMtFwJlba8U2U5OTlaroSIiN4VA5GG8JSJ/PBnTkT0/mAgIiIiItljIKIy5dChQ1AoFLyij4iINIqTqkvQosirpfp5oz+p9lbtBw4ciNWrVyMkJASTJk2Slm/fvh1du3aFEELTJRIREekkjhDJnImJCebMmYNHjx5pbJ/Pnz/X2L6IiIhKAwORzHl7e8PR0REhISGvbLNlyxbUrFkTxsbGcHZ2xoIFC9TWOzs7Izg4GP3794dSqcTQoUMRHh4Oa2tr7Nq1C9WrV4eZmRl69OiBp0+fYvXq1XB2doaNjQ1GjBihdpXW2rVr4eHhAUtLSzg6OqJv375ISUkpseMnIiICGIhkT19fH7NmzcLSpUtx7969AutPnz6Nnj17onfv3rhw4QKmTZuGyZMnIzw8XK3d/PnzUbt2bZw9exaTJ08GADx9+hRLlizBxo0bsW/fPhw6dAhdu3bFnj17sGfPHqxduxarVq3Cr7/+Ku0nKysLwcHBOHfuHLZv347bt29j4MCBJdkFREREnENEQNeuXVGnTh1MnToVoaGhausWLlyINm3aSCGnWrVquHz5MubNm6cWVFq3bo2xY8dK7//44w9kZWVhxYoVqFKlCgCgR48eWLt2LZKTk2FhYQE3Nze0atUKBw8eRK9evQAAgwcPlvbx4YcfYsmSJWjQoAEyMjJgYWFRUl1ARCQvBws5K9AqqPTr0CEcISIAwJw5c7B69WrExcWpLY+Li4OXl5faMi8vL1y7dk3tVJeHh0eBfZqZmUlhCABUKhWcnZ3Vgo1KpVI7JXb69Gl06tQJlSpVgqWlJVq0aAEASEhIeLcDJCIieg0GIgIANG/eHD4+PggKKt7/EMzNzQssy/84C4VCUeiy3NxcAMCTJ0/g4+MDpVKJiIgInDx5Etu2bQPAidpERFSyeMqMJLNnz0adOnVQvXp1aZmrqyuOHj2q1u7o0aOoVq0a9PX1Nfr5V65cwT///IPZs2ejYsWKAIBTp05p9DOIiIgKo9URoiNHjqBTp05wcnKCQqHA9u3b1dYLITBlyhSUL18epqam8Pb2xrVr19TaPHz4EH5+flAqlbC2toa/vz8yMjLU2pw/fx7NmjWDiYkJKlasiLlz55b0oZVJ7u7u8PPzw5IlS6RlY8eORVRUFIKDg3H16lWsXr0ay5Ytw7hx4zT++ZUqVYKRkRGWLl2KmzdvYseOHQgODtb45xAREeWn1UD05MkT1K5dGz/88EOh6+fOnYslS5Zg5cqViImJgbm5OXx8fPDs2TOpjZ+fHy5duoTIyEjs2rULR44cwdChQ6X16enpaNu2LSpXrozTp09j3rx5mDZtGn788ccSP76y6LvvvpNOYQFAvXr18Msvv2Djxo2oVasWpkyZgu+++65Ervyyt7dHeHg4Nm/eDDc3N8yePRvz58/X+OcQERHlpxA6cjtihUKBbdu2oUuXLgBejA45OTlh7Nix0mhEWloaVCoVwsPD0bt3b8TFxcHNzQ0nT56UJvXu27cPHTp0wL179+Dk5IQVK1bgm2++QVJSkvR08kmTJmH79u24cuVKkWpLT0+HlZUV0tLSoFQq1dY9e/YMt27dgo
uLC0xMTDTUG1QW8GdPRGWWTK4ye933d346O6n61q1bSEpKgre3t7TMysoKjRo1QnR0NAAgOjoa1tbWalc4eXt7Q09PDzExMVKb5s2bS2EIAHx8fBAfH//KuzNnZmYiPT1d7UVERETvL50NRElJSQBeXJb9MpVKJa1LSkqCg4OD2noDAwPY2tqqtSlsHy9/Rn4hISGwsrKSXnkTfImIiOj9pLOBSJuCgoKQlpYmve7evavtkoiIiKgE6WwgcnR0BAAkJyerLU9OTpbWOTo6FnjOVXZ2Nh4+fKjWprB9vPwZ+RkbG0OpVKq9iIiI6P2ls4HIxcUFjo6OiIqKkpalp6cjJiYGnp6eAABPT0+kpqbi9OnTUpsDBw4gNzcXjRo1ktocOXIEWVlZUpvIyEhUr14dNjY2pXQ0REREpMu0GogyMjIQGxuL2NhYAC8mUsfGxiIhIQEKhQKjRo3CjBkzsGPHDly4cAH9+/eHk5OTdCWaq6sr2rVrhy+++AInTpzA0aNHERgYiN69e8PJyQkA0LdvXxgZGcHf3x+XLl3Cpk2b8P3332PMmDFaOmoiIiLSNVq9U/WpU6fQqlUr6X1eSBkwYADCw8MxYcIEPHnyBEOHDkVqaiqaNm2Kffv2qV3iHBERgcDAQLRp0wZ6enro3r272o0FrayssH//fgQEBKB+/fooV64cpkyZonavIiIiIpI3nbkPkS7jfYioMPzZE1GZxfsQFaCzc4iIiIiISgsDEZVZzs7OWLx4sbbLICKi9wCfdl+SChuSLEnFHO6Mjo5G06ZN0a5dO+zevVvDRREREek+jhARQkND8fXXX+PIkSNITEzUdjlERESljoFI5jIyMrBp0yYMGzYMvr6+CA8Pl9YdOnQICoUCUVFR8PDwgJmZGZo0aYL4+Hi1faxYsQJVqlSBkZERqlevjrVr16qtVygUWLVqFTp27AgzMzO4uroiOjoa169fR8uWLWFubo4mTZrgxo0b0jY3btxA586doVKpYGFhgQYNGuD3339/7bEkJCSgc+fOsLCwgFKpRM+ePdVuyjlw4EDplg15Ro0ahZYtW0rvf/31V7i7u8PU1BR2dnbw9vbGkydPitibRERUVjEQydwvv/yCGjVqoHr16ujXrx9+/vln5L/w8JtvvsGCBQtw6tQpGBgYYPDgwdK6bdu2YeTIkRg7diwuXryIL7/8EoMGDcLBgwfV9hEcHIz+/fsjNjYWNWrUQN++ffHll18iKCgIp06dghACgYGBUvuMjAx06NABUVFROHv2LNq1a4dOnTohISGh0OPIzc1F586d8fDhQxw+fBiRkZG4efMmevXqVeS+uH//Pvr06YPBgwcjLi4Ohw4dQrdu3Qr0BxERvX84h0jmQkND0a9fPwBAu3btkJaWhsOHD6uNmsycORMtWrQAAEyaNAm+vr549uwZTExMMH/+fAwcOBDDhw8H8OJeUsePH8f8+fPV7jE1aNAg9OzZEwAwceJEeHp6YvLkyfDx8QEAjBw5EoMGDZLa165dG7Vr15beBwcHY9u2bdixY4dacMoTFRWFCxcu4NatW9LDeNesWYOaNWvi5MmTaNCgwRv74v79+8jOzka3bt1QuXJlAIC7u/ubO5GIiMo8jhDJWHx8PE6cOIE+ffoAAAwMDNCrVy+Ehoaqtfv444+lP5cvXx4ApGfIxcXFwcvLS629l5cX4uLiXrkPlUoFQD1sqFQqPHv2DOnp6QBejBCNGzcOrq6usLa2hoWFBeLi4l45QhQXF4eKFStKYQgA3NzcYG1tXaCWV6lduzbatGkDd3d3fPbZZ/jpp5/w6NGjIm1LRERlGwORjIWGhiI7OxtOTk4wMDCAgYEBVqxYgS1btiAtLU1qZ2hoKP1ZoVAAeHGK6m0Uto/X7XfcuHHYtm0bZs2ahT/++AOxsbFwd3fH8+fP3/Io/0dPT6/A6a+Xn3Gnr6+PyMhI7N27F25ubli6dCmqV6+OW7duFfsziYiobGAgkqns7G
ysWbMGCxYskJ4nFxsbi3PnzsHJyQkbNmwo0n5cXV1x9OhRtWVHjx6Fm5vbO9V39OhRDBw4EF27doW7uzscHR1x+/bt19Zx9+5d3L17V1p2+fJlpKamSrXY29vj/v37atvlPUcvj0KhgJeXF6ZPn46zZ8/CyMgI27Zte6djISIi3cc5RDK1a9cuPHr0CP7+/rCyslJb1717d4SGhmLevHlv3M/48ePRs2dP1K1bF97e3ti5cye2bt36xivC3qRq1arYunUrOnXqBIVCgcmTJ792VMrb2xvu7u7w8/PD4sWLkZ2djeHDh6NFixbw8PAAALRu3Rrz5s3DmjVr4OnpiXXr1uHixYuoW7cuACAmJgZRUVFo27YtHBwcEBMTgwcPHsDV1fWdjoWIiHQfR4hkKjQ0FN7e3gXCEPAiEJ06dQrnz59/4366dOmC77//HvPnz0fNmjWxatUqhIWFqU3KLo6FCxfCxsYGTZo0QadOneDj44N69eq9sr1CocBvv/0GGxsbNG/eHN7e3vjwww+xadMmqY2Pjw8mT56MCRMmoEGDBnj8+DH69+8vrVcqlThy5Ag6dOiAatWq4dtvv8WCBQvQvn37dzoWIiLSfXy4axHw4a5UGP7siajM4sNdC+AIEREREckeAxERERHJHgMRERERyR6vMiMiIiLZzCt6FY4QaQjnpssPf+ZERO8PBqJ3lHe35adPn2q5EipteXfN1tfX13IlRET0rnjK7B3p6+vD2tpaeraXmZmZ9BgKen/l5ubiwYMHMDMzg4EB/xoREZV1/JdcAxwdHQH874GnJA96enqoVKkSAzAR0XuAgUgDFAoFypcvDwcHB7WHhdL7zcjICHp6POtMRPQ+YCDSIH19fc4nISIiKoP431siIiKSPQYiIiIikj0GIiIiIpI9ziEiIiJ63xV2F2pSwxEiIiIikj0GIiIiIpI9BiIiIiKSPQYiIiIikj0GIiIiIpI9BiIiIiKSPQYiIiIikj0GIiIiIpI9BiIiIiKSPQYiIiIikj0GIiIiIpI9BiIiIiKSPQYiIiIikj0GIiIiIpI9BiIiIiKSPQYiIiIikj0GIiIiIpI9BiIiIiKSPQYiIiIikj0GIiIiIpI9BiIiIiKSPQYiIiIikj0GIiIiIpI9BiIiIiKSPQYiIiIikj0GIiIiIpI9A20XQERERDrqYIj6+1ZB2qmjFHCEiIiIiGSPgYiIiIhkj4GIiIiIZI+BiIiIiGRPpwNRTk4OJk+eDBcXF5iamqJKlSoIDg6GEEJqI4TAlClTUL58eZiamsLb2xvXrl1T28/Dhw/h5+cHpVIJa2tr+Pv7IyMjo7QPh4iIiHSUTgeiOXPmYMWKFVi2bBni4uIwZ84czJ07F0uXLpXazJ07F0uWLMHKlSsRExMDc3Nz+Pj44NmzZ1IbPz8/XLp0CZGRkdi1axeOHDmCoUOHauOQiIiISAcpxMvDLTqmY8eOUKlUCA0NlZZ1794dpqamWLduHYQQcHJywtixYzFu3DgAQFpaGlQqFcLDw9G7d2/ExcXBzc0NJ0+ehIeHBwBg37596NChA+7duwcnJ6c31pGeng4rKyukpaVBqVSWzMESERGVlPyXzxdXGbvs/m2+v3V6hKhJkyaIiorC1atXAQDnzp3Dn3/+ifbt2wMAbt26haSkJHh7e0vbWFlZoVGjRoiOjgYAREdHw9raWgpDAODt7Q09PT3ExMQU+rmZmZlIT09XexEREdH7S6dvzDhp0iSkp6ejRo0a0NfXR05ODmbOnAk/Pz8AQFJSEgBApVKpbadSqaR1SUlJcHBwUFtvYGAAW1tbqU1+ISEhmD59uqYPh4iIiHSUTo8Q/fLLL4iIiMD69etx5swZrF69GvPnz8fq1atL9HODgoKQlpYmve7evVuin0dERETapdMjROPHj8ekSZPQu3dvAIC7uzvu3LmDkJAQDBgwAI6OjgCA5ORklC9fXtouOTkZderUAQA4OjoiJSVFbb/Z2dl4+P
ChtH1+xsbGMDY2LoEjIiIiIl2k0yNET58+hZ6eeon6+vrIzc0FALi4uMDR0RFRUVHS+vT0dMTExMDT0xMA4OnpidTUVJw+fVpqc+DAAeTm5qJRo0alcBRERESk63R6hKhTp06YOXMmKlWqhJo1a+Ls2bNYuHAhBg8eDABQKBQYNWoUZsyYgapVq8LFxQWTJ0+Gk5MTunTpAgBwdXVFu3bt8MUXX2DlypXIyspCYGAgevfuXaQrzIiIiOj9p9OBaOnSpZg8eTKGDx+OlJQUODk54csvv8SUKVOkNhMmTMCTJ08wdOhQpKamomnTpti3bx9MTEykNhEREQgMDESbNm2gp6eH7t27Y8mSJdo4JCIiItJBOn0fIl3B+xAREVGZxvsQle37EBERERGVBgYiIiIikj0GIiIiIpI9nZ5UTURERDqksLlIZWxe0atwhIiIiIhkj4GIiIiIZI+BiIiIiGSPgYiIiIhkj4GIiIiIZI+BiIiIiGSPgYiIiIhkj/chIiIiep9o6rllMsMRIiIiIpI9BiIiIiKSPQYiIiIikj0GIiIiIpI9BiIiIiKSPQYiIiIikj0GIiIiIpI9BiIiIiKSPQYiIiIikj3eqZpK1KLIq2rvR39STUuVEBERvRpHiIiIiEj2GIiIiIhI9hiIiIiISPYYiIiIiEj2GIiIiIhI9hiIiIiISPYYiIiIiEj2GIiIiIhI9hiIiIiISPYYiIiIiEj2+OgOKpb8j+QA+FgOIiIquzhCRERERLLHQERERESyx0BEREREssdARERERLLHQERERESyx0BEREREssfL7qlU8XJ9IiLSRRwhIiIiItnjCBEREVFZdjBE2xW8FzhCRERERLLHQERERESyx1NmREREVHz5T9m1CtJOHe+II0REREQkewxEREREJHsMRERERCR7DEREREQkewxEREREJHvFCkQ3b97UdB1EREREWlOsy+4/+ugjtGjRAv7+/ujRowdMTEw0XReVQYU9p4yIiKgsKNYI0ZkzZ/Dxxx9jzJgxcHR0xJdffokTJ05oujYiIiKiUlGsQFSnTh18//33SExMxM8//4z79++jadOmqFWrFhYuXIgHDx5ouk4iIiKiEvNOk6oNDAzQrVs3bN68GXPmzMH169cxbtw4VKxYEf3798f9+/c1VScRERFRiXmnQHTq1CkMHz4c5cuXx8KFCzFu3DjcuHEDkZGRSExMROfOnTVVJxEREVGJKdak6oULFyIsLAzx8fHo0KED1qxZgw4dOkBP70W+cnFxQXh4OJydnTVZKxEREVGJKNYI0YoVK9C3b1/cuXMH27dvR8eOHaUwlMfBwQGhoaHvXOBff/2Ffv36wc7ODqampnB3d8epU6ek9UIITJkyBeXLl4epqSm8vb1x7do1tX08fPgQfn5+UCqVsLa2hr+/PzIyMt65NtKMRZFX1V5ERESlrVgjRPkDR2GMjIwwYMCA4uxe8ujRI3h5eaFVq1bYu3cv7O3tce3aNdjY2Eht5s6diyVLlmD16tVwcXHB5MmT4ePjg8uXL0u3A/Dz88P9+/cRGRmJrKwsDBo0CEOHDsX69evfqT4iIiJ6PxQrEIWFhcHCwgKfffaZ2vLNmzfj6dOn7xyE8syZMwcVK1ZEWFiYtMzFxUX6sxACixcvxrfffivNV1qzZg1UKhW2b9+O3r17Iy4uDvv27cPJkyfh4eEBAFi6dCk6dOiA+fPnw8nJSSO1EhERUdlVrFNmISEhKFeuXIHlDg4OmDVr1jsXlWfHjh3w8PDAZ599BgcHB9StWxc//fSTtP7WrVtISkqCt7e3tMzKygqNGjVCdHQ0ACA6OhrW1tZSGAIAb29v6OnpISYmRmO1EhERUdlVrECUkJCgNlKTp3LlykhISHjnovLcvHkTK1asQNWqVfHf//4Xw4YNw4gRI7B69WoAQFJSEgBApVKpbadSqaR1SUlJcHBwUFtvYGAAW1tbqU1+mZmZSE9PV3sRERHR+6tYgcjBwQ
Hnz58vsPzcuXOws7N756Ly5Obmol69epg1axbq1q2LoUOH4osvvsDKlSs19hmFCQkJgZWVlfSqWLFiiX4eERERaVex5hD16dMHI0aMgKWlJZo3bw4AOHz4MEaOHInevXtrrLjy5cvDzc1NbZmrqyu2bNkCAHB0dAQAJCcno3z58lKb5ORk1KlTR2qTkpKito/s7Gw8fPhQ2j6/oKAgjBkzRnqfnp4u+1DEq7+IiOh9VqwRouDgYDRq1Aht2rSBqakpTE1N0bZtW7Ru3Vqjc4i8vLwQHx+vtuzq1auoXLkygBcTrB0dHREVFSWtT09PR0xMDDw9PQEAnp6eSE1NxenTp6U2Bw4cQG5uLho1alTo5xobG0OpVKq9iIiI6P1VrBEiIyMjbNq0CcHBwTh37px0f6C8oKIpo0ePRpMmTTBr1iz07NkTJ06cwI8//ogff/wRAKBQKDBq1CjMmDEDVatWlS67d3JyQpcuXQC8GFFq166ddKotKysLgYGB6N27N68wIyIiIgDFDER5qlWrhmrVqmmqlgIaNGiAbdu2ISgoCN999x1cXFywePFi+Pn5SW0mTJiAJ0+eYOjQoUhNTUXTpk2xb98+6R5EABAREYHAwEC0adMGenp66N69O5YsWVJidRMREVHZohBCiLfdKCcnB+Hh4YiKikJKSgpyc3PV1h84cEBjBeqC9PR0WFlZIS0tTbanz0pzDtHoT0ouZBMRvXcOhmi7AnWtgrRdgeRtvr+LNUI0cuRIhIeHw9fXF7Vq1YJCoShWoURERES6oFiBaOPGjfjll1/QoUMHTddDREREVOqKdZWZkZERPvroI03XQkRERKQVxRohGjt2LL7//nssW7aMp8tI4wqbr8R5RUREVJKKFYj+/PNPHDx4EHv37kXNmjVhaGiotn7r1q0aKY6IiIioNBQrEFlbW6Nr166aroWIiIhIK4oViMLCwjRdBxEREZHWFPvGjNnZ2Th06BBu3LiBvn37wtLSEomJiVAqlbCwsNBkjURERFRWFHZfJB26N9GrFCsQ3blzB+3atUNCQgIyMzPxySefwNLSEnPmzEFmZmaJP42eiIiISJOKddn9yJEj4eHhgUePHsHU1FRa3rVrV7UHrRIRERGVBcUaIfrjjz9w7NgxGBkZqS13dnbGX3/9pZHCiIiIiEpLsUaIcnNzkZOTU2D5vXv3YGlp+c5FEREREZWmYgWitm3bYvHixdJ7hUKBjIwMTJ06lY/zICIiojKnWKfMFixYAB8fH7i5ueHZs2fo27cvrl27hnLlymHDhg2arpGIiIioRBUrEFWoUAHnzp3Dxo0bcf78eWRkZMDf3x9+fn5qk6yJiIiIyoJi34fIwMAA/fr102QtRERERFpRrEC0Zs2a167v379/sYohIiIi0oZiBaKRI0eqvc/KysLTp09hZGQEMzMzBiIiIiIqU4p1ldmjR4/UXhkZGYiPj0fTpk05qZqIiIjKnGLPIcqvatWqmD17Nvr164crV65oardEAIBFkVfV3o/+pJqWKiEiovdRsUaIXsXAwACJiYma3CURERFRiSvWCNGOHTvU3gshcP/+fSxbtgxeXl4aKYyIiIiotBQrEHXp0kXtvUKhgL29PVq3bo0FCxZooi4iIiKiUlOsQJSbm6vpOoiIiIi0RqNziIiIiIjKomKNEI0ZM6bIbRcuXFicjyAiIiIqNcUKRGfPnsXZs2eRlZWF6tWrAwCuXr0KfX191KtXT2qnUCg0UyURERFRCSpWIOrUqRMsLS2xevVq2NjYAHhxs8ZBgwahWbNmGDt2rEaLpNKV/54/RERE77tizSFasGABQkJCpDAEADY2NpgxYwavMiMiIqIyp1iBKD09HQ8ePCiw/MGDB3j8+PE7F0VERERUmooViLp27YpBgwZh69atuHfvHu7du4ctW7bA398f3bp103SNRERERCWqWHOIVq5ciXHjxqFv377Iysp6sSMDA/j7+2PevHkaLZCoMIXNc+LzzY
iIqLiKFYjMzMywfPlyzJs3Dzdu3AAAVKlSBebm5hotjoiIiKg0vNONGe/fv4/79++jatWqMDc3hxBCU3URERERlZpiBaJ//vkHbdq0QbVq1dChQwfcv38fAODv789L7omIiKjMKVYgGj16NAwNDZGQkAAzMzNpea9evbBv3z6NFUdERERUGoo1h2j//v3473//iwoVKqgtr1q1Ku7cuaORwoiIiIhKS7FGiJ48eaI2MpTn4cOHMDY2fueiiIiIiEpTsUaImjVrhjVr1iA4OBjAi2eW5ebmYu7cuWjVqpVGCyQiIqL/72CItit4bxUrEM2dOxdt2rTBqVOn8Pz5c0yYMAGXLl3Cw4cPcfToUU3XSERERFSiinXKrFatWrh69SqaNm2Kzp0748mTJ+jWrRvOnj2LKlWqaLpGIiIiohL11iNEWVlZaNeuHVauXIlvvvmmJGoiIiIiKlVvPUJkaGiI8+fPl0QtRERERFpRrFNm/fr1Q2hoqKZrISIiItKKYk2qzs7Oxs8//4zff/8d9evXL/AMs4ULF2qkOCIiIqLS8FaB6ObNm3B2dsbFixdRr149AMDVq+pPHVcoFJqrjoiIiKgUvFUgqlq1Ku7fv4+DBw8CePGojiVLlkClUpVIcURERESl4a3mEOV/mv3evXvx5MkTjRZEREREVNqKNak6T/6ARERERFQWvVUgUigUBeYIcc4QERERlXVvNYdICIGBAwdKD3B99uwZvvrqqwJXmW3dulVzFRIV0aJI9Qn+oz+ppqVKiIiorHmrQDRgwAC19/369dNoMaQd+YMEERGR3LxVIAoLCyupOoiIiIi05p0mVRMRERG9DxiIiIiISPYYiIiIiEj2GIiIiIhI9hiIiIiISPbKVCCaPXs2FAoFRo0aJS179uwZAgICYGdnBwsLC3Tv3h3Jyclq2yUkJMDX1xdmZmZwcHDA+PHjkZ2dXcrVExERka4qM4Ho5MmTWLVqFT7++GO15aNHj8bOnTuxefNmHD58GImJiejWrZu0PicnB76+vnj+/DmOHTuG1atXIzw8HFOmTCntQyAiIiIdVSYCUUZGBvz8/PDTTz/BxsZGWp6WlobQ0FAsXLgQrVu3Rv369REWFoZjx47h+PHjAID9+/fj8uXLWLduHerUqYP27dsjODgYP/zwA54/f66tQyIiIiIdUiYCUUBAAHx9feHt7a22/PTp08jKylJbXqNGDVSqVAnR0dEAgOjoaLi7u0OlUkltfHx8kJ6ejkuXLhX6eZmZmUhPT1d7ERER0fvrre5UrQ0bN27EmTNncPLkyQLrkpKSYGRkBGtra7XlKpUKSUlJUpuXw1De+rx1hQkJCcH06dM1UD0RERGVBTo9QnT37l2MHDkSERERMDExKbXPDQoKQlpamvS6e/duqX02ERERlT6dDkSnT59GSkoK6tWrBwMDAxgYGODw4cNYsmQJDAwMoFKp8Pz5c6Smpqptl5ycDEdHRwCAo6NjgavO8t7ntcnP2NgYSqVS7UVERETvL50ORG3atMGFCxcQGxsrvTw8PODn5yf92dDQEFFRUdI28fHxSEhIgKenJwDA09MTFy5cQEpKitQmMjISSqUSbm5upX5MREREpHt0eg6RpaUlatWqpbbM3NwcdnZ20nJ/f3+MGTMGtra2UCqV+Prrr+Hp6YnGjRsDANq2bQs3Nzd8/vnnmDt3LpKSkvDtt98iICAAxsbGpX5MREREpHt0OhAVxaJFi6Cnp4fu3bsjMzMTPj4+WL58ubReX18fu3btwrBhw+Dp6Qlzc3MMGDAA3333nRarJiIiIl2iEEIIbReh69LT02FlZYW0tLT3cj7Rosir2i6hRIz+pJq2SyAi0qyDIdquQDNaBZXKx7zN97dOzyEiIiIiKg0MRERERCR7DEREREQkewxEREREJHsMRERERCR7DEREREQkewxEREREJHsMRERERCR7DEREREQkewxEREREJHsMRERERCR7DEREREQkewxERE
REJHsMRERERCR7DEREREQkewbaLoCopCyKvFpg2ehPqmmhEiIi0nUMRERERLrqYIi2K5ANnjIjIiIi2WMgIiIiItljICIiIiLZYyAiIiIi2WMgIiIiItnjVWZERERUugq7eq5VUOnX8RKOEBEREZHsMRARERGR7DEQERERkewxEBEREZHscVK1zBT2fC8iIiK54wgRERERyR4DEREREckeAxERERHJHucQkawUNodq9CfVtFAJERHpEo4QERERkewxEBEREZHsMRARERGR7DEQERERkewxEBEREZHs8Sozkr38V57xqjMiIvnhCBERERHJHgMRERERyR4DEREREckeAxERERHJHgMRERERyR4DEREREckeAxERERHJHgMRERERyR4DEREREckeAxERERHJHgMRERERyR4DEREREckeAxERERHJHgMRERERyR4DEREREckeAxERERHJHgMRERERyR4DEREREcmegbYLICIiIgAHQ7RdgaxxhIiIiIhkT6cDUUhICBo0aABLS0s4ODigS5cuiI+PV2vz7NkzBAQEwM7ODhYWFujevTuSk5PV2iQkJMDX1xdmZmZwcHDA+PHjkZ2dXZqHQkRERDpMpwPR4cOHERAQgOPHjyMyMhJZWVlo27Ytnjx5IrUZPXo0du7cic2bN+Pw4cNITExEt27dpPU5OTnw9fXF8+fPcezYMaxevRrh4eGYMmWKNg6JiIiIdJBCCCG0XURRPXjwAA4ODjh8+DCaN2+OtLQ02NvbY/369ejRowcA4MqVK3B1dUV0dDQaN26MvXv3omPHjkhMTIRKpQIArFy5EhMnTsSDBw9gZGT0xs9NT0+HlZUV0tLSoFQqS/QYNW1R5FVtl1DmjP6kmrZLICI5kvscolZBGt/l23x/l6lJ1WlpaQAAW1tbAMDp06eRlZUFb29vqU2NGjVQqVIlKRBFR0fD3d1dCkMA4OPjg2HDhuHSpUuoW7dugc/JzMxEZmam9D49Pb2kDol0UGEhkiGJiOj9ptOnzF6Wm5uLUaNGwcvLC7Vq1QIAJCUlwcjICNbW1mptVSoVkpKSpDYvh6G89XnrChMSEgIrKyvpVbFiRQ0fDREREemSMhOIAgICcPHiRWzcuLHEPysoKAhpaWnS6+7duyX+mURERKQ9ZeKUWWBgIHbt2oUjR46gQoUK0nJHR0c8f/4cqampaqNEycnJcHR0lNqcOHFCbX95V6HltcnP2NgYxsbGGj4KIiIi0lU6PUIkhEBgYCC2bduGAwcOwMXFRW19/fr1YWhoiKioKGlZfHw8EhIS4OnpCQDw9PTEhQsXkJKSIrWJjIyEUqmEm5tb6RwIERER6TSdHiEKCAjA+vXr8dtvv8HS0lKa82NlZQVTU1NYWVnB398fY8aMga2tLZRKJb7++mt4enqicePGAIC2bdvCzc0Nn3/+OebOnYukpCR8++23CAgI4CgQERERAdDxQLRixQoAQMuWLdWWh4WFYeDAgQCARYsWQU9PD927d0dmZiZ8fHywfPlyqa2+vj527dqFYcOGwdPTE+bm5hgwYAC+++670joMIiIi0nE6HYiKcoskExMT/PDDD/jhhx9e2aZy5crYs2ePJksjIiKi94hOzyEiIiIiKg06PUJEpCvy36yRN2okInq/cISIiIiIZI+BiIiIiGSPgYiIiIhkj4GIiIiIZI+BiIiIiGSPgYiIiIhkj4GIiIiIZI+BiIiIiGSPgYiIiIhkj3eqJiqG/HeuBnj3aiKisowjRERERCR7DEREREQkewxEREREJHsMRERERCR7DEREREQkewxEREREJHsMRERERCR7vA/Re6Swe+MQERHRm3GEiIiIiGSPgYiIiIhkj4GIiIiIZI9ziIg0JP8cLj7bjIio7OAIEREREckeAxERERHJHgMRERERyR7nEBEREWnDwRBtV0AvYSAiKiFFuVEmJ14TEekGnjIjIiIi2eMIEZEWFTaKxFEjIqLSxxEiIiIikj0GIi
IiIpI9BiIiIiKSPQYiIiIikj0GIiIiIpI9BiIiIiKSPQYiIiIikj3eh4hIx/DeREREpY+BqAwryqMhiIiI6M14yoyIiIhkj4GIiIiIZI+BiIiIiGSPgYiIiIhkj5OqiYiIStrBEG1XQG/AESIiIiKSPY4QEZUB+W+xwPsSERFpFkeIiIiISPY4QkRUBvFu1kREmsVARPSeKMqdyxmaiIgKx1NmREREJHscISKSEU7OJiIqHAMRkYzxNBtRCeA9h8oknjIjIiIi2eMIERFpBE/HEVFZxkBERK/FS/yJSA4YiMqIosz1ICprihu2OBpFRJomq0D0ww8/YN68eUhKSkLt2rWxdOlSNGzYUNtlEZU5RQno2h5Z0vbnE1HZIptAtGnTJowZMwYrV65Eo0aNsHjxYvj4+CA+Ph4ODg7aLo9IFt7XkU6GL5njVWXvBYUQQmi7iNLQqFEjNGjQAMuWLQMA5ObmomLFivj6668xadKk126bnp4OKysrpKWlQalUlka57+0XB9HbKixYlOTfj+IEGU3evoCnA8sgBiLNaBWk8V2+zfe3LEaInj9/jtOnTyMo6H+draenB29vb0RHR2uxshcYfoherbT/fpTm5/HUYxnE8PPekkUg+vvvv5GTkwOVSqW2XKVS4cqVKwXaZ2ZmIjMzU3qflpYG4EXSLAnPnmSUyH6JSHeEbD+jk/vS9c8PaP3RG9v8cOB6we0MflNf0HzsG7crsA2AE7cfvvHzGzrbvvU2hW1XVhWnjwpVAt+xed/bRTkZJotA9LZCQkIwffr0AssrVqyohWqIiOTr/zS23Xcl9lmkKW/+GRXX48ePYWVl9do2sghE5cqVg76+PpKTk9WWJycnw9HRsUD7oKAgjBkzRnqfm5uLhw8fws7ODgqFosTr1ab09HRUrFgRd+/eLbX5Uu8r9qVmsT81h32pOexLzdJ0fwoh8PjxYzg5Ob2xrSwCkZGREerXr4+oqCh06dIFwIuQExUVhcDAwALtjY2NYWxsrLbM2tq6FCrVHUqlkn+5NYR9qVnsT81hX2oO+1KzNNmfbxoZyiOLQAQAY8aMwYABA+Dh4YGGDRti8eLFePLkCQYNGqTt0oiIiEjLZBOIevXqhQcPHmDKlClISkpCnTp1sG/fvgITrYmIiEh+ZBOIACAwMLDQU2T0P8bGxpg6dWqBU4b09tiXmsX+1Bz2peawLzVLm/0pmxszEhEREb2KnrYLICIiItI2BiIiIiKSPQYiIiIikj0GIiIiIpI9BiIZCAkJQYMGDWBpaQkHBwd06dIF8fHxam2ePXuGgIAA2NnZwcLCAt27dy9wZ++EhAT4+vrCzMwMDg4OGD9+PLKzs0vzUHTO7NmzoVAoMGrUKGkZ+7Lo/vrrL/Tr1w92dnYwNTWFu7s7Tp06Ja0XQmDKlCkoX748TE1N4e3tjWvXrqnt4+HDh/Dz84NSqYS1tTX8/f2RkSG/5wPm5ORg8uTJcHFxgampKapUqYLg4GC1ZzixPwt35MgRdOrUCU5OTlAoFNi+fbvaek312/nz59GsWTOYmJigYsWKmDt3bkkfmla8rj+zsrIwceJEuLu7w9zcHE5OTujfvz8SExPV9qGV/hT03vPx8RFhYWHi4sWLIjY2VnTo0EFUqlRJZGRkSG2++uorUbFiRREVFSVOnTolGjduLJo0aSKtz87OFrVq1RLe3t7i7NmzYs+ePaJcuXIiKChIG4ekE06cOCGcnZ3Fxx9/LEaOHCktZ18WzcOHD0XlypXFwIEDRUxMjLh586b473//K65fvy61mT17trCyshLbt28X586dE59++qlwcXER//77r9SmXbt2onbt2uL48ePijz/+EB999JHo06ePNg5Jq2bOnCns7OzErl27xK1bt8TmzZuFhYWF+P7776U27M/C7dmzR3zzzTdi69atAoDYtm2b2npN9FtaWppQqVTCz89PXLx4UWzYsEGYmpqKVatWldZhlprX9Wdqaqrw9vYWmz
ZtEleuXBHR0dGiYcOGon79+mr70EZ/MhDJUEpKigAgDh8+LIR48QtqaGgoNm/eLLWJi4sTAER0dLQQ4sUvuJ6enkhKSpLarFixQiiVSpGZmVm6B6ADHj9+LKpWrSoiIyNFixYtpEDEviy6iRMniqZNm75yfW5urnB0dBTz5s2TlqWmpgpjY2OxYcMGIYQQly9fFgDEyZMnpTZ79+4VCoVC/PXXXyVXvA7y9fUVgwcPVlvWrVs34efnJ4RgfxZV/i9wTfXb8uXLhY2Njdrf8YkTJ4rq1auX8BFpV2EBM78TJ04IAOLOnTtCCO31J0+ZyVBaWhoAwNbWFgBw+vRpZGVlwdvbW2pTo0YNVKpUCdHR0QCA6OhouLu7q93Z28fHB+np6bh06VIpVq8bAgIC4Ovrq9ZnAPvybezYsQMeHh747LPP4ODggLp16+Knn36S1t+6dQtJSUlqfWllZYVGjRqp9aW1tTU8PDykNt7e3tDT00NMTEzpHYwOaNKkCaKionD16lUAwLlz5/Dnn3+iffv2ANifxaWpfouOjkbz5s1hZGQktfHx8UF8fDwePXpUSkejm9LS0qBQKKRnhmqrP2V1p2p68VDbUaNGwcvLC7Vq1QIAJCUlwcjIqMADbFUqFZKSkqQ2+R9zkvc+r41cbNy4EWfOnMHJkycLrGNfFt3NmzexYsUKjBkzBv/3f/+HkydPYsSIETAyMsKAAQOkviisr17uSwcHB7X1BgYGsLW1lVVfAsCkSZOQnp6OGjVqQF9fHzk5OZg5cyb8/PwAgP1ZTJrqt6SkJLi4uBTYR946GxubEqlf1z179gwTJ05Enz59pIe5aqs/GYhkJiAgABcvXsSff/6p7VLKpLt372LkyJGIjIyEiYmJtssp03Jzc+Hh4YFZs2YBAOrWrYuLFy9i5cqVGDBggJarK3t++eUXREREYP369ahZsyZiY2MxatQoODk5sT9JJ2VlZaFnz54QQmDFihXaLodXmclJYGAgdu3ahYMHD6JChQrSckdHRzx//hypqalq7ZOTk+Ho6Ci1yX+lVN77vDZycPr0aaSkpKBevXowMDCAgYEBDh8+jCVLlsDAwAAqlYp9WUTly5eHm5ub2jJXV1ckJCQA+F9fFNZXL/dlSkqK2vrs7Gw8fPhQVn0JAOPHj8ekSZPQu3dvuLu74/PPP8fo0aMREhICgP1ZXJrqN/69V5cXhu7cuYPIyEhpdAjQXn8yEMmAEAKBgYHYtm0bDhw4UGCYsX79+jA0NERUVJS0LD4+HgkJCfD09AQAeHp64sKFC2q/pHm/xPm/1N5nbdq0wYULFxAbGyu9PDw84OfnJ/2ZfVk0Xl5eBW7/cPXqVVSuXBkA4OLiAkdHR7W+TE9PR0xMjFpfpqam4vTp01KbAwcOIDc3F40aNSqFo9AdT58+hZ6e+j/p+vr6yM3NBcD+LC5N9ZunpyeOHDmCrKwsqU1kZCSqV68uu9NleWHo2rVr+P3332FnZ6e2Xmv9Wezp2FRmDBs2TFhZWYlDhw6J+/fvS6+nT59Kbb766itRqVIlceDAAXHq1Cnh6ekpPD09pfV5l4q3bdtWxMbGin379gl7e3vZXSpemJevMhOCfVlUJ06cEAYGBmLmzJni2rVrIiIiQpiZmYl169ZJbWbPni2sra3Fb7/9Js6fPy86d+5c6OXOdevWFTExMeLPP/8UVatWfe8vEy/MgAEDxAcffCBddr9161ZRrlw5MWHCBKkN+7Nwjx8/FmfPnhVnz54VAMTChQvF2bNnpaueNNFvqampQqVSic8//1xcvHhRbNy4UZiZmb2Xl92/rj+fP38uPv30U1GhQgURGxur9p308hVj2uhPBiIZAFDoKywsTGrz77//iuHDhwsbGxthZmYmunbtKu7fv6+2n9u3b4v27dsLU1NTUa5cOTF27FiRlZVVykeje/IHIvZl0e3cuVPUqlVLGBsbixo1aogff/xRbX1ubq6YPHmyUKlUwtjYWLRp00
bEx8ertfnnn39Enz59hIWFhVAqlWLQoEHi8ePHpXkYOiE9PV2MHDlSVKpUSZiYmIgPP/xQfPPNN2pfMuzPwh08eLDQfyMHDBgghNBcv507d040bdpUGBsbiw8++EDMnj27tA6xVL2uP2/duvXK76SDBw9K+9BGfyqEeOk2pkREREQyxDlEREREJHsMRERERCR7DEREREQkewxEREREJHsMRERERCR7DEREREQkewxEREREJHsMREQka87Ozli8eLG2yyAiLWMgIqJ3Fh0dDX19ffj6+mq7FCKiYmEgIqJ3Fhoaiq+//hpHjhxBYmKitst5r738MEsi0hwGIiJ6JxkZGdi0aROGDRsGX19fhIeHq60/dOgQFAoFoqKi4OHhATMzMzRp0qTAk+5XrFiBKlWqwMjICNWrV8fatWvV1isUCqxatQodO3aEmZkZXF1dER0djevXr6Nly5YwNzdHkyZNcOPGDWmbGzduoHPnzlCpVLCwsECDBg3w+++/v/JYBg8ejI4dO6oty8rKgoODA0JDQwvd5s6dO+jUqRNsbGxgbm6OmjVrYs+ePdL6S5cuoWPHjlAqlbC0tESzZs2kGnNzc/Hdd9+hQoUKMDY2Rp06dbBv3z5p29u3b0OhUGDTpk1o0aIFTExMEBERAQD4z3/+A1dXV5iYmKBGjRpYvnz5K4+LiIrgnZ6ERkSyFxoaKjw8PIQQLx7WWqVKFZGbmyutz3vQY6NGjcShQ4fEpUuXRLNmzUSTJk2kNlu3bhWGhobihx9+EPHx8WLBggVCX19fHDhwQGoDQHzwwQdi06ZNIj4+XnTp0kU4OzuL1q1bi3379onLly+Lxo0bi3bt2knbxMbGipUrV4oLFy6Iq1evim+//VaYmJhITzEXQojKlSuLRYsWCSGEOHr0qNDX1xeJiYlqtZmbm7/yAae+vr7ik08+EefPnxc3btwQO3fuFIcPHxZCCHHv3j1ha2srunXrJk6ePCni4+PFzz//LK5cuSKEEGLhwoVCqVSKDRs2iCtXrogJEyYIQ0NDcfXqVSGEkB6E6ezsLLZs2SJu3rwpEhMTxbp160T58uWlZVu2bBG2trYiPDy8WD9DIuLT7onoHTVp0kQsXrxYCCFEVlaWKFeunNpTq/MC0e+//y4t2717twAg/v33X2kfX3zxhdp+P/vsM9GhQwfpPQDx7bffSu+jo6MFABEaGiot27BhgzAxMXltvTVr1hRLly6V3r8ciIQQws3NTcyZM0d636lTJzFw4MBX7s/d3V1Mmzat0HVBQUHCxcVFPH/+vND1Tk5OYubMmWrLGjRoIIYPHy6E+F8gyuvfPFWqVBHr169XWxYcHCw8PT1fWScRvR5PmRFRscXHx+PEiRPo06cPAMDAwAC9evUq9PTSxx9/LP25fPnyAICUlBQAQFxcHLy8vNTae3l5IS4u7pX7UKlUAAB3d3e1Zc+ePUN6ejqAF6fzxo0bB1dXV1hbW8PCwgJxcXFISEh45TENGTIEYWFhAIDk5GTs3bsXgwcPfmX7ESNGYMaMGfDy8sLUqVNx/vx5aV1sbCyaNWsGQ0PDAtulp6cjMTGxSMft4eEh/fnJkye4ceMG/P39YWFhIb1mzJihdrqQiN6OgbYLIKKyKzQ0FNnZ2XBycpKWCSFgbGyMZcuWwcrKSlr+cihQKBQAXsyheRuF7eN1+x03bhwiIyMxf/58fPTRRzA1NUWPHj3w/PnzV35G//79MWnSJERHR+PYsWNwcXFBs2bNXtl+yJAh8PHxwe7du7F//36EhIRgwYIF+Prrr2FqavpWx/cq5ubm0p8zMjIAAD/99BMaNWqk1k5fX18jn0ckRxwhIqJiyc7Oxpo1a7BgwQLExsZKr3PnzsHJyQkbNmwo8r5cXV1x9OhRtWVHjx6Fm5vbO9V49OhRDBw4EF27doW7uzscHR1x+/bt125jZ2eHLl26ICwsDOHh4Rg0aNAbP6dixYr46quvsHXrVowdOxY//fQTgB
cjWn/88UehV4YplUo4OTm99XGrVCo4OTnh5s2b+Oijj9ReLi4ub6yViArHESIiKpZdu3bh0aNH8Pf3VxsJAoDu3bsjNDQUX331VZH2NX78ePTs2RN169aFt7c3du7cia1bt772irCiqFq1KrZu3YpOnTpBoVBg8uTJRRqVGjJkCDp27IicnBwMGDDgtW1HjRqF9u3bo1q1anj06BEOHjwIV1dXAEBgYCCWLl2K3r17IygoCFZWVjh+/DgaNmyI6tWrY/z48Zg6dSqqVKmCOnXqICwsDLGxsdKVZK8yffp0jBgxAlZWVmjXrh0yMzNx6tQpPHr0CGPGjCl6BxGRhIGIiIolNDQU3t7eBcIQ8CIQzZ07V20+zet06dIF33//PebPn4+RI0fCxcUFYWFhaNmy5TvVuHDhQgwePBhNmjRBuXLlMHHiRGl+0et4e3ujfPnyqFmzptrpwMLk5OQgICAA9+7dg1KpRLt27bBo0SIAL0abDhw4gPHjx6NFixbQ19dHnTp1pHlDI0aMQFpaGsaOHYuUlBS4ublhx44dqFq16ms/c8iQITAzM8O8efMwfvx4mJubw93dHaNGjSpaxxBRAQohhNB2EUREuiQjIwMffPABwsLC0K1bN22XQ0SlgCNERET/X25uLv7++28sWLAA1tbW+PTTT7VdEhGVEgYiIqL/LyEhAS4uLqhQoQLCw8NhYMB/IonkgqfMiIiISPZ42T0RERHJHgMRERERyR4DEREREckeAxERERHJHgMRERERyR4DEREREckeAxERERHJHgMRERERyR4DEREREcne/wMFkw0+GBBMAgAAAABJRU5ErkJggg==", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "scripts.eval_detector(\n", - " scripts.EvalDetectorConfig(\n", - " path=Path(\"logs/demo/detector\"),\n", - " task=tasks.CustomTask(\n", - " # TODO: this won't actually be used, plausibly Tasks should be split better\n", - " # into their training and test data.\n", - " train_data=data.MNIST(),\n", - " # Our anomalous data is the backdoor data from above, except we use the\n", - " # MNIST test split.\n", - " anomalous_data=data.BackdoorData(\n", - " original=data.MNIST(train=False),\n", - " backdoor=data.CornerPixelBackdoor(),\n", - " ),\n", - " # Our normal data is MNIST with added noise, this makes the images OOD\n", - " # but they shouldn't be mechanistically anomalous.\n", - " normal_test_data=data.MNIST(\n", - " train=False,\n", - " transforms={\n", - " \"to_tensor\": data.ToTensor(),\n", - " \"noise\": data.GaussianNoise(0.3),\n", - " },\n", - " ),\n", - " model=models.StoredModel(Path(\"logs/demo/classifier\")),\n", - " ),\n", - " )\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As we can see, adding noise did make the images quite a bit more \"anomalous\" according to our detector (the blue histogram has shifted to the right to higher anomaly scores). But we still have a very clear separation between these \"merely noisy\" inputs and the backdoored inputs. 
(This is a very easy to detect backdoor.)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "cupbearer", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.9" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/notebooks/simple_demo.ipynb b/notebooks/simple_demo.ipynb new file mode 100644 index 00000000..3f6a2bc6 --- /dev/null +++ b/notebooks/simple_demo.ipynb @@ -0,0 +1,514 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "from datetime import datetime\n", + "from pathlib import Path\n", + "\n", + "from cupbearer import data, detectors, models, scripts, tasks, utils" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def get_path(base=\"logs\", time=True):\n", + " if time:\n", + " timestamp = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n", + " else:\n", + " timestamp = datetime.now().strftime(\"%Y-%m-%d\")\n", + " return Path(base) / timestamp" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Training a backdoored classifier\n", + "First, we train a classifier on poisoned data:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "train_data = data.MNIST()\n", + "val_data = data.MNIST(train=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "model = models.MLP(input_shape=(28, 28), hidden_dims=[128, 128], output_dim=10)" + ] + }, + { + 
"cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True (mps), used: True\n", + "TPU available: False, using: 0 TPU cores\n", + "IPU available: False, using: 0 IPUs\n", + "HPU available: False, using: 0 HPUs\n", + "\n", + " | Name | Type | Params\n", + "------------------------------------------------------\n", + "0 | model | MLP | 118 K \n", + "1 | train_accuracy | MulticlassAccuracy | 0 \n", + "2 | val_accuracy | ModuleList | 0 \n", + "3 | test_accuracy | ModuleList | 0 \n", + "------------------------------------------------------\n", + "118 K Trainable params\n", + "0 Non-trainable params\n", + "118 K Total params\n", + "0.473 Total estimated model params size (MB)\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "48219c8359284728a9ec6a2144927c0a", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Sanity Checking: | | 0/? 
[00:00┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n", + "┃ Test metric DataLoader 0 ┃\n", + "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n", + "│ test/acc_epoch 0.9520999789237976 │\n", + "│ test/acc_step 0.9520999789237976 │\n", + "│ test/loss 0.15424881875514984 │\n", + "└───────────────────────────┴───────────────────────────┘\n", + "\n" + ], + "text/plain": [ + "┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n", + "┃\u001b[1m \u001b[0m\u001b[1m Test metric \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m DataLoader 0 \u001b[0m\u001b[1m \u001b[0m┃\n", + "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n", + "│\u001b[36m \u001b[0m\u001b[36m test/acc_epoch \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.9520999789237976 \u001b[0m\u001b[35m \u001b[0m│\n", + "│\u001b[36m \u001b[0m\u001b[36m test/acc_step \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.9520999789237976 \u001b[0m\u001b[35m \u001b[0m│\n", + "│\u001b[36m \u001b[0m\u001b[36m test/loss \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.15424881875514984 \u001b[0m\u001b[35m \u001b[0m│\n", + "└───────────────────────────┴───────────────────────────┘\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "scripts.eval_classifier(\n", + " scripts.EvalClassifierConfig(path=classifier_path, data=val_data, model=model)\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "These results will also have been stored to `logs/demo/classifier/eval.json` if we want to process them further (e.g. 
to compare many runs):" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[{'test/loss': 0.15424881875514984, 'test/acc_step': 0.9520999789237976, 'test/acc_epoch': 0.9520999789237976}]\n" + ] + } + ], + "source": [ + "with open(classifier_path / \"eval.json\") as f:\n", + " print(json.load(f))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Training a backdoor detector\n", + "We'll train a very simple detector using the Mahalanobis distance. Our model is still in memory, but just for demonstration let's load it again:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "models.load(model, classifier_path)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 15/15 [00:06<00:00, 2.30it/s]\n", + "\u001b[32m2024-02-29 22:14:34.794\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36msave_weights\u001b[0m:\u001b[36m228\u001b[0m - \u001b[1mSaving detector to logs/demo/detector/2024-02-29_22-14-27/detector\u001b[0m\n", + "\u001b[32m2024-02-29 22:14:35.134\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36meval\u001b[0m:\u001b[36m149\u001b[0m - \u001b[1mAUC_ROC: 1.0000\u001b[0m\n", + "\u001b[32m2024-02-29 22:14:35.135\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36meval\u001b[0m:\u001b[36m150\u001b[0m - \u001b[1mAP: 1.0000\u001b[0m\n" + ] + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAjsAAAHHCAYAAABZbpmkAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAABR8ElEQVR4nO3dd1gUV/8+/nvpdRdBYCUiEGMBxYoFeyGiorHGEg3Y4hMFu0b5JpbYMPZobMlDQCOWmGgSa0RsiWKN2MWGoo8UE4UVDf38/vDHfFxBhXVhcbxf17XX5Z45M/Oeg4Y7Z5pCCCFAREREJFNGhi6AiIiIqDQx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEFEhCoUCM2bMMHQZb4TIyEgoFArcunVLamvTpg3atGlTJvt//mc1Y8YMKBQK/P3332Wyf3d3dwwaNKhM9kWkK4YdIh2sXLkSCoUCTZo0MXQpJBNHjx7FjBkzkJaWZuhSCinPtREVh4mhCyB6E0VFRcHd3R0nTpzA9evX8d577xm6JCpH9u7dW+J1jh49ii+//BKDBg2CnZ1dsdf7999/YWJSuv8pf1lt8fHxMDLi/zdT+ca/oUQllJCQgKNHj2Lx4sVwdHREVFSUoUuiV3j8+HGZ7s/MzAxmZmaltv38/HxkZmYCACwsLEo97LyMubk5TE1NDbZ/ouJg2CEqoaioKFSoUAEBAQHo3bt3kWHn1q1bUCgUWLhwIb799ltUrVoV5ubmaNSoEU6ePFmo//79+9GyZUtYW1vDzs4O3bp1w+XLl7X6FFyLcfXqVQwcOBAqlQqOjo6YOnUqhBC4c+cOunXrBqVSCbVajUWLFmmtn52djWnTpqFhw4ZQqVSwtrZGy5YtceDAgZce74EDB6BQKLBt27ZCyzZs2ACFQoHY2NgXrp+Tk4Mvv/wS1apVg4WFBRwcHNCiRQtER0dr9bty5Qr69OkDR0dHWFpaokaNGvj888+1+pw5cwadOnWCUqmEjY0N2rdvj2PHjmn1KbiG5tChQxg5ciScnJxQuXJlafnu3bulsba1tUVAQAAuXrz40jEocPHiRbRr1w6WlpaoXLkyZs+ejfz8/EL9irpmZ/ny5ahVqxasrKxQoUIF+Pj4YMOGDQCe/mwnTZoEAPDw8IBCodC6DkihUCAkJARRUVGoVasWzM3NsWfPHmlZUddX/f333+jTpw+USiUcHBwwZswYKSAB//d3NDIystC6z27zVbUVdc3OzZs38eGHH8Le3h5WVlZo2rQpdu7cqdXn4MGDUCgU+PHHHzFnzhxUrlwZFhYWaN++Pa5fv16oJqLXwdNYRCUUFRWFnj17wszMDP3798eqVatw8uRJNGrUqFDfDRs24NGjR/jPf/4DhUKB+fPno2fPnrh586b0f8P79u1Dp06d8O6772LGjBn4999/sXz5cjRv3hx//fUX3N3dtbbZt29feHp6Yt68edi5cydmz54Ne3t7rFmzBu3atcNXX32FqKgoTJw4EY0aNUKrVq0AABqNBv/973/Rv39/fPLJJ3j06BHCw8Ph7++PEydOoF69ekUeb5s2beDq6oqoqCj06NGj0FhUrVoVvr6+LxyvGTNmICwsDMOGDUPjxo2h0Whw6tQp/PXXX3j//fcBAOfOnUPLli1hamqK4cOHw93dHTdu3MD27dsxZ84cAE+DRsuWLaFUKvHZZ5/B1NQUa9asQZs2bXDo0KFC10+NHDkSjo6OmDZtmjSz88MPPyAoKAj+/v746quv8OTJE6xatQotWrTAmTNnCo31s5KTk9G2bVvk5uZiypQpsLa2xrfffgtLS8sXrlPgu+++w+jRo9G7d28pdJw7dw7Hjx/HRx99hJ49e+Lq1avYuHEjlixZgooVKwIAHB0dpW3s378fP/74I0JCQlCxYsWX1goAffr0gbu7O8LCwnDs2DEsW7YMDx8+xLp1615
Z77OKU9uzUlJS0KxZMzx58gSjR4+Gg4MD1q5diw8++AA//fRTob9D8+bNg5GRESZOnIj09HTMnz8fAwYMwPHjx0tUJ9FLCSIqtlOnTgkAIjo6WgghRH5+vqhcubIYM2aMVr+EhAQBQDg4OIgHDx5I7b/++qsAILZv3y611atXTzg5OYl//vlHajt79qwwMjISgYGBUtv06dMFADF8+HCpLTc3V1SuXFkoFAoxb948qf3hw4fC0tJSBAUFafXNysrSqvPhw4fC2dlZDBkyRKsdgJg+fbr0PTQ0VJibm4u0tDSpLTU1VZiYmGj1K0rdunVFQEDAS/u0atVK2Nraitu3b2u15+fnS3/u3r27MDMzEzdu3JDa7t27J2xtbUWrVq2ktoiICAFAtGjRQuTm5krtjx49EnZ2duKTTz7R2kdycrJQqVSF2p83duxYAUAcP35caktNTRUqlUoAEAkJCVJ769atRevWraXv3bp1E7Vq1Xrp9hcsWFBoOwUACCMjI3Hx4sUilz37Myj4e/LBBx9o9Rs5cqQAIM6ePSuE+L+/oxEREa/c5stqc3Nz0/p7VjBOf/zxh9T26NEj4eHhIdzd3UVeXp4QQogDBw4IAMLT01Pr7+XXX38tAIjz588X2heRrngai6gEoqKi4OzsjLZt2wJ4Ot3ft29fbNq0CXl5eYX69+3bFxUqVJC+t2zZEsDTaX4ASEpKQlxcHAYNGgR7e3upX506dfD+++9j165dhbY5bNgw6c/Gxsbw8fGBEAJDhw6V2u3s7FCjRg1pPwV9C64jyc/Px4MHD5CbmwsfHx/89ddfLz3uwMBAZGVl4aeffpLaNm/ejNzcXAwcOPCl69rZ2eHixYu4du1akcvv37+Pw4cPY8iQIahSpYrWMoVCAQDIy8vD3r170b17d7z77rvS8kqVKuGjjz7Cn3/+CY1Go7XuJ598AmNjY+l7dHQ00tLS0L9/f/z999/Sx9jYGE2aNHnl6bxdu3ahadOmaNy4sdTm6OiIAQMGvHS9gjG4e/dukacwi6t169bw8vIqdv/g4GCt76NGjQKAIv9O6dOuXbvQuHFjtGjRQmqzsbHB8OHDcevWLVy6dEmr/+DBg7Wub3r+3wiRPjDsEBVTXl4eNm3ahLZt2yIhIQHXr1/H9evX0aRJE6SkpCAmJqbQOs//8i4IPg8fPgQA3L59GwBQo0aNQut6enri77//LnRx7fPbVKlUsLCwkE4vPNtesJ8Ca9euRZ06daRrZxwdHbFz506kp6e/9Nhr1qyJRo0aaV2fFBUVhaZNm77yTrSZM2ciLS0N1atXh7e3NyZNmoRz585Jywt+qdWuXfuF27h//z6ePHnywnHKz8/HnTt3tNo9PDy0vheErXbt2sHR0VHrs3fvXqSmpr70OG7fvo1q1aoVai+qpudNnjwZNjY2aNy4MapVq4bg4GAcOXLkles96/njeZXna61atSqMjIy0ngdUGm7fvv3Cn1PB8me96t8IkT7wmh2iYtq/fz+SkpKwadMmbNq0qdDyqKgodOjQQavt2ZmFZwkhdK6jqG0WZz/r16/HoEGD0L17d0yaNAlOTk4wNjZGWFgYbty48cr9BgYGYsyYMbh79y6ysrJw7NgxfPPNN69cr1WrVrhx4wZ+/fVX7N27F//973+xZMkSrF69WmuWSt+ev5am4ELiH374AWq1ulD/0ryjydPTE/Hx8dixYwf27NmDn3/+GStXrsS0adPw5ZdfFmsbxbk26GUKZsle9L1AUTOUpak0/o0QPY9hh6iYoqKi4OTkhBUrVhRatnXrVmzbtg2rV68u0S8lNzc3AE+fVfK8K1euoGLFirC2tta96Gf89NNPePfdd7F161atX3TTp08v1vr9+vXD+PHjsXHjRvz7778wNTVF3759i7Wuvb09Bg8ejMGDByMjIwOtWrXCjBkzMGzYMOm01IULF164vqOjI6ysrF44TkZGRnB1dX1pDVWrVgUAODk5wc/Pr1h1P8v
Nza3IU3FF1VQUa2tr9O3bF3379kV2djZ69uyJOXPmIDQ0FBYWFi8MH7q6du2a1mzQ9evXkZ+fL13YXDCD8vyDAp+feQFeHIyK4ubm9sKfU8FyorLG01hExfDvv/9i69at6NKlC3r37l3oExISgkePHuG3334r0XYrVaqEevXqYe3atVq/dC5cuIC9e/eic+fOejuGgv+Dfvb/mI8fP/7S28afVbFiRXTq1Anr169HVFQUOnbsWOjUWVH++ecfre82NjZ47733kJWVBeBpkGnVqhW+//57JCYmavUtqNXY2BgdOnTAr7/+qnUaJiUlBRs2bECLFi2gVCpfWoe/vz+USiXmzp2LnJycQsvv37//0vU7d+6MY8eO4cSJE1rrFOc5S8+PgZmZGby8vCCEkGopCLX6ekrx86F8+fLlAIBOnToBAJRKJSpWrIjDhw9r9Vu5cmWhbZWkts6dO+PEiRNaf68eP36Mb7/9Fu7u7iW67ohIXzizQ1QMv/32Gx49eoQPPvigyOVNmzaVHjBY3NmOAgsWLECnTp3g6+uLoUOHSreeq1Qqvb6fqkuXLti6dSt69OiBgIAAJCQkYPXq1fDy8kJGRkaxthEYGIjevXsDAGbNmlWsdby8vNCmTRs0bNgQ9vb2OHXqFH766SeEhIRIfZYtW4YWLVqgQYMGGD58ODw8PHDr1i3s3LkTcXFxAIDZs2cjOjoaLVq0wMiRI2FiYoI1a9YgKysL8+fPf2UdSqUSq1atwscff4wGDRqgX79+cHR0RGJiInbu3InmzZu/9LTcZ599hh9++AEdO3bEmDFjpFvP3dzctK5BKkqHDh2gVqvRvHlzODs74/Lly/jmm28QEBAAW1tbAEDDhg0BAJ9//jn69esHU1NTdO3aVeeZvYSEBHzwwQfo2LEjYmNjsX79enz00UeoW7eu1GfYsGGYN28ehg0bBh8fHxw+fBhXr14ttK2S1DZlyhRs3LgRnTp1wujRo2Fvb4+1a9ciISEBP//8M5+2TIZhwDvBiN4YXbt2FRYWFuLx48cv7DNo0CBhamoq/v77b+m23gULFhTqh+du6xVCiH379onmzZsLS0tLoVQqRdeuXcWlS5e0+hTcUnz//n2t9qCgIGFtbV1oP61bt9a63Tk/P1/MnTtXuLm5CXNzc1G/fn2xY8cOERQUJNzc3F5ZoxBCZGVliQoVKgiVSiX+/fffF47Fs2bPni0aN24s7OzshKWlpahZs6aYM2eOyM7O1up34cIF0aNHD2FnZycsLCxEjRo1xNSpU7X6/PXXX8Lf31/Y2NgIKysr0bZtW3H06FGtPgW3np88ebLIeg4cOCD8/f2FSqUSFhYWomrVqmLQoEHi1KlTrzyWc+fOidatWwsLCwvxzjvviFmzZonw8PBX3nq+Zs0a0apVK+Hg4CDMzc1F1apVxaRJk0R6errW9mfNmiXeeecdYWRkpLVNACI4OLjImp7/WRX8Pbl06ZLo3bu3sLW1FRUqVBAhISGFfmZPnjwRQ4cOFSqVStja2oo+ffqI1NTUIn/+L6rt+VvPhRDixo0bonfv3tLPsnHjxmLHjh1afQpuPd+yZYtW+8tuiSfSlUIIXgVGRMWTm5sLFxcXdO3aFeHh4YYuh4ioWDifSETF9ssvv+D+/fsIDAw0dClERMXGmR0ieqXjx4/j3LlzmDVrFipWrPjKhxASEZUnnNkholdatWoVRowYAScnpxK/W4mIyNA4s0NERESyxpkdIiIikjWGHSIiIpI1PlQQT9+Zc+/ePdja2ur9ke1ERERUOoQQePToEVxcXF76wEqGHQD37t175Xt1iIiIqHy6c+cOKleu/MLlDDuA9Lj2O3fuvPL9OkRERFQ+aDQauLq6Sr/HX4RhB//3Rl+lUsmwQ0RE9IZ51SUovECZiIiIZI1hh4iIiGSNYYeIiIhkjdfsEBHRGyM/Px/Z2dmGLoPKiKmpKYyNjV97Oww7RET0RsjOzkZCQgLy8/MNXQqVITs
7O6jV6td6Dh7DDhERlXtCCCQlJcHY2Biurq4vfYAcyYMQAk+ePEFqaioAoFKlSjpvi2GHiIjKvdzcXDx58gQuLi6wsrIydDlURiwtLQEAqampcHJy0vmUFqMxERGVe3l5eQAAMzMzA1dCZa0g3Obk5Oi8DYYdIiJ6Y/D9hW8fffzMGXaIiIhI1hh2iIiISHLw4EEoFAqkpaUZuhS9MegFyu7u7rh9+3ah9pEjR2LFihXIzMzEhAkTsGnTJmRlZcHf3x8rV66Es7Oz1DcxMREjRozAgQMHYGNjg6CgIISFhcHEhNdeExHJ3ZLoq2W6v3HvVy9R/0GDBmHt2rUICwvDlClTpPZffvkFPXr0gBBC3yVSEQw6s3Py5EkkJSVJn+joaADAhx9+CAAYN24ctm/fji1btuDQoUO4d+8eevbsKa2fl5eHgIAAZGdn4+jRo1i7di0iIyMxbdo0gxwPERHR8ywsLPDVV1/h4cOHetsmH6xYMgYNO46OjlCr1dJnx44dqFq1Klq3bo309HSEh4dj8eLFaNeuHRo2bIiIiAgcPXoUx44dAwDs3bsXly5dwvr161GvXj106tQJs2bNwooVK/gXgYiIygU/Pz+o1WqEhYW9sM/PP/+MWrVqwdzcHO7u7li0aJHWcnd3d8yaNQuBgYFQKpUYPnw4IiMjYWdnhx07dqBGjRqwsrJC79698eTJE6xduxbu7u6oUKECRo8eLd3NBgA//PADfHx8YGtrC7VajY8++kh6lo1clZtrdrKzs7F+/XoMGTIECoUCp0+fRk5ODvz8/KQ+NWvWRJUqVRAbGwsAiI2Nhbe3t9ZpLX9/f2g0Gly8eLHMj4GIiOh5xsbGmDt3LpYvX467d+8WWn769Gn06dMH/fr1w/nz5zFjxgxMnToVkZGRWv0WLlyIunXr4syZM5g6dSoA4MmTJ1i2bBk2bdqEPXv24ODBg+jRowd27dqFXbt24YcffsCaNWvw008/SdvJycnBrFmzcPbsWfzyyy+4desWBg0aVJpDYHDl5sKWX375BWlpadKAJycnw8zMDHZ2dlr9nJ2dkZycLPV5NugULC9Y9iJZWVnIysqSvms0Gj0cQekq6rx0Sc8dExGRYfTo0QP16tXD9OnTER4errVs8eLFaN++vRRgqlevjkuXLmHBggVaIaRdu3aYMGGC9P2PP/5ATk4OVq1ahapVqwIAevfujR9++AEpKSmwsbGBl5cX2rZtiwMHDqBv374AgCFDhkjbePfdd7Fs2TI0atQIGRkZsLGxKa0hMKhyM7MTHh6OTp06wcXFpdT3FRYWBpVKJX1cXV1LfZ9ERPR2++qrr7B27VpcvnxZq/3y5cto3ry5Vlvz5s1x7do1rdNPPj4+hbZpZWUlBR3g6f/wu7u7a4UWZ2dnrdNUp0+fRteuXVGlShXY2tqidevWAJ7e8CNX5SLs3L59G/v27cOwYcOkNrVajezs7EK3vqWkpECtVkt9UlJSCi0vWPYioaGhSE9Plz537tzR05EQEREVrVWrVvD390doaKhO61tbWxdqMzU11fquUCiKbCt4eerjx4/h7+8PpVKJqKgonDx5Etu2bQMg74uey0XYiYiIgJOTEwICAqS2hg0bwtTUFDExMVJbfHw8EhMT4evrCwDw9fXF+fPntRJrdHQ0lEolvLy8Xrg/c3NzKJVKrQ8REVFpmzdvHrZv3y5dewoAnp6eOHLkiFa/I0eOoHr16jq/C+pFrly5gn/++Qfz5s1Dy5YtUbNmTdlfnAyUg2t28vPzERERgaCgIK1n46hUKgwdOhTjx4+Hvb09lEolRo0aBV9fXzRt2hQA0KFDB3h5eeHjjz/G/PnzkZycjC+++ALBwcEwNzc31CEREREVydvbGwMGDMCyZcuktgkTJqBRo0aYNWsW+vbti9jYWHzzzTdYuXKl3vdfpUoVmJmZYfny5fj0009x4cIFzJo1S+/7KW8MPrOzb98+JCYmal0wVWDJkiXo0qULevX
qhVatWkGtVmPr1q3ScmNjY+zYsQPGxsbw9fXFwIEDERgYiJkzZ5blIRARERXbzJkzpdNKANCgQQP8+OOP2LRpE2rXro1p06Zh5syZpXKHlKOjIyIjI7FlyxZ4eXlh3rx5WLhwod73U94oBB/fCI1GA5VKhfT09HJ7Sot3YxHR2ywzMxMJCQnw8PCAhYWFocuhMvSyn31xf38bfGaHiIiIqDQx7BAREZGsGfwCZdIdT20RERG9Gmd2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiIqEju7u5YunSpoct4bXzODhERvbkOhJXt/tqG6rRabGwsWrRogY4dO2Lnzp16LopehTM7REREpSw8PByjRo3C4cOHce/ePUOX89Zh2CmnlkRf1foQEdGbKSMjA5s3b8aIESMQEBCAyMhIadnBgwehUCgQExMDHx8fWFlZoVmzZoiPj9faxqpVq1C1alWYmZmhRo0a+OGHH7SWKxQKrFmzBl26dIGVlRU8PT0RGxuL69evo02bNrC2tkazZs1w48YNaZ0bN26gW7ducHZ2ho2NDRo1aoR9+/a99FgSExPRrVs32NjYQKlUok+fPkhJSZGWDxo0CN27d9daZ+zYsWjTpo30/aeffoK3tzcsLS3h4OAAPz8/PH78uJijqRuGHSIiolL0448/ombNmqhRowYGDhyI77//HkIIrT6ff/45Fi1ahFOnTsHExARDhgyRlm3btg1jxozBhAkTcOHCBfznP//B4MGDceDAAa1tzJo1C4GBgYiLi0PNmjXx0Ucf4T//+Q9CQ0Nx6tQpCCEQEhIi9c/IyEDnzp0RExODM2fOoGPHjujatSsSExOLPI78/Hx069YNDx48wKFDhxAdHY2bN2+ib9++xR6LpKQk9O/fH0OGDMHly5dx8OBB9OzZs9B46Buv2SEiIipF4eHhGDhwIACgY8eOSE9Px6FDh7RmO+bMmYPWrVsDAKZMmYKAgABkZmbCwsICCxcuxKBBgzBy5EgAwPjx43Hs2DEsXLgQbdu2lbYxePBg9OnTBwAwefJk+Pr6YurUqfD39wcAjBkzBoMHD5b6161bF3Xr1pW+z5o1C9u2bcNvv/2mFYoKxMTE4Pz580hISICrqysAYN26dahVqxZOnjyJRo0avXIskpKSkJubi549e8LNzQ0A4O3t/epBfE2c2SEiIiol8fHxOHHiBPr37w8AMDExQd++fREeHq7Vr06dOtKfK1WqBABITU0FAFy+fBnNmzfX6t+8eXNcvnz5hdtwdnYGoB0knJ2dkZmZCY1GA+DpzM7EiRPh6ekJOzs72NjY4PLlyy+c2bl8+TJcXV2loAMAXl5esLOzK1TLi9StWxft27eHt7c3PvzwQ3z33Xd4+PBhsdZ9HQw7REREpSQ8PBy5ublwcXGBiYkJTExMsGrVKvz8889IT0+X+pmamkp/VigUAJ6eNiqJorbxsu1OnDgR27Ztw9y5c/HHH38gLi4O3t7eyM7OLuFR/h8jI6NCp6RycnKkPxsbGyM6Ohq7d++Gl5cXli9fjho1aiAhIUHnfRarrlLdOhER0VsqNzcX69atw6JFixAXFyd9zp49CxcXF2zcuLFY2/H09MSRI0e02o4cOQIvL6/Xqu/IkSMYNGgQevToAW9vb6jVaty6deulddy5cwd37tyR2i5duoS0tDSpFkdHRyQlJWmtFxcXp/VdoVCgefPm+PLLL3HmzBmYmZlh27Ztr3Usr8JrdoiIiErBjh078PDhQwwdOhQqlUprWa9evRAeHo4FCxa8cjuTJk1Cnz59UL9+ffj5+WH79u3YunXrK++cepVq1aph69at6Nq1KxQKBaZOnfrS2SQ/Pz94e3tjwIABWLp0KXJzczFy5Ei0bt0aPj4+AIB27dphwYIFWLduHXx9fbF+/XpcuHAB9evXBwAcP34cMTEx6NChA5ycnHD8+HHcv38fnp6er3Usr8KZHSIiolI
QHh4OPz+/QkEHeBp2Tp06hXPnzr1yO927d8fXX3+NhQsXolatWlizZg0iIiK0LnDWxeLFi1GhQgU0a9YMXbt2hb+/Pxo0aPDC/gqFAr/++isqVKiAVq1awc/PD++++y42b94s9fH398fUqVPx2WefoVGjRnj06BECAwOl5UqlEocPH0bnzp1RvXp1fPHFF1i0aBE6der0WsfyKgpR2vd7vQE0Gg1UKhXS09OhVCoNXQ4A6PxsnXHvV9dzJUREhpeZmYmEhAR4eHjAwsLC0OVQGXrZz764v785s0NERESyxrBDREREssawQ0RERLLGsENERESyxrBDRERvDN5T8/bRx8+cYYeIiMo9Y2NjAHitp/vSm+nJkycAtJ8GXVJ8qCAREZV7JiYmsLKywv3792FqagojI/6/utwJIfDkyROkpqbCzs5OCry6YNghIqJyT6FQoFKlSkhISMDt27cNXQ6VITs7O6jV6tfaBsMOERG9EczMzFCtWjWeynqLmJqavtaMTgGGHSIiemMYGRnxCcpUYjzpSURERLLGsENERESyxrBDREREssawQ0RERLLGsENERESyxrBDREREssawQ0RERLLGsENERESyxrBDREREsmbwsPO///0PAwcOhIODAywtLeHt7Y1Tp05Jy4UQmDZtGipVqgRLS0v4+fnh2rVrWtt48OABBgwYAKVSCTs7OwwdOhQZGRllfShERERUDhk07Dx8+BDNmzeHqakpdu/ejUuXLmHRokWoUKGC1Gf+/PlYtmwZVq9ejePHj8Pa2hr+/v7IzMyU+gwYMAAXL15EdHQ0duzYgcOHD2P48OGGOCQiIiIqZxRCCGGonU+ZMgVHjhzBH3/8UeRyIQRcXFwwYcIETJw4EQCQnp4OZ2dnREZGol+/frh8+TK8vLxw8uRJ+Pj4AAD27NmDzp074+7du3BxcXllHRqNBiqVCunp6VAqlfo7wNewJPqqTuuNe7+6nishIiIqn4r7+9ugMzu//fYbfHx88OGHH8LJyQn169fHd999Jy1PSEhAcnIy/Pz8pDaVSoUmTZogNjYWABAbGws7Ozsp6ACAn58fjIyMcPz48SL3m5WVBY1Go/UhIiIieTJo2Ll58yZWrVqFatWq4ffff8eIESMwevRorF27FgCQnJwMAHB2dtZaz9nZWVqWnJwMJycnreUmJiawt7eX+jwvLCwMKpVK+ri6uur70IiIiKicMGjYyc/PR4MGDTB37lzUr18fw4cPxyeffILVq1eX6n5DQ0ORnp4ufe7cuVOq+yMiIiLDMWjYqVSpEry8vLTaPD09kZiYCABQq9UAgJSUFK0+KSkp0jK1Wo3U1FSt5bm5uXjw4IHU53nm5uZQKpVaHyIiIpIng4ad5s2bIz4+Xqvt6tWrcHNzAwB4eHhArVYjJiZGWq7RaHD8+HH4+voCAHx9fZGWlobTp09Lffbv34/8/Hw0adKkDI6CiIiIyjMTQ+583LhxaNasGebOnYs+ffrgxIkT+Pbbb/Htt98CABQKBcaOHYvZs2ejWrVq8PDwwNSpU+Hi4oLu3bsDeDoT1LFjR+n0V05ODkJCQtCvX79i3YlFRERE8mbQsNOoUSNs27YNoaGhmDlzJjw8PLB06VIMGDBA6vPZZ5/h8ePHGD58ONLS0tCiRQvs2bMHFhYWUp+oqCiEhISgffv2MDIyQq9evbBs2TJDHBIRERGVMwZ9zk55wefsEBERvXneiOfsEBEREZU2hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjUTQxdA+rUk+qrW93HvVzdQJUREROUDZ3aIiIhI1hh2iIi
ISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYMGnZmzJgBhUKh9alZs6a0PDMzE8HBwXBwcICNjQ169eqFlJQUrW0kJiYiICAAVlZWcHJywqRJk5Cbm1vWh0JERETllImhC6hVqxb27dsnfTcx+b+Sxo0bh507d2LLli1QqVQICQlBz549ceTIEQBAXl4eAgICoFarcfToUSQlJSEwMBCmpqaYO3dumR8LERERlT8GDzsmJiZQq9WF2tPT0xEeHo4NGzagXbt2AICIiAh4enri2LFjaNq0Kfbu3YtLly5h3759cHZ2Rr169TBr1ixMnjwZM2bMgJmZWVkfDhEREZUzBr9m59q1a3BxccG7776LAQMGIDExEQBw+vRp5OTkwM/PT+pbs2ZNVKlSBbGxsQCA2NhYeHt7w9nZWerj7+8PjUaDixcvvnCfWVlZ0Gg0Wh8iIiKSJ4OGnSZNmiAyMhJ79uzBqlWrkJCQgJYtW+LRo0dITk6GmZkZ7OzstNZxdnZGcnIyACA5OVkr6BQsL1j2ImFhYVCpVNLH1dVVvwdGRERE5YZBT2N16tRJ+nOdOnXQpEkTuLm54ccff4SlpWWp7Tc0NBTjx4+Xvms0GgYeIiIimTL4aaxn2dnZoXr16rh+/TrUajWys7ORlpam1SclJUW6xketVhe6O6vge1HXARUwNzeHUqnU+hAREZE8lauwk5GRgRs3bqBSpUpo2LAhTE1NERMTIy2Pj49HYmIifH19AQC+vr44f/48UlNTpT7R0dFQKpXw8vIq8/qJiIio/DHoaayJEyeia9eucHNzw7179zB9+nQYGxujf//+UKlUGDp0KMaPHw97e3solUqMGjUKvr6+aNq0KQCgQ4cO8PLywscff4z58+cjOTkZX3zxBYKDg2Fubm7IQyMiIqJywqBh5+7du+jfvz/++ecfODo6okWLFjh27BgcHR0BAEuWLIGRkRF69eqFrKws+Pv7Y+XKldL6xsbG2LFjB0aMGAFfX19YW1sjKCgIM2fONNQhERERUTmjEEIIQxdhaBqNBiqVCunp6eXm+p0l0Vf1sp1x71fXy3aIiIjKm+L+/i5X1+wQERER6RvDDhEREckaww4RERHJGsMOERERyRrDDhEREckaww4RERHJGsMOERERyRrDDhEREcmaQZ+gTE/p6wGCREREVBhndoiIiEjWGHaIiIhI1ngai4iISG4OhGl/bxtqmDrKCc7sEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGs6RR2bt68qe86iIiIiEqFTmHnvffeQ9u2bbF+/XpkZmbquyYiIiIivdEp7Pz111+oU6cOxo8fD7Vajf/85z84ceKEvmsjIiKi5x0I0/7QK+kUdurVq4evv/4a9+7dw/fff4+kpCS0aNECtWvXxuLFi3H//n1910lERES6ej4gvWUh6bUuUDYxMUHPnj2xZcsWfPXVV7h+/TomTpwIV1dXBAYGIikpSV91EhEREenktcLOqVOnMHLkSFSqVAmLFy/GxIkTcePGDURHR+PevXvo1q2bvuokIiIi0omJListXrwYERERiI+PR+fOnbFu3Tp07twZRkZPs5OHhwciIyPh7u6uz1qJiIjoeW/ZKSld6BR2Vq1ahSFDhmDQoEGoVKlSkX2cnJwQHh7+WsURERERvS6dws61a9de2cfMzAxBQUG6bJ6IiIhIb3S6ZiciIgJbtmwp1L5lyxasXbv2tYsiIiIi0hedwk5YWBgqVqxYqN3JyQlz58597aKIiIiI9EWnsJOYmAgPD49C7W5ubkhMTHztooiIiIj0Raew4+TkhHPnzhVqP3v
2LBwcHF67KCIiIiJ90Sns9O/fH6NHj8aBAweQl5eHvLw87N+/H2PGjEG/fv10KmTevHlQKBQYO3as1JaZmYng4GA4ODjAxsYGvXr1QkpKitZ6iYmJCAgIgJWVFZycnDBp0iTk5ubqVAMRERHJj053Y82aNQu3bt1C+/btYWLydBP5+fkIDAzU6ZqdkydPYs2aNahTp45W+7hx47Bz505s2bIFKpUKISEh6NmzJ44cOQIAyMvLQ0BAANRqNY4ePYqkpCQEBgbC1NSU1w4RERERAB1ndszMzLB582ZcuXIFUVFR2Lp1K27cuIHvv/8eZmZmJdpWRkYGBgwYgO+++w4VKlSQ2tPT0xEeHo7FixejXbt2aNiwISIiInD06FEcO3YMALB3715cunQJ69evR7169dCpUyfMmjULK1asQHZ2ti6HRkRERDLzWq+LqF69Oj788EN06dIFbm5uOm0jODgYAQEB8PPz02o/ffo0cnJytNpr1qyJKlWqIDY2FgAQGxsLb29vODs7S338/f2h0Whw8eLFF+4zKysLGo1G60NERETypNNprLy8PERGRiImJgapqanIz8/XWr5///5ibWfTpk3466+/cPLkyULLkpOTYWZmBjs7O612Z2dnJCcnS32eDToFywuWvUhYWBi+/PLLYtVIREREbzadws6YMWMQGRmJgIAA1K5dGwqFosTbuHPnDsaMGYPo6GhYWFjoUobOQkNDMX78eOm7RqOBq6trmdZAREREZUOnsLNp0yb8+OOP6Ny5s847Pn36NFJTU9GgQQOpLS8vD4cPH8Y333yD33//HdnZ2UhLS9Oa3UlJSYFarQYAqNVqnDhxQmu7BXdrFfQpirm5OczNzXWunYiIiN4cOl+g/N57773Wjtu3b4/z588jLi5O+vj4+GDAgAHSn01NTRETEyOtEx8fj8TERPj6+gIAfH19cf78eaSmpkp9oqOjoVQq4eXl9Vr1ERERkTzoNLMzYcIEfP311/jmm290OoUFALa2tqhdu7ZWm7W1NRwcHKT2oUOHYvz48bC3t4dSqcSoUaPg6+uLpk2bAgA6dOgALy8vfPzxx5g/fz6Sk5PxxRdfIDg4mDM3REREBEDHsPPnn3/iwIED2L17N2rVqgVTU1Ot5Vu3btVLcUuWLIGRkRF69eqFrKws+Pv7Y+XKldJyY2Nj7NixAyNGjICvry+sra0RFBSEmTNn6mX/RERE9ObTKezY2dmhR48e+q4FBw8e1PpuYWGBFStWYMWKFS9cx83NDbt27dJ7LURERCQPOoWdiIgIfddBREREVCp0fqhgbm4u9u3bhzVr1uDRo0cAgHv37iEjI0NvxRERERG9Lp1mdm7fvo2OHTsiMTERWVlZeP/992Fra4uvvvoKWVlZWL16tb7rJCIievscCDN0BbKg08zOmDFj4OPjg4cPH8LS0lJq79Gjh9at4kRERESGptPMzh9//IGjR48Weumnu7s7/ve//+mlMCIiIipFz88atQ01TB1lQKewk5+fj7y8vELtd+/eha2t7WsXRfqzJPpqobZx71c3QCVERESGodNprA4dOmDp0qXSd4VCgYyMDEyfPv21XiFBREREpG86zewsWrQI/v7+8PLyQmZmJj766CNcu3YNFStWxMaNG/VdIxEREZHOdAo7lStXxtmzZ7Fp0yacO3cOGRkZGDp0KAYMGKB1wTIRERGRoekUdgDAxMQEAwcO1GctRERERHqnU9hZt27dS5cHBgbqVAwRERGRvukUdsaMGaP1PScnB0+ePIGZmRmsrKwYdoiIiKjc0OlurIcPH2p9MjIyEB8fjxYtWvACZSIiIipXdH431vOqVauGefPmFZr1ISIiIjIkvYUd4OlFy/fu3dPnJomIiIhei07X7Pz2229a34UQSEpKwjfffIPmzZvrpTAiIiIifdAp7HTv3l3ru0KhgKOjI9q1a4dFixbpoy4iIiIivdD53VhEREREbwK9XrNDREREVN7oNLMzfvz
4YvddvHixLrsgIiIi0gudws6ZM2dw5swZ5OTkoEaNGgCAq1evwtjYGA0aNJD6KRQK/VRJREREpCOdwk7Xrl1ha2uLtWvXokKFCgCePmhw8ODBaNmyJSZMmKDXIomIiIh0pdM1O4sWLUJYWJgUdACgQoUKmD17Nu/GIiIionJFp7Cj0Whw//79Qu3379/Ho0ePXrsoIiIiIn3RKez06NEDgwcPxtatW3H37l3cvXsXP//8M4YOHYqePXvqu0YiIiIinel0zc7q1asxceJEfPTRR8jJyXm6IRMTDB06FAsWLNBrgURERESvQ6ewY2VlhZUrV2LBggW4ceMGAKBq1aqwtrbWa3FEREREr+u1HiqYlJSEpKQkVKtWDdbW1hBC6KsuIiIiIr3QKez8888/aN++PapXr47OnTsjKSkJADB06FDedk5ERETlik5hZ9y4cTA1NUViYiKsrKyk9r59+2LPnj16K46IiIjodel0zc7evXvx+++/o3Llylrt1apVw+3bt/VSGBEREZE+6DSz8/jxY60ZnQIPHjyAubn5axdFREREpC86hZ2WLVti3bp10neFQoH8/HzMnz8fbdu21VtxRERERK9Lp9NY8+fPR/v27XHq1ClkZ2fjs88+w8WLF/HgwQMcOXJE3zUSERER6UynmZ3atWvj6tWraNGiBbp164bHjx+jZ8+eOHPmDKpWrarvGomIiIh0VuKZnZycHHTs2BGrV6/G559/Xho1EREREelNiWd2TE1Nce7cudKohYiIiEjvdDqNNXDgQISHh+u7FiIiIiK90+kC5dzcXHz//ffYt28fGjZsWOidWIsXL9ZLcURERESvq0QzOzdv3kR+fj4uXLiABg0awNbWFlevXsWZM2ekT1xcXLG3t2rVKtSpUwdKpRJKpRK+vr7YvXu3tDwzMxPBwcFwcHCAjY0NevXqhZSUFK1tJCYmIiAgAFZWVnBycsKkSZOQm5tbksN66yyJvqr1ISIikrMSzexUq1YNSUlJOHDgAICnr4dYtmwZnJ2dddp55cqVMW/ePFSrVg1CCKxduxbdunXDmTNnUKtWLYwbNw47d+7Eli1boFKpEBISgp49e0q3t+fl5SEgIABqtRpHjx5FUlISAgMDYWpqirlz5+pUExEREcmLQpTgVeVGRkZITk6Gk5MTAECpVCIuLg7vvvuu3gqyt7fHggUL0Lt3bzg6OmLDhg3o3bs3AODKlSvw9PREbGwsmjZtit27d6NLly64d++eFLhWr16NyZMn4/79+zAzMyvWPjUaDVQqFdLT06FUKvV2LMVl6NmVce9XN+j+iYjoBQ6Eld2+2oaW3b70pLi/v3W6QLlACXLSK+Xl5WHTpk14/PgxfH19cfr0aeTk5MDPz0/qU7NmTVSpUgWxsbEAgNjYWHh7e2vNLPn7+0Oj0eDixYsv3FdWVhY0Go3Wh4iIiOSpRGFHoVBAoVAUansd58+fh42NDczNzfHpp59i27Zt8PLyQnJyMszMzGBnZ6fV39nZGcnJyQCA5OTkQqfQCr4X9ClKWFgYVCqV9HF1dX2tYyAiIqLyq0TX7AghMGjQIOlln5mZmfj0008L3Y21devWYm+zRo0aiIuLQ3p6On766ScEBQXh0KFDJSmrxEJDQzF+/Hjpu0ajKdPAY+jTVkRERG+TEoWdoKAgre8DBw587QLMzMzw3nvvAQAaNmyIkydP4uuvv0bfvn2RnZ2NtLQ0rdmdlJQUqNVqAIBarcaJEye0tldwt1ZBn6KYm5vz7exERERviRKFnYiIiNKqQ5Kfn4+srCw0bNgQpqamiImJQa9evQAA8fHxSExMhK+vLwDA19cXc+bMQWpqqnTRdHR0NJRKJby8vEq9ViIiIir/dHqooL6EhoaiU6dOqFKlCh49eoQNGzbg4MGD+P3336FSqTB06FCMHz8e9vb2UCqVGDVqFHx9fdG0aVMAQIcOHeDl5YWPP/4Y8+fPR3JyMr744gsEBwdz5oaIiIgAGDj
spKamIjAwEElJSVCpVKhTpw5+//13vP/++wCAJUuWwMjICL169UJWVhb8/f2xcuVKaX1jY2Ps2LEDI0aMgK+vL6ytrREUFISZM2ca6pCIiIionCnRc3bkqqyfs1PeLlDmc3aIiMopPmfnpcrkOTtERERE5R3DDhEREckaww4RERHJGsMOERERyRrDDhEREckaww4RERHJGsMOERERyRrDDhEREckaww4RERHJmkFfF0FERET/v7J8WvJbhjM7REREJGsMO0RERCRrPI1FRERERZ9GewNfDloUzuwQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGs8UWgREREhlDUizepVHBmh4iIiGSNYYeIiIhkjWGHiIiIZI1hh4iIiGSNYYeIiIhkjWGHiIiIZI1hh4iIiGSNYYeIiIhkjWGHiIiIZI1PUCYsib5aqG3c+9UNUAkREZH+cWaHiIiIZI1hh4iIiGSNp7GIiIioaM+/rLRtqGHqeE0GndkJCwtDo0aNYGtrCycnJ3Tv3h3x8fFafTIzMxEcHAwHBwfY2NigV69eSElJ0eqTmJiIgIAAWFlZwcnJCZMmTUJubm5ZHgoRERGVUwYNO4cOHUJwcDCOHTuG6Oho5OTkoEOHDnj8+LHUZ9y4cdi+fTu2bNmCQ4cO4d69e+jZs6e0PC8vDwEBAcjOzsbRo0exdu1aREZGYtq0aYY4JCIiIipnFEIIYegiCty/fx9OTk44dOgQWrVqhfT0dDg6OmLDhg3o3bs3AODKlSvw9PREbGwsmjZtit27d6NLly64d+8enJ2dAQCrV6/G5MmTcf/+fZiZmb1yvxqNBiqVCunp6VAqlaV6jEDRdz+VN7wbi4iolD1/iuhNUM5OYxX393e5ukA5PT0dAGBvbw8AOH36NHJycuDn5yf1qVmzJqpUqYLY2FgAQGxsLLy9vaWgAwD+/v7QaDS4ePFikfvJysqCRqPR+hAREZE8lZuwk5+fj7Fjx6J58+aoXbs2ACA5ORlmZmaws7PT6uvs7Izk5GSpz7NBp2B5wbKihIWFQaVSSR9XV1c9Hw0RERGVF+Um7AQHB+PChQvYtGlTqe8rNDQU6enp0ufOnTulvk8iIiIyjHJx63lISAh27NiBw4cPo3LlylK7Wq1GdnY20tLStGZ3UlJSoFarpT4nTpzQ2l7B3VoFfZ5nbm4Oc3NzPR8FERERlUcGndkRQiAkJATbtm3D/v374eHhobW8YcOGMDU1RUxMjNQWHx+PxMRE+Pr6AgB8fX1x/vx5pKamSn2io6OhVCrh5eVVNgdCRERE5ZZBZ3aCg4OxYcMG/Prrr7C1tZWusVGpVLC0tIRKpcLQoUMxfvx42NvbQ6lUYtSoUfD19UXTpk0BAB06dICXlxc+/vhjzJ8/H8nJyfjiiy8QHBzM2RsiIiIybNhZtWoVAKBNmzZa7RERERg0aBAAYMmSJTAyMkKvXr2QlZUFf39/rFy5UuprbGyMHTt2YMSIEfD19YW1tTWCgoIwc+bMsjoMIiIiKscMGnaK84gfCwsLrFixAitWrHhhHzc3N+zatUufpREREZFMlJu7sYiIiIhKA8MOERERyRrDDhEREckaww4RERHJGsMOERERyVq5eIIyERERvQGKelN7OXsTelEYdoiIiEpbUSGBygxPYxEREZGsMewQERGRrPE0FhVpSfRVre/j3q9uoEqIiIheD2d2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1njrORERkb7xicnlCmd2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWeDdWKXv+hZpERERUthh2iIiISHdF3WbfNrTs63gJnsYiIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9g
hIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIlkzMXQB9GZYEn21UNu496sboBIiIqKSMejMzuHDh9G1a1e4uLhAoVDgl19+0VouhMC0adNQqVIlWFpaws/PD9euXdPq8+DBAwwYMABKpRJ2dnYYOnQoMjIyyvAoiIiISMuBMO2PgRk07Dx+/Bh169bFihUrilw+f/58LFu2DKtXr8bx48dhbW0Nf39/ZGZmSn0GDBiAixcvIjo6Gjt27MDhw4cxfPjwsjoEIiIiKucMehqrU6dO6NSpU5HLhBBYunQpvvjiC3Tr1g0AsG7dOjg7O+OXX35Bv379cPnyZezZswcnT56Ej48PAGD58uXo3LkzFi5cCBcXlzI7FiIiIiqfyu0FygkJCUhOToafn5/UplKp0KRJE8TGxgIAYmNjYWdnJwUdAPDz84ORkRGOHz9e5jUTERFR+VNuL1BOTk4GADg7O2u1Ozs7S8uSk5Ph5OSktdzExAT29vZSn6JkZWUhKytL+q7RaPRVNhEREZUz5XZmpzSFhYVBpVJJH1dXV0OXRERERKWk3IYdtVoNAEhJSdFqT0lJkZap1WqkpqZqLc/NzcWDBw+kPkUJDQ1Fenq69Llz546eqyciIqLyotyGHQ8PD6jVasTExEhtGo0Gx48fh6+vLwDA19cXaWlpOH36tNRn//79yM/PR5MmTV64bXNzcyiVSq0PERERyZNBr9nJyMjA9evXpe8JCQmIi4uDvb09qlSpgrFjx2L27NmoVq0aPDw8MHXqVLi4uKB79+4AAE9PT3Ts2BGffPIJVq9ejZycHISEhKBfv368E4uIiIgAGDjsnDp1Cm3btpW+jx8/HgAQFBSEyMhIfPbZZ3j8+DGGDx+OtLQ0tGjRAnv27IGFhYW0TlRUFEJCQtC+fXsYGRmhV69eWLZsWZkfCxEREZVPCiGEMHQRhqbRaKBSqZCenq73U1pFvWZBLvi6CCJ6K5WDJwK/cdqGlspmi/v7u9xes0NERESkD+X2OTtU/j0/a8WZHiIiKo84s0NERESyxrBDREREssawQ0RERLLGsENERESyxrBDREREssawQ0RERLLGsENERESyxrBDREREssawQ0RERLLGsENERESyxtdFkN4U9dJTvkKCiIgMjTM7REREJGsMO0RERCRrDDtEREQkaww7REREJGsMO0RERCRrDDtEREQkaww7REREJGsMO0RERCRrfKgglSo+aJCI3mgHwgxdAekBZ3aIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1vicHTI4PouHiIhKE8MOlbmiwg0REVFp4WksIiIikjWGHSIiIpI1hh0iIiKSNV6zQ0REVIAv/pQlhh0ql56/iJl3ZxERka54GouIiIhkjTM79Mbi7A8RERUHZ3aIiIhI1mQzs7NixQosWLAAycnJqFu3LpYvX47GjRsbuizSEz6IkIiIdCWLsLN582aMHz8eq1evRpMmTbB06VL4+/sjPj4eTk5Ohi6PiIjKI9559daQRdhZvHgxPvnkEwwePBgAsHr1auzcuRPff/89pkyZYuDqyJCKc10P381FRCRvb3zYyc7OxunTpxEaGiq1GRkZwc/PD7GxsQasjMpacU516Xo6jIGI6A3HWZy32hsfdv7++2/k5eXB2dlZq93Z2RlXrlwpcp2srCxkZWVJ39PT0wEAGo1G7/VlPs7Q+zap9IX98pde+hRHcLv39LIdorfW4UWF21pN0P7+OLNsaqGilcLv16ebfbpdIcRL+73xYUcXYWFh+PLLLwu1u7q6GqAaetv9P0MXQCRLMw1dAGkp3Z/Ho0ePoFKpXrj8jQ87FStWhLGxMVJSUrTaU1JSoFari1wnNDQU48ePl77n5+fjwYMHcHBwgEKheOU+NRoNXF1dcefOHSiVytc7gLcEx6zkOGYlxzErGY5XyXH
MSq40x0wIgUePHsHFxeWl/d74sGNmZoaGDRsiJiYG3bt3B/A0vMTExCAkJKTIdczNzWFubq7VZmdnV+J9K5VK/mUvIY5ZyXHMSo5jVjIcr5LjmJVcaY3Zy2Z0CrzxYQcAxo8fj6CgIPj4+KBx48ZYunQpHj9+LN2dRURERG8vWYSdvn374v79+5g2bRqSk5NRr1497Nmzp9BFy0RERPT2kUXYAYCQkJAXnrbSN3Nzc0yfPr3QqTB6MY5ZyXHMSo5jVjIcr5LjmJVceRgzhXjV/VpEREREbzC+CJSIiIhkjWGHiIiIZI1hh4iIiGSNYYeIiIhkjWGnhFasWAF3d3dYWFigSZMmOHHihKFLMpiwsDA0atQItra2cHJyQvfu3REfH6/VJzMzE8HBwXBwcICNjQ169epV6GnXiYmJCAgIgJWVFZycnDBp0iTk5uaW5aEYxLx586BQKDB27FipjeNV2P/+9z8MHDgQDg4OsLS0hLe3N06dOiUtF0Jg2rRpqFSpEiwtLeHn54dr165pbePBgwcYMGAAlEol7OzsMHToUGRkyPO9dXl5eZg6dSo8PDxgaWmJqlWrYtasWVrvDnrbx+zw4cPo2rUrXFxcoFAo8Msvv2gt19f4nDt3Di1btoSFhQVcXV0xf/780j60UvOyMcvJycHkyZPh7e0Na2truLi4IDAwEPfu3dPahkHHTFCxbdq0SZiZmYnvv/9eXLx4UXzyySfCzs5OpKSkGLo0g/D39xcRERHiwoULIi4uTnTu3FlUqVJFZGRkSH0+/fRT4erqKmJiYsSpU6dE06ZNRbNmzaTlubm5onbt2sLPz0+cOXNG7Nq1S1SsWFGEhoYa4pDKzIkTJ4S7u7uoU6eOGDNmjNTO8dL24MED4ebmJgYNGiSOHz8ubt68KX7//Xdx/fp1qc+8efOESqUSv/zyizh79qz44IMPhIeHh/j333+lPh07dhR169YVx44dE3/88Yd47733RP/+/Q1xSKVuzpw5wsHBQezYsUMkJCSILVu2CBsbG/H1119Lfd72Mdu1a5f4/PPPxdatWwUAsW3bNq3l+hif9PR04ezsLAYMGCAuXLggNm7cKCwtLcWaNWvK6jD16mVjlpaWJvz8/MTmzZvFlStXRGxsrGjcuLFo2LCh1jYMOWYMOyXQuHFjERwcLH3Py8sTLi4uIiwszIBVlR+pqakCgDh06JAQ4uk/AFNTU7Flyxapz+XLlwUAERsbK4R4+g/IyMhIJCcnS31WrVollEqlyMrKKtsDKCOPHj0S1apVE9HR0aJ169ZS2OF4FTZ58mTRokWLFy7Pz88XarVaLFiwQGpLS0sT5ubmYuPGjUIIIS5duiQAiJMnT0p9du/eLRQKhfjf//5XesUbSEBAgBgyZIhWW8+ePcWAAQOEEByz5z3/i1tf47Ny5UpRoUIFrX+XkydPFjVq1CjlIyp9RQXE5504cUIAELdv3xZCGH7MeBqrmLKzs3H69Gn4+flJbUZGRvDz80NsbKwBKys/0tPTAQD29vYAgNOnTyMnJ0drzGrWrIkqVapIYxYbGwtvb2+tp137+/tDo9Hg4sWLZVh92QkODkZAQIDWuAAcr6L89ttv8PHxwYcffggnJyfUr18f3333nbQ8ISEBycnJWmOmUqnQpEkTrTGzs7ODj4+P1MfPzw9GRkY4fvx42R1MGWnWrBliYmJw9epVAMDZs2fx559/olOnTgA4Zq+ir/GJjY1Fq1atYGZmJvXx9/dHfHw8Hj58WEZHYzjp6elQKBTSeycNPWayeYJyafv777+Rl5dX6BUUzs7OuHLlioGqKj/y8/MxduxYNG/eHLVr1wYAJCcnw8zMrNBLVp2dnZGcnCz1KWpMC5bJzaZNm/DXX3/h5MmThZZxvAq7efMmVq1ahfHjx+P//b//h5MnT2L06NEwMzNDUFCQdMxFjcmzY+bk5KS13MTEBPb29rIcsylTpkCj0aBmzZowNjZGXl4e5syZgwEDBgA
Ax+wV9DU+ycnJ8PDwKLSNgmUVKlQolfrLg8zMTEyePBn9+/eXXvxp6DFj2CG9CA4OxoULF/Dnn38aupRy686dOxgzZgyio6NhYWFh6HLeCPn5+fDx8cHcuXMBAPXr18eFCxewevVqBAUFGbi68unHH39EVFQUNmzYgFq1aiEuLg5jx46Fi4sLx4xKXU5ODvr06QMhBFatWmXociQ8jVVMFStWhLGxcaE7Y1JSUqBWqw1UVfkQEhKCHTt24MCBA6hcubLUrlarkZ2djbS0NK3+z46ZWq0uckwLlsnJ6dOnkZqaigYNGsDExAQmJiY4dOgQli1bBhMTEzg7O3O8nlOpUiV4eXlptXl6eiIxMRHA/x3zy/5dqtVqpKamai3Pzc3FgwcPZDlmkyZNwpQpU9CvXz94e3vj448/xrhx4xAWFgaAY/Yq+hqft+3fKvB/Qef27duIjo6WZnUAw48Zw04xmZmZoWHDhoiJiZHa8vPzERMTA19fXwNWZjhCCISEhGDbtm3Yv39/oenHhg0bwtTUVGvM4uPjkZiYKI2Zr68vzp8/r/WPoOAfyfO/5N507du3x/nz5xEXFyd9fHx8MGDAAOnPHC9tzZs3L/Q4g6tXr8LNzQ0A4OHhAbVarTVmGo0Gx48f1xqztLQ0nD59Wuqzf/9+5Ofno0mTJmVwFGXryZMnMDLS/k+7sbEx8vPzAXDMXkVf4+Pr64vDhw8jJydH6hMdHY0aNWrI8hRWQdC5du0a9u3bBwcHB63lBh+z177E+S2yadMmYW5uLiIjI8WlS5fE8OHDhZ2dndadMW+TESNGCJVKJQ4ePCiSkpKkz5MnT6Q+n376qahSpYrYv3+/OHXqlPD19RW+vr7S8oJbqTt06CDi4uLEnj17hKOjo2xvpX7es3djCcHxet6JEyeEiYmJmDNnjrh27ZqIiooSVlZWYv369VKfefPmCTs7O/Hrr7+Kc+fOiW7duhV5m3D9+vXF8ePHxZ9//imqVasmm9uonxcUFCTeeecd6dbzrVu3iooVK4rPPvtM6vO2j9mjR4/EmTNnxJkzZwQAsXjxYnHmzBnpziF9jE9aWppwdnYWH3/8sbhw4YLYtGmTsLKyemNvPX/ZmGVnZ4sPPvhAVK5cWcTFxWn9Pnj2zipDjhnDTgktX75cVKlSRZiZmYnGjRuLY8eOGbokgwFQ5CciIkLq8++//4qRI0eKChUqCCsrK9GjRw+RlJSktZ1bt26JTp06CUtLS1GxYkUxYcIEkZOTU8ZHYxjPhx2OV2Hbt28XtWvXFubm5qJmzZri22+/1Vqen58vpk6dKpydnYW5ublo3769iI+P1+rzzz//iP79+wsbGxuhVCrF4MGDxaNHj8ryMMqMRqMRY8aMEVWqVBEWFhbi3XffFZ9//rnWL523fcwOHDhQ5H+7goKChBD6G5+zZ8+KFi1aCHNzc/HOO++IefPmldUh6t3LxiwhIeGFvw8OHDggbcOQY6YQ4pnHahIRERHJDK/ZISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEi2XJ3d8fSpUsNXQYRGRjDDhG9VGxsLIyNjREQEGDoUoiIdMKwQ0QvFR4ejlGjRuHw4cO4d++eocuRtWdfgEhE+sOwQ0QvlJGRgc2bN2PEiBEICAhAZGSk1vKDBw9CoVAgJiYGPj4+sLKyQrNmzQq9qXzVqlWoWrUqzMzMUKNGDfzwww9ayxUKBdasWYMuXbrAysoKnp6eiI2NxfXr19GmTRtYW1ujWbNmuHHjhrTOjRs30K1bNzg7O8PGxgaNGjXCvn37XngsQ4YMQZcuXbTacnJy4OTkhPDw8CLXuX37Nrp27YoKFSrA2toatWrVwq5du6TlFy9eRJcuXaBUKmFra4uWLVtKNebn52PmzJmoXLkyzM3NUa9ePezZs0da99atW1AoFNi8eTNat24NCwsLREVFAQD++9//wtPTExYWFqh
ZsyZWrlz5wuMiomLQyxu2iEiWwsPDhY+PjxDi6Qs5q1atKvLz86XlBS8HbNKkiTh48KC4ePGiaNmypWjWrJnUZ+vWrcLU1FSsWLFCxMfHi0WLFgljY2Oxf/9+qQ8A8c4774jNmzeL+Ph40b17d+Hu7i7atWsn9uzZIy5duiSaNm0qOnbsKK0TFxcnVq9eLc6fPy+uXr0qvvjiC2FhYSG9uVoIIdzc3MSSJUuEEEIcOXJEGBsbi3v37mnVZm1t/cIXXAYEBIj3339fnDt3Tty4cUNs375dHDp0SAghxN27d4W9vb3o2bOnOHnypIiPjxfff/+9uHLlihBCiMWLFwulUik2btworly5Ij777DNhamoqrl69KoQQ0ssT3d3dxc8//yxu3rwp7t27J9avXy8qVaoktf3888/C3t5eREZG6vQzJCK+9ZyIXqJZs2Zi6dKlQgghcnJyRMWKFbXeYlwQdvbt2ye17dy5UwAQ//77r7SNTz75RGu7H374oejcubP0HYD44osvpO+xsbECgAgPD5faNm7cKCwsLF5ab61atcTy5cul78+GHSGE8PLyEl999ZX0vWvXrmLQoEEv3J63t7eYMWNGkctCQ0OFh4eHyM7OLnK5i4uLmDNnjlZbo0aNxMiRI4UQ/xd2Csa3QNWqVcWGDRu02mbNmiV8fX1fWCcRvRxPYxFRkeLj43HixAn0798fAGBiYoK+ffsWecqnTp060p8rVaoEAEhNTQUAXL58Gc2bN9fq37x5c1y+fPmF23B2dgYAeHt7a7VlZmZCo9EAeHqKbeLEifD09ISdnR1sbGxw+fJlJCYmvvCYhg0bhoiICABASkoKdu/ejSFDhryw/+jRozF79mw0b94c06dPx7lz56RlcXFxaNmyJUxNTQutp9FocO/evWIdt4+Pj/Tnx48f48aNGxg6dChsbGykz+zZs7VO4RFRyZgYugAiKp/Cw8ORm5sLFxcXqU0IAXNzc3zzzTdQqVRS+7O/8BUKBYCn16yURFHbeNl2J06ciOjoaCxcuBDvvfceLC0t0bt3b2RnZ79wH4GBgZgyZQpiY2Nx9OhReHh4oGXLli/sP2zYMPj7+2Pnzp3Yu3cvwsLCsGjRIowaNQqWlpYlOr4Xsba2lv6ckZEBAPjuu+/QpEkTrX7GxsZ62R/R24gzO0RUSG5uLtatW4dFixYhLi5O+pw9exYuLi7YuHFjsbfl6emJI0eOaLUdOXIEXl5er1XjkSNHMGjQIPTo0QPe3t5Qq9W4devWS9dxcHBA9+7dERERgcjISAwePPiV+3F1dcWnn36KrVu3YsKECfjuu+8APJ2J+uOPP4q8g0qpVMLFxaXEx+3s7AwXFxfcvHkT7733ntbHw8PjlbUSUdE4s0NEhezYsQMPHz7E0KFDtWZwAKBXr14IDw/Hp59+WqxtTZo0CX369EH9+vXh5+eH7du3Y+vWrS+9c6o4qlWrhq1bt6Jr165QKBSYOnVqsWaThg0bhi5duiAvLw9BQUEv7Tt27Fh06tQJ1atXx8OHD3HgwAF4enoCAEJCQrB8+XL069cPoaGhUKlUOHbsGBo3bowaNWpg0qRJmD59OqpWrYp69eohIiICcXFx0h1XL/Lll19i9OjRUKlU6NixI7KysnDq1Ck8fPgQ48ePL/4AEZGEYYeICgkPD4efn1+hoAM8DTvz58/Xun7lZbp3746vv/4aCxcuxJgxY+Dh4YGIiAi0adPmtWpcvHgxhgwZgmbNmqFixYqYPHmydD3Py/j5+aFSpUqoVauW1im6ouTl5SE4OBh3796FUqlEx44dsWTJEgBPZ4n279+PSZMmoXXr1jA2Nka9evWk63RGjx6N9PR0TJgwAampqfDy8sJvv/2GatWqvXSfw4YNg5WVFRYsWIBJkybB2toa3t7eGDt2bPEGhogKUQghhKGLICIqKxkZGXjnnXcQERGBnj17GrocIioDnNkhordCfn4+/v77byxatAh2dnb44IMPDF0
SEZURhh0ieiskJibCw8MDlStXRmRkJExM+J8/orcFT2MRERGRrPHWcyIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikrX/DwmDTMCRKI7YAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "scripts.train_detector(\n", + " scripts.TrainDetectorConfig(\n", + " path=(detector_path := get_path(\"logs/demo/detector\")),\n", + " task=tasks.backdoor_detection(\n", + " model, train_data, val_data, data.CornerPixelBackdoor()\n", + " ),\n", + " detector=detectors.MahalanobisDetector(save_path=detector_path),\n", + " train=detectors.MahalanobisTrainConfig(),\n", + " num_classes=10,\n", + " )\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As we can see, this was a trivial detection task. As an ablation, we can test whether the detector specifically flags backdoored inputs as anomalous, or just anything out of distribution:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m2024-02-29 22:14:35.637\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36mload_weights\u001b[0m:\u001b[36m232\u001b[0m - \u001b[1mLoading detector from logs/demo/detector/2024-02-29_22-14-27/detector\u001b[0m\n" + ] + } + ], + "source": [ + "detector = detectors.MahalanobisDetector(save_path=detector_path / \"ood_eval\")\n", + "# TODO: The fact that weights are saved in \"detector\" is just a convention used by\n", + "# the train_detector script, this is kind of weird.\n", + "detector.load_weights(detector_path / \"detector\")" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m2024-02-29 22:14:36.725\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36meval\u001b[0m:\u001b[36m149\u001b[0m - \u001b[1mAUC_ROC: 0.9934\u001b[0m\n", + "\u001b[32m2024-02-29 22:14:36.726\u001b[0m | \u001b[1mINFO \u001b[0m | 
\u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36meval\u001b[0m:\u001b[36m150\u001b[0m - \u001b[1mAP: 0.9779\u001b[0m\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjsAAAHHCAYAAABZbpmkAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAABVb0lEQVR4nO3deVhUVeMH8O+wb84gKIMkCpkLKC6Jy7gvJCqaay5R4lKWQm5Jyi81d8w9LcV6CbRwydIWt0Q0LUVcEndxQ7GXzVIY0djP7w8f7usIKo4DM16/n+eZ53HOOXPvuQd0vp577r0KIYQAERERkUyZGbsDRERERBWJYYeIiIhkjWGHiIiIZI1hh4iIiGSNYYeIiIhkjWGHiIiIZI1hh4iIiGSNYYeIiIhkjWGHiIiIZI1hh4hKUSgUmDlzprG78VyIjo6GQqHAtWvXpLJOnTqhU6dOlbL/h39WM2fOhEKhwN9//10p+/fw8MDw4cMrZV9E+mLYIdLDqlWroFAo0KpVK2N3hWTi0KFDmDlzJrKysozdlVJMuW9E5WFh7A4QPY9iYmLg4eGBI0eO4PLly3jllVeM3SUyIbt3737qzxw6dAizZs3C8OHD4ejoWO7P/fvvv7CwqNh/yh/Xt6SkJJiZ8f/NZNr4G0r0lJKTk3Ho0CEsXboU1atXR0xMjLG7RE9w9+7dSt2flZUVrKysKmz7xcXFyM3NBQDY2NhUeNh5HGtra1haWhpt/0TlwbBD9JRiYmJQtWpVBAQEYODAgWWGnWvXrkGhUGDx4sX48ssvUadOHVhbW6NFixY4evRoqfZ79+5F+/btYW9vD0dHR/Tp0wfnz5/XaVOyFuPixYt46623oFKpUL16dUyfPh1CCNy4cQN9+vSBUqmEq6srlixZovP5/Px8zJgxA82bN4dKpYK9vT3at2+Pffv2PfZ49+3bB4VCga1bt5aqW79+PRQKBeLj4x/5+YKCAsyaNQt169aFjY0NnJ2d0a5dO8TGxuq0u3DhAgYNGoTq1avD1tYW9evXx8cff6zT5sSJE+jRoweUSiUcHBzQtWtXHD58WKdNyRqa/fv3Y+zYsXBxcUHNmjWl+p07d0pjXaVKFQQEBODs2bOPHYMSZ8+eRZcuXWBra4uaNWti7ty5KC4uLtWurDU7K1euRMOGDWFnZ4eqVavC19cX69evB3D/ZxsaGgoA8PT0hEKh0FkHpFAoEBISgpiYGDRs2BDW1tbYtWuXVFfW+qq///4bgwYNglKphLOzM8aPHy8FJOB/v6PR0dGlPvvgNp/Ut7LW7Fy9ehVvvPEGnJycYGdnh9atW2P79u06bX777TcoFAp89913mDdvHmrWrAkbGxt07doVly9fLtUnomfB01hETykmJgb9+/eHlZUVhg4ditWrV+Po0aNo0aJFqbbr16/HnTt38N5770GhUGDhwoXo378/rl69Kv1veM+ePejRowdefvllzJw5E//++y9WrlyJtm3b4s8//4SHh4fONgcPHgwvLy8sWLAA27dvx9y5c+Hk5IQ1a9agS5cu+PTTTxETE4PJkyejRYsW6NChAwBAq9XiP//5D4YOHYp3330Xd+7cQWRkJPz9/XHkyBE0bdq0zOPt1KkT3N3dERMTg379+pUaizp16kCj0TxyvGbOnInw8HC88847aNmyJbRaLY4dO4Y///wTr732GgDg1KlTaN++PSwtLTF69Gh4eHjgypUr+OWXXzBv3jwA94NG+/btoVQq8dFHH8HS0hJr1qxBp06dsH///lLrp8aOHYvq1atjxowZ0szON998g6CgIPj7++PTTz/FvXv3sH
r1arRr1w4nTpwoNdYPSk9PR+fOnVFYWIipU6fC3t4eX375JWxtbR/5mRJfffUVxo0bh4EDB0qh49SpU0hISMCbb76J/v374+LFi9iwYQOWLVuGatWqAQCqV68ubWPv3r347rvvEBISgmrVqj22rwAwaNAgeHh4IDw8HIcPH8aKFStw+/ZtrFu37on9fVB5+vagjIwMtGnTBvfu3cO4cePg7OyMtWvX4vXXX8f3339f6ndowYIFMDMzw+TJk5GdnY2FCxciMDAQCQkJT9VPoscSRFRux44dEwBEbGysEEKI4uJiUbNmTTF+/HiddsnJyQKAcHZ2Frdu3ZLKf/rpJwFA/PLLL1JZ06ZNhYuLi/jnn3+kspMnTwozMzMxbNgwqeyTTz4RAMTo0aOlssLCQlGzZk2hUCjEggULpPLbt28LW1tbERQUpNM2Ly9Pp5+3b98WarVajBw5UqccgPjkk0+k92FhYcLa2lpkZWVJZZmZmcLCwkKnXVmaNGkiAgICHtumQ4cOokqVKuL69es65cXFxdKf+/btK6ysrMSVK1ekstTUVFGlShXRoUMHqSwqKkoAEO3atROFhYVS+Z07d4Sjo6N49913dfaRnp4uVCpVqfKHTZgwQQAQCQkJUllmZqZQqVQCgEhOTpbKO3bsKDp27Ci979Onj2jYsOFjt79o0aJS2ykBQJiZmYmzZ8+WWffgz6Dk9+T111/XaTd27FgBQJw8eVII8b/f0aioqCdu83F9q127ts7vWck4/f7771LZnTt3hKenp/Dw8BBFRUVCCCH27dsnAAgvLy+d38vPPvtMABCnT58utS8iffE0FtFTiImJgVqtRufOnQHcn+4fPHgwNm7ciKKiolLtBw8ejKpVq0rv27dvD+D+ND8ApKWlITExEcOHD4eTk5PUrnHjxnjttdewY8eOUtt85513pD+bm5vD19cXQgiMGjVKKnd0dET9+vWl/ZS0LVlHUlxcjFu3bqGwsBC+vr74888/H3vcw4YNQ15eHr7//nupbNOmTSgsLMRbb7312M86Ojri7NmzuHTpUpn1N2/exIEDBzBy5EjUqlVLp06hUAAAioqKsHv3bvTt2xcvv/yyVF+jRg28+eab+OOPP6DVanU+++6778Lc3Fx6Hxsbi6ysLAwdOhR///239DI3N0erVq2eeDpvx44daN26NVq2bCmVVa9eHYGBgY/9XMkY/PXXX2Wewiyvjh07wtvbu9ztg4ODdd5/8MEHAFDm75Qh7dixAy1btkS7du2kMgcHB4wePRrXrl3DuXPndNqPGDFCZ33Tw39HiAyBYYeonIqKirBx40Z07twZycnJuHz5Mi5fvoxWrVohIyMDcXFxpT7z8Jd3SfC5ffs2AOD69esAgPr165f6rJeXF/7+++9Si2sf3qZKpYKNjY10euHB8pL9lFi7di0aN24srZ2pXr06tm/fjuzs7Mcee4MGDdCiRQud9UkxMTFo3br1E69Emz17NrKyslCvXj34+PggNDQUp06dkupLvtQaNWr0yG3cvHkT9+7de+Q4FRcX48aNGzrlnp6eOu9LwlaXLl1QvXp1ndfu3buRmZn52OO4fv066tatW6q8rD49bMqUKXBwcEDLli1Rt25dBAcH4+DBg0/83IMePp4nebivderUgZmZmc79gCrC9evXH/lzKql/0JP+jhAZAtfsEJXT3r17kZaWho0bN2Ljxo2l6mNiYtCtWzedsgdnFh4khNC7H2Vtszz7+fbbbzF8+HD07dsXoaGhcHFxgbm5OcLDw3HlypUn7nfYsGEYP348/vrrL+Tl5eHw4cP4/PPPn/i5Dh064MqVK/jpp5+we/du/Oc//8GyZcsQERGhM0tlaA+vpSlZSPzNN9/A1dW1VPuKvKLJy8sLSUlJ2LZtG3bt2oUffvgBq1atwowZMzBr1qxybaM8a4Mep2SW7FHvS5Q1Q1mRKuLvCNHDGHaIyikmJgYuLi744osvStVt2bIFW7duRURExF
N9KdWuXRvA/XuVPOzChQuoVq0a7O3t9e/0A77//nu8/PLL2LJli84X3SeffFKuzw8ZMgSTJk3Chg0b8O+//8LS0hKDBw8u12ednJwwYsQIjBgxAjk5OejQoQNmzpyJd955RzotdebMmUd+vnr16rCzs3vkOJmZmcHd3f2xfahTpw4AwMXFBX5+fuXq94Nq165d5qm4svpUFnt7ewwePBiDBw9Gfn4++vfvj3nz5iEsLAw2NjaPDB/6unTpks5s0OXLl1FcXCwtbC6ZQXn4RoEPz7wAjw5GZaldu/Yjf04l9USVjaexiMrh33//xZYtW9CrVy8MHDiw1CskJAR37tzBzz///FTbrVGjBpo2bYq1a9fqfOmcOXMGu3fvRs+ePQ12DCX/g37wf8wJCQmPvWz8QdWqVUOPHj3w7bffIiYmBt27dy916qws//zzj857BwcHvPLKK8jLywNwP8h06NABX3/9NVJSUnTalvTV3Nwc3bp1w08//aRzGiYjIwPr169Hu3btoFQqH9sPf39/KJVKzJ8/HwUFBaXqb968+djP9+zZE4cPH8aRI0d0PlOe+yw9PAZWVlbw9vaGEELqS0moNdRdih8O5StXrgQA9OjRAwCgVCpRrVo1HDhwQKfdqlWrSm3rafrWs2dPHDlyROf36u7du/jyyy/h4eHxVOuOiAyFMztE5fDzzz/jzp07eP3118usb926tXSDwfLOdpRYtGgRevToAY1Gg1GjRkmXnqtUKoM+n6pXr17YsmUL+vXrh4CAACQnJyMiIgLe3t7Iyckp1zaGDRuGgQMHAgDmzJlTrs94e3ujU6dOaN68OZycnHDs2DF8//33CAkJkdqsWLEC7dq1w6uvvorRo0fD09MT165dw/bt25GYmAgAmDt3LmJjY9GuXTuMHTsWFhYWWLNmDfLy8rBw4cIn9kOpVGL16tV4++238eqrr2LIkCGoXr06UlJSsH37drRt2/axp+U++ugjfPPNN+jevTvGjx8vXXpeu3ZtnTVIZenWrRtcXV3Rtm1bqNVqnD9/Hp9//jkCAgJQpUoVAEDz5s0BAB9//DGGDBkCS0tL9O7dW++ZveTkZLz++uvo3r074uPj8e233+LNN99EkyZNpDbvvPMOFixYgHfeeQe+vr44cOAALl68WGpbT9O3qVOnYsOGDejRowfGjRsHJycnrF27FsnJyfjhhx94t2UyDiNeCUb03Ojdu7ewsbERd+/efWSb4cOHC0tLS/H3339Ll/UuWrSoVDs8dFmvEELs2bNHtG3bVtja2gqlUil69+4tzp07p9Om5JLimzdv6pQHBQUJe3v7Uvvp2LGjzuXOxcXFYv78+aJ27drC2tpaNGvWTGzbtk0EBQWJ2rVrP7GPQgiRl5cnqlatKlQqlfj3338fORYPmjt3rmjZsqVwdHQUtra2okGDBmLevHkiPz9fp92ZM2dEv379hKOjo7CxsRH169cX06dP12nz559/Cn9/f+Hg4CDs7OxE586dxaFDh3TalFx6fvTo0TL7s2/fPuHv7y9UKpWwsbERderUEcOHDxfHjh174rGcOnVKdOzYUdjY2IiXXnpJzJkzR0RGRj7x0vM1a9aIDh06CGdnZ2FtbS3q1KkjQkNDRXZ2ts7258yZI1566SVhZmams00AIjg4uMw+PfyzKvk9OXfunBg4cKCoUqWKqFq1qggJCSn1M7t3754YNWqUUKlUokqVKmLQoEEiMzOzzJ//o/r28KXnQghx5coVMXDgQOln2bJlS7Ft2zadNiWXnm/evFmn/HGXxBPpSyEEV4ERUfkUFhbCzc0NvXv3RmRkpLG7Q0RULpxPJKJy+/HHH3Hz5k0MGzbM2F0hIio3zuwQ0RMlJCTg1KlTmDNnDqpVq/bEmxASEZkSzuwQ0ROtXr0aY8aMgYuLy1M/W4mIyNg4s0NERESyxpkdIiIikjWGHSIiIpI13lQQ95+Zk5qaiipVqhj8lu1ERERUMYQQuHPnDtzc3B
57w0qGHQCpqalPfK4OERERmaYbN26gZs2aj6xn2AGk27XfuHHjic/XISIiItOg1Wrh7u4ufY8/CsMO/vdEX6VSybBDRET0nHnSEhQuUCYiIiJZY9ghIiIiWWPYISIiIlnjmp1yKi4uRn5+vrG7QZXE0tIS5ubmxu4GEREZAMNOOeTn5yM5ORnFxcXG7gpVIkdHR7i6uvLeS0REzzmGnScQQiAtLQ3m5uZwd3d/7E2LSB6EELh37x4yMzMBADVq1DByj4iI6Fkw7DxBYWEh7t27Bzc3N9jZ2Rm7O1RJbG1tAQCZmZlwcXHhKS0ioucYpymeoKioCABgZWVl5J5QZSsJtwUFBUbuCRERPQuGnXLiuo0XD3/mRETywLBDREREssawQybjt99+g0KhQFZWlrG7QkREMsIFynpaFnuxUvc38bV6T9V++PDhWLt2LcLDwzF16lSp/Mcff0S/fv0ghDB0F4mIiEwSZ3ZkzMbGBp9++ilu375tsG3yxopERPS8YdiRMT8/P7i6uiI8PPyRbX744Qc0bNgQ1tbW8PDwwJIlS3TqPTw8MGfOHAwbNgxKpRKjR49GdHQ0HB0dsW3bNtSvXx92dnYYOHAg7t27h7Vr18LDwwNVq1bFuHHjpKvZAOCbb76Br68vqlSpAldXV7z55pvSvWyIiIgqCsOOjJmbm2P+/PlYuXIl/vrrr1L1x48fx6BBgzBkyBCcPn0aM2fOxPTp0xEdHa3TbvHixWjSpAlOnDiB6dOnAwDu3buHFStWYOPGjdi1axd+++039OvXDzt27MCOHTvwzTffYM2aNfj++++l7RQUFGDOnDk4efIkfvzxR1y7dg3Dhw+vyCEgIiLimh2569evH5o2bYpPPvkEkZGROnVLly5F165dpQBTr149nDt3DosWLdIJIV26dMGHH34ovf/9999RUFCA1atXo06dOgCAgQMH4ptvvkFGRgYcHBzg7e2Nzp07Y9++fRg8eDAAYOTIkdI2Xn75ZaxYsQItWrRATk4OHBwcKmoIiIjkZd9Ds/Wdw4zTj+eIUWd2ioqKMH36dHh6esLW1hZ16tTBnDlzdBbPCiEwY8YM1KhRA7a2tvDz88OlS5d0tnPr1i0EBgZCqVTC0dERo0aNQk5OTmUfjsn69NNPsXbtWpw/f16n/Pz582jbtq1OWdu2bXHp0iWd00++vr6ltmlnZycFHQBQq9Xw8PDQCS1qtVrnNNXx48fRu3dv1KpVC1WqVEHHjh0BACkpKc92gERERI9h1LDz6aefYvXq1fj8889x/vx5fPrpp1i4cCFWrlwptVm4cCFWrFiBiIgIJCQkwN7eHv7+/sjNzZXaBAYG4uzZs4iNjcW2bdtw4MABjB492hiHZJI6dOgAf39/hIXpl/7t7e1LlVlaWuq8VygUZZaVPDz17t278Pf3h1KpRExMDI4ePYqtW7cC4KJnIiKqWEY9jXXo0CH06dMHAQEBAO4vht2wYQOOHDkC4P6szvLlyzFt2jT06dMHALBu3Tqo1Wr8+OOPGDJkCM6fP49du3bh6NGj0gzEypUr0bNnTyxevBhubm7GOTgTs2DBAjRt2hT169eXyry8vHDw4EGddgcPHkS9evUM/iyoCxcu4J9//sGCBQvg7u4OADh27JhB90FE9EJ6+LQWwFNbDzHqzE6bNm0QFxeHixfv37Pm5MmT+OOPP9CjRw8AQHJyMtLT0+Hn5yd9RqVSoVWrVoiPjwcAxMfHw9HRUedUi5+fH8zMzJCQkFDmfvPy8qDVanVecufj44PAwECsWLFCKvvwww8RFxeHOXPm4OLFi1i7di0+//xzTJ482eD7r1WrFqysrLBy5UpcvXoVP//8M+bMmWPw/RARET3MqGFn6tSpGDJkCBo0aABLS0s0a9YMEyZMQGBgIAAgPT0dwP21Hw9Sq9VSXXp6OlxcXHTqLSws4OTkJLV5WHh4OFQqlfQqmWmQu9mzZ0unlQDg1VdfxXfffYeNGzeiUaNGmD
FjBmbPnl0hV0hVr14d0dHR2Lx5M7y9vbFgwQIsXrzY4PshIiJ6mEIY8Va6GzduRGhoKBYtWoSGDRsiMTEREyZMwNKlSxEUFIRDhw6hbdu2SE1NRY0aNaTPDRo0CAqFAps2bcL8+fOxdu1aJCUl6WzbxcUFs2bNwpgxY0rtNy8vD3l5edJ7rVYLd3d3ZGdnQ6lU6rTNzc1FcnIyPD09YWNjY+ARIFPGnz0RmaSyTls97AU5jaXVaqFSqcr8/n6QUdfshIaGSrM7wP1TLdevX0d4eDiCgoLg6uoKAMjIyNAJOxkZGWjatCkAwNXVtdSN6QoLC3Hr1i3p8w+ztraGtbV1BRwRERERmRqjnsa6d+8ezMx0u2Bubi6davH09ISrqyvi4uKkeq1Wi4SEBGg0GgCARqNBVlYWjh8/LrXZu3cviouL0apVq0o4CiIiIjJlRp3Z6d27N+bNm4datWqhYcOGOHHiBJYuXSrdfE6hUGDChAmYO3cu6tatC09PT0yfPh1ubm7o27cvgPtXFHXv3h3vvvsuIiIiUFBQgJCQEAwZMoRXYhEREZFxw87KlSsxffp0jB07FpmZmXBzc8N7772HGTNmSG0++ugj3L17F6NHj0ZWVhbatWuHXbt26ayhiImJQUhICLp27QozMzMMGDBA56ojIiIienEZdYGyqXjcAicuUn1x8WdPRCaJC5Ql5V2gzAeBEhERkawx7BAREZGsMewQERGRrDHsEBERkawx7JBJ8vDwwPLly43dDSIikgGjXnr+XCvPanhD0nNlfXx8PNq1a4fu3btj+/btBu4UERGR6ePMjsxFRkbigw8+wIEDB5Cammrs7hAREVU6hh0Zy8nJwaZNmzBmzBgEBAQgOjpaqvvtt9+gUCgQFxcHX19f2NnZoU2bNqUeqLp69WrUqVMHVlZWqF+/Pr755hudeoVCgTVr1qBXr16ws7ODl5cX4uPjcfnyZXTq1An29vZo06YNrly5In3mypUr6NOnD9RqNRwcHNCiRQvs2bPnsceSkpKCPn36wMHBAUqlEoMGDUJGRoZUP3z4cOmu2iUmTJiATp06Se+///57+Pj4wNbWFs7OzvDz88Pdu3fLOZpERPS8YtiRse+++w4NGjRA/fr18dZbb+Hrr7/Gw/eQ/Pjjj7FkyRIcO3YMFhYW0qM6AGDr1q0YP348PvzwQ5w5cwbvvfceRowYgX379ulsY86cORg2bBgSExPRoEEDvPnmm3jvvfcQFhaGY8eOQQiBkJAQqX1OTg569uyJuLg4nDhxAt27d0fv3r2RkpJS5nEUFxejT58+uHXrFvbv34/Y2FhcvXoVgwcPLvdYpKWlYejQoRg5ciTOnz+P3377Df379y81HkREJmVfeOkXPTWu2ZGxyMhIvPXWWwCA7t27Izs7G/v379eZ7Zg3bx46duwIAJg6dSoCAgKQm5sLGxsbLF68GMOHD8fYsWMBAJMmTcLhw4exePFidO7cWdrGiBEjMGjQIADAlClToNFoMH36dPj7+wMAxo8fjxEjRkjtmzRpgiZNmkjv58yZg61bt+Lnn3/WCUUl4uLicPr0aSQnJ8Pd3R0AsG7dOjRs2BBHjx5FixYtnjgWaWlpKCwsRP/+/VG7dm0AgI+Pz5MHkYiInnuc2ZGppKQkHDlyBEOHDgUAWFhYYPDgwYiMjNRp17hxY+nPNWrUAABkZmYCAM6fP4+2bdvqtG/bti3Onz//yG2o1WoAukFCrVYjNzcXWq0WwP2ZncmTJ8PLywuOjo5wcHDA+fPnHzmzc/78ebi7u0tBBwC8vb3h6OhYqi+P0qRJE3Tt2hU+Pj5444038NVXX+H27dvl+iwRET3fGHZkKjIyEoWFhXBzc4OFhQUsLCywevVq/PDDD8jOzpbaWVpaSn9WKBQA7p82ehplbeNx2508eTK2bt2K+fPn4/fff0diYiJ8fHyQn5//lEf5P2ZmZqVOSRUUFEh/Njc3R2xsLHbu3Alvb2+sXLkS9e
vXR3Jyst77JCKi5wPDjgwVFhZi3bp1WLJkCRITE6XXyZMn4ebmhg0bNpRrO15eXjh48KBO2cGDB+Ht7f1M/Tt48CCGDx+Ofv36wcfHB66urrh27dpj+3Hjxg3cuHFDKjt37hyysrKkvlSvXh1paWk6n0tMTNR5r1Ao0LZtW8yaNQsnTpyAlZUVtm7d+kzHQkREpo9rdmRo27ZtuH37NkaNGgWVSqVTN2DAAERGRmLRokVP3E5oaCgGDRqEZs2awc/PD7/88gu2bNnyxCunnqRu3brYsmULevfuDYVCgenTpz92NsnPzw8+Pj4IDAzE8uXLUVhYiLFjx6Jjx47w9fUFAHTp0gWLFi3CunXroNFo8O233+LMmTNo1qwZACAhIQFxcXHo1q0bXFxckJCQgJs3b8LLy+uZjoWIiEwfZ3ZkKDIyEn5+fqWCDnA/7Bw7dgynTp164nb69u2Lzz77DIsXL0bDhg2xZs0aREVF6Sxw1sfSpUtRtWpVtGnTBr1794a/vz9effXVR7ZXKBT46aefULVqVXTo0AF+fn54+eWXsWnTJqmNv78/pk+fjo8++ggtWrTAnTt3MGzYMKleqVTiwIED6NmzJ+rVq4dp06ZhyZIl6NGjxzMdCxERmT6F4LW30Gq1UKlUyM7OhlKp1KnLzc1FcnIyPD09YWNjY6QekjHwZ09ERqfvpeZ63nX/efO47+8HcWaHiIiIZI1hh4iIiGSNYYeIiIhkjWGHiIiIZI1hp5y4jvvFw585EZE8MOw8gbm5OQA809196fl07949ALp3gyYioucPbyr4BBYWFrCzs8PNmzdhaWkJMzPmQ7kTQuDevXvIzMyEo6OjFHiJiOj5xLDzBAqFAjVq1EBycjKuX79u7O5QJXJ0dISrq6uxu0FERM+IYaccrKysULduXZ7KeoFYWlpyRoeISCYYdsrJzMyMd9ElIiJ6DnEBChEREckaZ3aIiIhMgb7PwaIn4swOERERyRrDDhEREckaww4RERHJGsMOERERyZpRw46HhwcUCkWpV3BwMAAgNzcXwcHBcHZ2hoODAwYMGICMjAydbaSkpCAgIAB2dnZwcXFBaGgoCgsLjXE4REREZIKMGnaOHj2KtLQ06RUbGwsAeOONNwAAEydOxC+//ILNmzdj//79SE1NRf/+/aXPFxUVISAgAPn5+Th06BDWrl2L6OhozJgxwyjHQ0RERKZHIUzo0c4TJkzAtm3bcOnSJWi1WlSvXh3r16/HwIEDAQAXLlyAl5cX4uPj0bp1a+zcuRO9evVCamoq1Go1ACAiIgJTpkzBzZs3YWVlVa79arVaqFQqZGdnQ6lUVtjxERERPZIhLz3vHGa4bZmw8n5/m8yanfz8fHz77bcYOXIkFAoFjh8/joKCAvj5+UltGjRogFq1aiE+Ph4AEB8fDx8fHynoAIC/vz+0Wi3Onj37yH3l5eVBq9XqvIiIiEieTOamgj/++COysrIwfPhwAEB6ejqsrKzg6Oio006tViM9PV1q82DQKakvqXuU8PBwzJo1y3CdJyIiMiUPzxK9IDM9j2IyMzuRkZHo0aMH3NzcKnxfYWFhyM7Oll43btyo8H0SERGRcZjEzM7169exZ88ebNmyRSpzdXVFfn4+srKydGZ3MjIy4OrqKrU5cuSIzrZKrtYqaVMWa2trWFtbG/AIiIiIyFSZxMxOVFQUXFxcEBAQIJU1b94clpaWiIuLk8qSkpKQkpICjUYDANBoNDh9+jQyMzOlNrGxsVAqlfD29q68AyAiIiKTZfSZneLiYkRFRSEoKAgWFv/rjkqlwqhRozBp0iQ4OTlBqVTigw8+gEajQevWrQEA3bp1g7e3N95++20sXLgQ6enpmDZtGoKDgzlzQ0RERABMIOzs2bMHKSkpGDlyZKm6ZcuWwczMDAMGDEBeXh78/f2xatUqqd7c3Bzbtm3DmDFjoNFoYG9vj6CgIMyePbsyD4GIiIhMmEndZ8dYeJ8dIiIyOkPeZ+
dhMr0a67m7zw4RERFRRWDYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZszB2B4iIiF5I+8KN3YMXBmd2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1vggUCIiIrkr66GjncMqvx9GYvSZnf/+979466234OzsDFtbW/j4+ODYsWNSvRACM2bMQI0aNWBraws/Pz9cunRJZxu3bt1CYGAglEolHB0dMWrUKOTk5FT2oRAREZEJMmrYuX37Ntq2bQtLS0vs3LkT586dw5IlS1C1alWpzcKFC7FixQpEREQgISEB9vb28Pf3R25urtQmMDAQZ8+eRWxsLLZt24YDBw5g9OjRxjgkIiIiMjEKIYQw1s6nTp2KgwcP4vfffy+zXggBNzc3fPjhh5g8eTIAIDs7G2q1GtHR0RgyZAjOnz8Pb29vHD16FL6+vgCAXbt2oWfPnvjrr7/g5ub2xH5otVqoVCpkZ2dDqVQa7gCJiIgepaxTS5VJBqexyvv9bdSZnZ9//hm+vr5444034OLigmbNmuGrr76S6pOTk5Geng4/Pz+pTKVSoVWrVoiPjwcAxMfHw9HRUQo6AODn5wczMzMkJCRU3sEQERGRSTJq2Ll69SpWr16NunXr4tdff8WYMWMwbtw4rF27FgCQnp4OAFCr1TqfU6vVUl16ejpcXFx06i0sLODk5CS1eVheXh60Wq3Oi4iIiOTJqFdjFRcXw9fXF/PnzwcANGvWDGfOnEFERASCgoIqbL/h4eGYNWtWhW2fiIiITIdRZ3Zq1KgBb29vnTIvLy+kpKQAAFxdXQEAGRkZOm0yMjKkOldXV2RmZurUFxYW4tatW1Kbh4WFhSE7O1t63bhxwyDHQ0RERKbHqGGnbdu2SEpK0im7ePEiateuDQDw9PSEq6sr4uLipHqtVouEhARoNBoAgEajQVZWFo4fPy612bt3L4qLi9GqVasy92ttbQ2lUqnzIiIiInky6mmsiRMnok2bNpg/fz4GDRqEI0eO4Msvv8SXX34JAFAoFJgwYQLmzp2LunXrwtPTE9OnT4ebmxv69u0L4P5MUPfu3fHuu+8iIiICBQUFCAkJwZAhQ8p1JRYRERHJm1HDTosWLbB161aEhYVh9uzZ8PT0xPLlyxEYGCi1+eijj3D37l2MHj0aWVlZaNeuHXbt2gUbGxupTUxMDEJCQtC1a1eYmZlhwIABWLFihTEOiYiIiEyMUe+zYyp4nx0iIqp0vM/OM3su7rNDREREVNEYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1owadmbOnAmFQqHzatCggVSfm5uL4OBgODs7w8HBAQMGDEBGRobONlJSUhAQEAA7Ozu4uLggNDQUhYWFlX0oREREz5d94bovGbMwdgcaNmyIPXv2SO8tLP7XpYkTJ2L79u3YvHkzVCoVQkJC0L9/fxw8eBAAUFRUhICAALi6uuLQoUNIS0vDsGHDYGlpifnz51f6sRAREZHpMXrYsbCwgKura6ny7OxsREZGYv369ejSpQsAICoqCl5eXj
h8+DBat26N3bt349y5c9izZw/UajWaNm2KOXPmYMqUKZg5cyasrKwq+3CIiIjIxOh1Guvq1asG68ClS5fg5uaGl19+GYGBgUhJSQEAHD9+HAUFBfDz85PaNmjQALVq1UJ8fDwAID4+Hj4+PlCr1VIbf39/aLVanD179pH7zMvLg1ar1XkRERGRPOkVdl555RV07twZ3377LXJzc/XeeatWrRAdHY1du3Zh9erVSE5ORvv27XHnzh2kp6fDysoKjo6OOp9Rq9VIT08HAKSnp+sEnZL6krpHCQ8Ph0qlkl7u7u56HwMRERGZNr3Czp9//onGjRtj0qRJcHV1xXvvvYcjR4489XZ69OiBN954A40bN4a/vz927NiBrKwsfPfdd/p0q9zCwsKQnZ0tvW7cuFGh+yMiIiLj0SvsNG3aFJ999hlSU1Px9ddfIy0tDe3atUOjRo2wdOlS3Lx5U6/OODo6ol69erh8+TJcXV2Rn5+PrKwsnTYZGRnSGh9XV9dSV2eVvC9rHVAJa2trKJVKnRcRERHJ0zNdem5hYYH+/ftj8+bN+PTTT3H58mVMnjwZ7u7uGDZsGNLS0p5qezk5Obhy5Qpq1KiB5s2bw9LSEnFxcVJ9UlISUlJSoNFoAAAajQanT59GZmam1CY2NhZKpRLe3t7PcmhERESG8/Bl3jK/1NvUPFPYOXbsGMaOHYsaNWpg6dKlmDx5Mq5cuYLY2FikpqaiT58+j/385MmTsX//fly7dg2HDh1Cv379YG5ujqFDh0KlUmHUqFGYNGkS9u3bh+PHj2PEiBHQaDRo3bo1AKBbt27w9vbG22+/jZMnT+LXX3/FtGnTEBwcDGtr62c5NCIiIpIJvS49X7p0KaKiopCUlISePXti3bp16NmzJ8zM7mcnT09PREdHw8PD47Hb+euvvzB06FD8888/qF69Otq1a4fDhw+jevXqAIBly5bBzMwMAwYMQF5eHvz9/bFq1Srp8+bm5ti2bRvGjBkDjUYDe3t7BAUFYfbs2focFhEREcmQQgghnvZDdevWxciRIzF8+HDUqFGjzDb5+fnYsGEDgoKCnrmTFU2r1UKlUiE7O5vrd4iIyPCeh9NWncOM3YOnVt7vb71mdi5duvTENlZWVs9F0CEiIiJ502vNTlRUFDZv3lyqfPPmzVi7du0zd4qIiIjIUPQKO+Hh4ahWrVqpchcXFz6TioiIiEyKXmEnJSUFnp6epcpr164tPe6BiIiIyBToFXZcXFxw6tSpUuUnT56Es7PzM3eKiIiIyFD0CjtDhw7FuHHjsG/fPhQVFaGoqAh79+7F+PHjMWTIEEP3kYiIiEhvel2NNWfOHFy7dg1du3aFhcX9TRQXF2PYsGFcs0NEREQmRa+wY2VlhU2bNmHOnDk4efIkbG1t4ePjg9q1axu6f0RERETPRK+wU6JevXqoV6+eofpCREREZHB6hZ2ioiJER0cjLi4OmZmZKC4u1qnfu3evQTpHRERE9Kz0Cjvjx49HdHQ0AgIC0KhRIygUCkP3i4iIiMgg9Ao7GzduxHfffYeePXsauj9EREREBqXXpedWVlZ45ZVXDN0XIiIiIoPTK+x8+OGH+Oyzz6DHA9OJiIiIKpVep7H++OMP7Nu3Dzt37kTDhg1haWmpU79lyxaDdI7I0JbFXixVNvE1XlFIRCRneoUdR0dH9OvXz9B9IXokhhQiItKXXmEnKirK0P0gemoPByCGHyIiKotea3YAoLCwEHv27MGaNWtw584dAEBqaipycnIM1jkiIiKiZ6XXzM7169fRvXt3pKSkIC8vD6+99hqqVKmCTz/9FHl5eYiIiDB0P4mIiIj0otfMzvjx4+Hr64vbt2/D1tZWKu/Xrx/i4uIM1jkiIiKiZ6XXzM7vv/+OQ4cOwcrKSqfcw8MD//3vfw3SMXpxcPExERFVJL1mdoqLi1FUVFSq/K+//kKVKlWeuVNEREREhqLXzE63bt2wfPlyfPnllwAAhUKBnJwcfP
LJJ3yEBJmUsmaNiIjoxaJX2FmyZAn8/f3h7e2N3NxcvPnmm7h06RKqVauGDRs2GLqPRERERHrTK+zUrFkTJ0+exMaNG3Hq1Cnk5ORg1KhRCAwM1FmwTERERGRseoUdALCwsMBbb71lyL4QSXj6iYiIDEWvsLNu3brH1g8bNkyvzhAREREZml5hZ/z48TrvCwoKcO/ePVhZWcHOzo5hh4yCs0FERFQWvS49v337ts4rJycHSUlJaNeuHRcoExERkUnR+9lYD6tbty4WLFhQataHiIiIyJgMFnaA+4uWU1NTDblJIiIiomei15qdn3/+Wee9EAJpaWn4/PPP0bZtW4N0jIiIiMgQ9JrZ6du3r86rf//+mDlzJho3boyvv/5ar44sWLAACoUCEyZMkMpyc3MRHBwMZ2dnODg4YMCAAcjIyND5XEpKCgICAmBnZwcXFxeEhoaisLBQrz4QERGR/Og1s1NcXGzQThw9ehRr1qxB48aNdconTpyI7du3Y/PmzVCpVAgJCUH//v1x8OBBAEBRURECAgLg6uqKQ4cOIS0tDcOGDYOlpSXmz59v0D4SERHJ2r7w0mWdwyq/HxXAoGt29JGTk4PAwEB89dVXqFq1qlSenZ2NyMhILF26FF26dEHz5s0RFRWFQ4cO4fDhwwCA3bt349y5c/j222/RtGlT9OjRA3PmzMEXX3yB/Px8Yx0SERERmRC9ZnYmTZpU7rZLly59bH1wcDACAgLg5+eHuXPnSuXHjx9HQUEB/Pz8pLIGDRqgVq1aiI+PR+vWrREfHw8fHx+o1Wqpjb+/P8aMGYOzZ8+iWbNmZe4zLy8PeXl50nutVlvu4yEiIqLni15h58SJEzhx4gQKCgpQv359AMDFixdhbm6OV199VWqnUCgeu52NGzfizz//xNGjR0vVpaenw8rKCo6OjjrlarUa6enpUpsHg05JfUndo4SHh2PWrFmP7RsRERHJg15hp3fv3qhSpQrWrl0rnXq6ffs2RowYgfbt2+PDDz984jZu3LiB8ePHIzY2FjY2Nvp0Q29hYWE6s1NarRbu7u6V2gciIiKqHHqFnSVLlmD37t06a2yqVq2KuXPnolu3buUKO8ePH0dmZqbOTFBRUREOHDiAzz//HL/++ivy8/ORlZWlM7uTkZEBV1dXAICrqyuOHDmis92Sq7VK2pTF2toa1tbW5TpWMjw+1oGIiCqTXmFHq9Xi5s2bpcpv3ryJO3fulGsbXbt2xenTp3XKRowYgQYNGmDKlClwd3eHpaUl4uLiMGDAAABAUlISUlJSoNFoAAAajQbz5s1DZmYmXFxcAACxsbFQKpXw9vbW59CIiIieXVlXNpHR6BV2+vXrhxEjRmDJkiVo2bIlACAhIQGhoaHo379/ubZRpUoVNGrUSKfM3t4ezs7OUvmoUaMwadIkODk5QalU4oMPPoBGo0Hr1q0BAN26dYO3tzfefvttLFy4EOnp6Zg2bRqCg4M5c0NEREQA9Aw7ERERmDx5Mt58800UFBTc35CFBUaNGoVFixYZrHPLli2DmZkZBgwYgLy8PPj7+2PVqlVSvbm5ObZt24YxY8ZAo9HA3t4eQUFBmD17tsH6QERERM83hRBC6Pvhu3fv4sqVKwCAOnXqwN7e3mAdq0xarRYqlQrZ2dlQKpXG7o7smdqanYmv1TN2F4hIbuRyGsvEbypY3u9vvWZ2SqSlpSEtLQ0dOnSAra0thBBPvNycyNSUFb4YgIiI5EOvOyj/888/6Nq1K+rVq4eePXsiLS0NwP01NuW5EouIiIiosugVdiZOnAhLS0ukpKTAzs5OKh88eDB27dplsM4RERERPSu9TmPt3r0bv/76K2rWrKlTXrduXVy/ft0gHSMiIiIyBL1mdu7evaszo1Pi1q1bvOSbiIiITIpeYad9+/ZYt26d9F6hUKC4uBgLFy5E586dDdY5IiIiomel12mshQsXomvXrjh27Bjy8/Px0Ucf4ezZs7h16xYOHjxo6D
4SERER6U2vmZ1GjRrh4sWLaNeuHfr06YO7d++if//+OHHiBOrUqWPoPhIRERHp7alndgoKCtC9e3dERETg448/rog+ERERERnMU8/sWFpa4tSpUxXRFyIiIiKD0+s01ltvvYXIyEhD94WIiIjI4PRaoFxYWIivv/4ae/bsQfPmzUs9E2vp0qUG6RwRERHRs3qqsHP16lV4eHjgzJkzePXVVwEAFy/qPleIz8YiIiIiU/JUYadu3bpIS0vDvn37ANx/PMSKFSugVqsrpHNEREREz+qpwo4QQuf9zp07cffuXYN2iIiIiEzEvnDd953DjNOPZ6TXmp0SD4cfIrlaFnuxVNnE1+oZoSdERPS0nupqLIVCUWpNDtfoEBERkSl76tNYw4cPlx72mZubi/fff7/U1VhbtmwxXA+JiIiInsFThZ2goCCd92+99ZZBO0NkKso6bUVERM+npwo7UVFRFdUPIiIiogrxTAuUiZ6EMyRERGRsej0ugoiIiOh5wZkdIiKiZ/HwvWjI5HBmh4iIiGSNYYeIiIhkjaexiPT08OJr3lGZiMg0cWaHiIiIZI1hh4iIiGSNYYeIiIhkjWGHiIiIZI1hh4iIiGSNYYeIiIhkzahhZ/Xq1WjcuDGUSiWUSiU0Gg127twp1efm5iI4OBjOzs5wcHDAgAEDkJGRobONlJQUBAQEwM7ODi4uLggNDUVhYWFlHwoRERGZKKOGnZo1a2LBggU4fvw4jh07hi5duqBPnz44e/YsAGDixIn45ZdfsHnzZuzfvx+pqano37+/9PmioiIEBAQgPz8fhw4dwtq1axEdHY0ZM2YY65CIiIjIxCiEEMLYnXiQk5MTFi1ahIEDB6J69epYv349Bg4cCAC4cOECvLy8EB8fj9atW2Pnzp3o1asXUlNToVarAQARERGYMmUKbt68CSsrq3LtU6vVQqVSITs7G0qlssKO7UX0Ij31nDcVJHpBvUjPxuocZuwe6Cjv97fJrNkpKirCxo0bcffuXWg0Ghw/fhwFBQXw8/OT2jRo0AC1atVCfHw8ACA+Ph4+Pj5S0AEAf39/aLVaaXaIiIiIXmxGf1zE6dOnodFokJubCwcHB2zduhXe3t5ITEyElZUVHB0dddqr1Wqkp6cDANLT03WCTkl9Sd2j5OXlIS8vT3qv1WoNdDQvthdpFoeIiJ4fRg879evXR2JiIrKzs/H9998jKCgI+/fvr9B9hoeHY9asWRW6D3rxlBX2eGqLiMj4jH4ay8rKCq+88gqaN2+O8PBwNGnSBJ999hlcXV2Rn5+PrKwsnfYZGRlwdXUFALi6upa6OqvkfUmbsoSFhSE7O1t63bhxw7AHRURERCbD6GHnYcXFxcjLy0Pz5s1haWmJuLg4qS4pKQkpKSnQaDQAAI1Gg9OnTyMzM1NqExsbC6VSCW9v70fuw9raWrrcveRFRERE8mTU01hhYWHo0aMHatWqhTt37mD9+vX47bff8Ouvv0KlUmHUqFGYNGkSnJycoFQq8cEHH0Cj0aB169YAgG7dusHb2xtvv/02Fi5ciPT0dEybNg3BwcGwtrY25qERERGRiTBq2MnMzMSwYcOQlpYGlUqFxo0b49dff8Vrr70GAFi2bBnMzMwwYMAA5OXlwd/fH6tWrZI+b25ujm3btmHMmDHQaDSwt7dHUFAQZs+ebaxDIiIiIhNjcvfZMQbeZ8cweDVWaVygTPQC4H12jOa5u88OERERUUVg2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIlkz+oNAiYiInisv0n11ZIIzO0RERCRrDDtEREQkaww7REREJGsMO0RERCRrDDtEREQkaww7REREJGsMO0RERCRrDDtEREQkaww7REREJGsMO0RERCRrfFwEUSVaFnuxVNnE1+oZoSdERHoo61EZncMqvx9PiTM7REREJGsMO0RERCRrPI1FVIHKOm31pDY8rUVEZFic2SEiIiJZY9ghIi
IiWeNpLNJbeU7REBERGRvDDhEREenvObgcnaexiIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNaMGnbCw8PRokULVKlSBS4uLujbty+SkpJ02uTm5iI4OBjOzs5wcHDAgAEDkJGRodMmJSUFAQEBsLOzg4uLC0JDQ1FYWFiZh0JEREQmyqhhZ//+/QgODsbhw4cRGxuLgoICdOvWDXfv3pXaTJw4Eb/88gs2b96M/fv3IzU1Ff3795fqi4qKEBAQgPz8fBw6dAhr165FdHQ0ZsyYYYxDIiIiIhOjEEIIY3eixM2bN+Hi4oL9+/ejQ4cOyM7ORvXq1bF+/XoMHDgQAHDhwgV4eXkhPj4erVu3xs6dO9GrVy+kpqZCrVYDACIiIjBlyhTcvHkTVlZWT9yvVquFSqVCdnY2lEplhR6jnPBxERWDTz0nMnFl3TGYdFXSHZTL+/1tUmt2srOzAQBOTk4AgOPHj6OgoAB+fn5SmwYNGqBWrVqIj48HAMTHx8PHx0cKOgDg7+8PrVaLs2fPlrmfvLw8aLVanRcRERHJk8mEneLiYkyYMAFt27ZFo0aNAADp6emwsrKCo6OjTlu1Wo309HSpzYNBp6S+pK4s4eHhUKlU0svd3d3AR0NERESmwmTCTnBwMM6cOYONGzdW+L7CwsKQnZ0tvW7cuFHh+yQiIiLjMImnnoeEhGDbtm04cOAAatasKZW7uroiPz8fWVlZOrM7GRkZcHV1ldocOXJEZ3slV2uVtHmYtbU1rK2tDXwUREREZIqMOrMjhEBISAi2bt2KvXv3wtPTU6e+efPmsLS0RFxcnFSWlJSElJQUaDQaAIBGo8Hp06eRmZkptYmNjYVSqYS3t3flHAgRERGZLKPO7AQHB2P9+vX46aefUKVKFWmNjUqlgq2tLVQqFUaNGoVJkybByckJSqUSH3zwATQaDVq3bg0A6NatG7y9vfH2229j4cKFSE9Px7Rp0xAcHMzZG3oulXWVG6/QIjISXnklC0YNO6tXrwYAdOrUSac8KioKw4cPBwAsW7YMZmZmGDBgAPLy8uDv749Vq1ZJbc3NzbFt2zaMGTMGGo0G9vb2CAoKwuzZsyvrMIiIiMiEGTXslOcWPzY2Nvjiiy/wxRdfPLJN7dq1sWPHDkN2jYiIiGTCZK7GIiIiIqoIDDtEREQkaww7REREJGsmcZ8dIiIikpGHr2KrpGdlPQpndoiIiEjWGHaIiIhI1ngai8qlrBvdUeUpz/jzxoNERGXjzA4RERHJGsMOERERyRrDDhEREckaww4RERHJGsMOERERyRrDDhEREckaww4RERHJGsMOERERyRpvKkhl4k0Enz9l/cx4o0EiIoYdIiKi/3n4AZYkCzyNRURERLLGsENERESyxtNYRDL28DoeruEhohcRZ3aIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1rhAmegFx0XMRCR3DDtELxDeGZuIXkQMO0Skg4+dICK54ZodIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1o4adAwcOoHfv3nBzc4NCocCPP/6oUy+EwIwZM1CjRg3Y2trCz88Ply5d0mlz69YtBAYGQqlUwtHREaNGjUJOTk4lHgURERGZMqOGnbt376JJkyb44osvyqxfuHAhVqxYgYiICCQkJMDe3h7+/v7Izc2V2gQGBuLs2bOIjY3Ftm3bcODAAYwePbqyDoGIiIhMnFHvs9OjRw/06NGjzDohBJYvX45p06ahT58+AIB169ZBrVbjxx9/xJAhQ3D+/Hns2rULR48eha+vLwBg5cqV6NmzJxYvXgw3N7dKOxYiInrO7As3dg+okpjsTQWTk5ORnp4OPz8/qUylUqFVq1aIj4/HkCFDEB8fD0dHRynoAICfnx/MzMyQkJCAfv36lbntvLw85OXlSe+1Wm3FHchzgHfVJSIiOTPZBcrp6ekAALVarVOuVq
uluvT0dLi4uOjUW1hYwMnJSWpTlvDwcKhUKunl7u5u4N4TERGRqTDZsFORwsLCkJ2dLb1u3Lhh7C4RERFRBTHZsOPq6goAyMjI0CnPyMiQ6lxdXZGZmalTX1hYiFu3bkltymJtbQ2lUqnzIiIiInky2bDj6ekJV1dXxMXFSWVarRYJCQnQaDQAAI1Gg6ysLBw/flxqs3fvXhQXF6NVq1aV3meiF9my2Is6LyIiU2HUBco5OTm4fPmy9D45ORmJiYlwcnJCrVq1MGHCBMydOxd169aFp6cnpk+fDjc3N/Tt2xcA4OXlhe7du+Pdd99FREQECgoKEBISgiFDhvBKLCIiIgJg5LBz7NgxdO7cWXo/adIkAEBQUBCio6Px0Ucf4e7duxg9ejSysrLQrl077Nq1CzY2NtJnYmJiEBISgq5du8LMzAwDBgzAihUrKv1YiOTs4Zmaia/VM1JPiIienlHDTqdOnSCEeGS9QqHA7NmzMXv27Ee2cXJywvr16yuie0RERCQDJnufHSIyXVyTQ0TPE4adFxC/qIiI6EXCsENEFaKsUM21PkRkDCZ76TkRERGRIXBmh4hMCmeEiMjQOLNDREREssaZHSIikr994cbuARkRww4RVRqeoiIiY+BpLCIiIpI1hh0iIiKSNYYdIiIikjWu2SEio+IdvYmoojHsEJHJ41PXiehZ8DQWERERyRpndojohcVL4YleDAw7RCRLDDJEVIKnsYiIiEjWGHaIiIhI1ngai4ieOzxFRURPg2FH5ngPEyIietEx7BARPaA8s0acWSJ6vjDsENELgzOdRC8mhh0ikgUGGdKxL9zYPSATwrAjM/wHn8jwyvP3io+0IDJdDDtERJWkPKGJIYnI8Bh2iIgqAGdZiUwHbypIREREssaZHSIiE2bIy9y5roheVAw7REQmRN/TX4YKMlxXRHLEsENE9JzR5+owohcZww4RERlcRd5lmqfj6Gkx7BARvaCMPfvDx25QZZFN2Pniiy+waNEipKeno0mTJli5ciVatmxp7G5VKGP/Q0VELyaT+7eHd0umJ5BF2Nm0aRMmTZqEiIgItGrVCsuXL4e/vz+SkpLg4uJi7O4REVE5VVSQir/6T6kyzcvOFbIvMj0KIYQwdieeVatWrdCiRQt8/vnnAIDi4mK4u7vjgw8+wNSpU5/4ea1WC5VKhezsbCiVyorurl5M7n9SREQmonXKl6XKHg4y5Q07D7fTNxAZajuVue0KDYSdwwyznYeU9/v7uZ/Zyc/Px/HjxxEW9r+BNDMzg5+fH+Lj443Ys/LjeWsiIsMq64u7otqUJxCUZzv6Kk9I0Xf/+h5/qUDWWa/dG8xzH3b+/vtvFBUVQa1W65Sr1WpcuHChzM/k5eUhLy9Pep+dnQ3gfkI0tC/2Xi5VFtzlFZ33uXdzSrV5uC9ltSEikrsWf0U9sc3dSujH4+w5m2rkHpRWmX0qz74q4vv1we0+6STVcx929BEeHo5Zs2aVKnd3d6+U/f+fgdoQERE9Fz74vEI3f+fOHahUqkfWP/dhp1q1ajA3N0dGRoZOeUZGBlxdXcv8TFhYGCZNmiS9Ly4uxq1bt+Ds7AyFQlGh/X0WWq0W7u7uuHHjhsmuLXoecVwrBse1YnBcKwbHtWJU9LgKIXDnzh24ubk9tt1zH3asrKzQvHlzxMXFoW/fvgDuh5e4uDiEhISU+Rlra2tYW1vrlDk6OlZwTw1HqVTyL2MF4LhWDI5rxeC4VgyOa8WoyHF93IxOiec+7ADApEmTEBQUBF9fX7Rs2RLLly/H3bt3MWLECGN3jYiIiIxMFmFn8ODBuHnzJmbMmIH09HQ0bdoUu3btKrVomYiIiF48sgg7ABASEvLI01ZyYW1tjU8++aTUKTh6NhzXisFxrRgc14rBca0YpjKusripIBEREdGjmBm7A0REREQViWGHiIiIZI1hh4iIiGSNYYeIiIhkjWHHyMLDw9GiRQtUqVIFLi4u6Nu3L5KSknTa5O
bmIjg4GM7OznBwcMCAAQNK3TE6JSUFAQEBsLOzg4uLC0JDQ1FYWFiZh2KyFixYAIVCgQkTJkhlHFP9/fe//8Vbb70FZ2dn2NrawsfHB8eOHZPqhRCYMWMGatSoAVtbW/j5+eHSpUs627h16xYCAwOhVCrh6OiIUaNGISfnxX3+W1FREaZPnw5PT0/Y2tqiTp06mDNnjs7zfjiuT3bgwAH07t0bbm5uUCgU+PHHH3XqDTWGp06dQvv27WFjYwN3d3csXLiwog/NqB43rgUFBZgyZQp8fHxgb28PNzc3DBs2DKmpus/LMvq4CjIqf39/ERUVJc6cOSMSExNFz549Ra1atUROTo7U5v333xfu7u4iLi5OHDt2TLRu3Vq0adNGqi8sLBSNGjUSfn5+4sSJE2LHjh2iWrVqIiwszBiHZFKOHDkiPDw8ROPGjcX48eOlco6pfm7duiVq164thg8fLhISEsTVq1fFr7/+Ki5fviy1WbBggVCpVOLHH38UJ0+eFK+//rrw9PQU//77r9Sme/fuokmTJuLw4cPi999/F6+88ooYOnSoMQ7JJMybN084OzuLbdu2ieTkZLF582bh4OAgPvvsM6kNx/XJduzYIT7++GOxZcsWAUBs3bpVp94QY5idnS3UarUIDAwUZ86cERs2bBC2trZizZo1lXWYle5x45qVlSX8/PzEpk2bxIULF0R8fLxo2bKlaN68uc42jD2uDDsmJjMzUwAQ+/fvF0Lc/0WytLQUmzdvltqcP39eABDx8fFCiPu/iGZmZiI9PV1qs3r1aqFUKkVeXl7lHoAJuXPnjqhbt66IjY0VHTt2lMIOx1R/U6ZMEe3atXtkfXFxsXB1dRWLFi2SyrKysoS1tbXYsGGDEEKIc+fOCQDi6NGjUpudO3cKhUIh/vvf/1Zc501YQECAGDlypE5Z//79RWBgoBCC46qPh7+UDTWGq1atElWrVtX5d2DKlCmifv36FXxEpqGsEPmwI0eOCADi+vXrQgjTGFeexjIx2dnZAAAnJycAwPHjx1FQUAA/Pz+pTYMGDVCrVi3Ex8cDAOLj4+Hj46Nzx2h/f39otVqcPXu2EntvWoKDgxEQEKAzdgDH9Fn8/PPP8PX1xRtvvAEXFxc0a9YMX331lVSfnJyM9PR0nbFVqVRo1aqVztg6OjrC19dXauPn5wczMzMkJCRU3sGYkDZt2iAuLg4XL14EAJw8eRJ//PEHevToAYDjagiGGsP4+Hh06NABVlZWUht/f38kJSXh9u3blXQ0pi07OxsKhUJ65qQpjKts7qAsB8XFxZgwYQLatm2LRo0aAQDS09NhZWVV6kGlarUa6enpUpuHH41R8r6kzYtm48aN+PPPP3H06NFSdRxT/V29ehWrV6/GpEmT8H//9384evQoxo0bBysrKwQFBUljU9bYPTi2Li4uOvUWFhZwcnJ6Ycd26tSp0Gq1aNCgAczNzVFUVIR58+YhMDAQADiuBmCoMUxPT4enp2epbZTUVa1atUL6/7zIzc3FlClTMHToUOnBn6Ywrgw7JiQ4OBhnzpzBH3/8YeyuPNdu3LiB8ePHIzY2FjY2NsbujqwUFxfD19cX8+fPBwA0a9YMZ86cQUREBIKCgozcu+fXd999h5iYGKxfvx4NGzZEYmIiJkyYADc3N44rPTcKCgowaNAgCCGwevVqY3dHB09jmYiQkBBs27YN+/btQ82aNaVyV1dX5OfnIysrS6d9RkYGXF1dpTYPX0lU8r6kzYvk+PHjyMzMxKuvvgoLCwtYWFhg//79WLFiBSwsLKBWqzmmeqpRowa8vb11yry8vJCSkgLgf2NT1tg9OLaZmZk69YWFhbh169YLO7ahoaGYOnUqhgwZAh8fH7z99tuYOHEiwsPDAXBcDcFQY8h/G8pWEnSuX7+O2NhYaVYHMI1xZdgxMiEEQkJCsHXrVuzdu7fUNF7z5s1haWmJuLg4qSwpKQkpKSnQaDQAAI1Gg9OnT+v8MpX8sj
38xfQi6Nq1K06fPo3ExETp5evri8DAQOnPHFP9tG3bttStES5evIjatWsDADw9PeHq6qoztlqtFgkJCTpjm5WVhePHj0tt9u7di+LiYrRq1aoSjsL03Lt3D2Zmuv8cm5ubo7i4GADH1RAMNYYajQYHDhxAQUGB1CY2Nhb169d/YU9hlQSdS5cuYc+ePXB2dtapN4lxNcgyZ9LbmDFjhEqlEr/99ptIS0uTXvfu3ZPavP/++6JWrVpi79694tixY0Kj0QiNRiPVl1wm3a1bN5GYmCh27dolqlev/sJfJv2gB6/GEoJjqq8jR44ICwsLMW/ePHHp0iURExMj7OzsxLfffiu1WbBggXB0dBQ//fSTOHXqlOjTp0+Zl/c2a9ZMJCQkiD/++EPUrVv3hbpE+mFBQUHipZdeki4937Jli6hWrZr46KOPpDYc1ye7c+eOOHHihDhx4oQAIJYuXSpOnDghXRVkiDHMysoSarVavP322+LMmTNi48aNws7OTtaXnj9uXPPz88Xrr78uatasKRITE3W+xx68ssrY48qwY2QAynxFRUVJbf79918xduxYUbVqVWFnZyf69esn0tLSdLZz7do10aNHD2FrayuqVasmPvzwQ1FQUFDJR2O6Hg47HFP9/fLLL6JRo0bC2tpaNGjQQHz55Zc69cXFxWL69OlCrVYLa2tr0bVrV5GUlKTT5p9//hFDhw4VDg4OQqlUihEjRog7d+5U5mGYFK1WK8aPHy9q1aolbGxsxMsvvyw+/vhjnS8LjuuT7du3r8x/T4OCgoQQhhvDkydPinbt2glra2vx0ksviQULFlTWIRrF48Y1OTn5kd9j+/btk7Zh7HFVCPHALTqJiIiIZIZrdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaISLY8PDywfPlyY3eDiIyMYYeIHis+Ph7m5uYICAgwdleIiPTCsENEjxUZGYkPPvgABw4cQGpqqrG7I2sPPgSRiAyHYYeIHiknJwebNm3CmDFjEBAQgOjoaJ363377DQqFAnFxcfD19YWdnR3atGlT6snoq1evRp06dWBlZYX69evjm2++0alXKBRYs2YNevXqBTs7O3h5eSE+Ph6XL19Gp06dYG9vjzZt2uDKlSvSZ65cuYI+ffpArVbDwcEBLVq0wJ49ex55LCNHjkSvXr10ygoKCuDi4oLIyMgyP3P9+nX07t0bVatWhb29PRo2bIgdO3ZI9WfPnkWvXr2gVCpRpUoVtG/fXupjcXExZs+ejZo1a8La2hpNmzbFrl27pM9eu3YNCoUCmzZtQseOHWFjY4OYmBgAwH/+8x94eXnBxsYGDRo0wKpVqx55XERUDgZ7yhYRyU5kZKTw9fUVQtx/AGidOnVEcXGxVF/ygMBWrVqJ3377TZw9e1a0b99etGnTRmqzZcsWYWlpKb744guRlJQklixZIszNzcXevXulNgDESy+9JDZt2iSSkpJE3759hYeHh+jSpYvYtWuXOHfunGjdurXo3r279JnExEQREREhTp8+LS5evCimTZsmbGxspCdcCyFE7dq1xbJly4QQQhw8eFCYm5uL1NRUnb7Z29s/8mGZAQEB4rXXXhOnTp0SV65cEb/88ovYv3+/EEKIv/76Szg5OYn+/fuLo0ePiqSkJPH111+LCxcuCCGEWLp0qVAqlWLDhg3iwoUL4qOPPhKWlpbi4sWLQgghPUDRw8ND/PDDD+Lq1asiNTVVfPvtt6JGjRpS2Q8//CCcnJxEdHS0Xj9DIuJTz4noMdq0aSOWL18uhBCioKBAVKtWTedJxiVhZ8+ePVLZ9u3bBQDx77//Stt49913dbb7xhtviJ49e0rvAYhp06ZJ7+Pj4wUAERkZKZVt2LBB2NjYPLa/DRs2FCtXrpTePxh2hBDC29tbfPrpp9L73r17i+HDhz9yez4+PmLmzJll1oWFhQlPT0+Rn59fZr2bm5uYN2
+eTlmLFi3E2LFjhRD/Czsl41uiTp06Yv369Tplc+bMERqN5pH9JKLH42ksIipTUlISjhw5gqFDhwIALCwsMHjw4DJP+TRu3Fj6c40aNQAAmZmZAIDz58+jbdu2Ou3btm2L8+fPP3IbarUaAODj46NTlpubC61WC+D+KbbJkyfDy8sLjo6OcHBwwPnz55GSkvLIY3rnnXcQFRUFAMjIyMDOnTsxcuTIR7YfN24c5s6di7Zt2+KTTz7BqVOnpLrExES0b98elpaWpT6n1WqRmpparuP29fWV/nz37l1cuXIFo0aNgoODg/SaO3euzik8Ino6FsbuABGZpsjISBQWFsLNzU0qE0LA2toan3/+OVQqlVT+4Be+QqEAcH/NytMoaxuP2+7kyZMRGxuLxYsX45VXXoGtrS0GDhyI/Pz8R+5j2LBhmDp1KuLj43Ho0CF4enqiffv2j2z/zjvvwN/fH9u3b8fu3bsRHh6OJUuW4IMPPoCtre1THd+j2NvbS3/OyckBAHz11Vdo1aqVTjtzc3OD7I/oRcSZHSIqpbCwEOvWrcOSJUuQmJgovU6ePAk3Nzds2LCh3Nvy8vLCwYMHdcoOHjwIb2/vZ+rjwYMHMXz4cPTr1w8+Pj5wdXXFtWvXHvsZZ2dn9O3bF1FRUYiOjsaIESOeuB93d3e8//772LJlCz788EN89dVXAO7PRP3+++9lXkGlVCrh5ub21MetVqvh5uaGq1ev4pVXXtF5eXp6PrGvRFQ2zuwQUSnbtm3D7du3MWrUKJ0ZHAAYMGAAIiMj8f7775drW6GhoRg0aBCaNWsGPz8//PLLL9iyZctjr5wqj7p162LLli3o3bs3FAoFpk+fXq7ZpHfeeQe9evVCUVERgoKCHtt2woQJ6NGjB+rVq4fbt29j37598PLyAgCEhIRg5cqVGDJkCMLCwqBSqXD48GG0bNkS9evXR2hoKD755BPUqVMHTZs2RVRUFBITE6Urrh5l1qxZGDduHFQqFbp37468vDwcO3YMt2/fxqRJk8o/QEQkYdgholIiIyPh5+dXKugA98POwoULddavPE7fvn3x2WefYfHixRg/fjw8PT0RFRWFTp06PVMfly5dipEjR6JNmzaoVq0apkyZIq3neRw/Pz/UqFEDDRs21DlFV5aioiIEBwfjr7/+glKpRPfu3bFs2TIA92eJ9u7di9DQUHTs2BHm5uZo2rSptE5n3LhxyM7OxocffojMzEx4e3vj559/Rt26dR+7z3feeQd2dnZYtGgRQkNDYW9vDx8fH0yYMKF8A0NEpSiEEMLYnSAiqiw5OTl46aWXEBUVhf79+xu7O0RUCTizQ0QvhOLiYvz9999YsmQJHB0d8frrrxu7S0RUSRh2iOiFkJKSAk9PT9SsWRPR0dGwsOA/f0QvCp7GIiIiIlnjpedEREQkaww7REREJGsMO0RERCRrDDtEREQkaww7REREJGsMO0RERCRrDDtEREQkaww7REREJGsMO0RERCRr/w+QjF6nEbzVpAAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "scripts.eval_detector(\n", + " scripts.EvalDetectorConfig(\n", + " detector=detector,\n", + " task=tasks.Task.from_separate_data(\n", + " model=model,\n", + " # TODO: this won't actually be used, plausibly Tasks should be split better\n", + " # into their training and test data.\n", + " trusted_data=train_data,\n", + " # Our anomalous data is the backdoor data from above, except we use the\n", + " # MNIST test split.\n", + " anomalous_test_data=data.BackdoorDataset(\n", + " original=val_data,\n", + " backdoor=data.CornerPixelBackdoor(),\n", + " ),\n", + " # Our normal data is MNIST with added noise, this makes the images OOD\n", + " # but they shouldn't be mechanistically anomalous.\n", + " clean_test_data=data.TransformDataset(val_data, data.GaussianNoise(0.3)),\n", + " ),\n", + " )\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As we can see, adding noise did make the images quite a bit more \"anomalous\" according to our detector (the blue histogram has shifted to the right to higher anomaly scores). But we still have a very clear separation between these \"merely noisy\" inputs and the backdoored inputs. 
(This is a very easy to detect backdoor.)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "cupbearer", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/src/cupbearer/data/__init__.py b/src/cupbearer/data/__init__.py index 479558ef..706d0ae8 100644 --- a/src/cupbearer/data/__init__.py +++ b/src/cupbearer/data/__init__.py @@ -4,14 +4,19 @@ MixedData, MixedDataConfig, SubsetConfig, - TrainDataFromRun, + TransformDataset, split_dataset_cfg, ) -from .adversarial import AdversarialExampleConfig -from .backdoor_data import BackdoorData -from .backdoors import Backdoor, CornerPixelBackdoor, NoiseBackdoor, WanetBackdoor -from .pytorch import CIFAR10, GTSRB, MNIST, PytorchConfig -from .toy_ambiguous_features import ToyFeaturesConfig +from .adversarial import AdversarialExampleDataset, make_adversarial_examples +from .backdoors import ( + Backdoor, + BackdoorDataset, + CornerPixelBackdoor, + NoiseBackdoor, + WanetBackdoor, +) +from .pytorch import CIFAR10, GTSRB, MNIST, PytorchDataset +from .toy_ambiguous_features import ToyDataset from .transforms import ( GaussianNoise, RandomCrop, diff --git a/src/cupbearer/data/_shared.py b/src/cupbearer/data/_shared.py index 911a77c6..fedbdce4 100644 --- a/src/cupbearer/data/_shared.py +++ b/src/cupbearer/data/_shared.py @@ -1,13 +1,11 @@ from abc import ABC, abstractproperty from dataclasses import dataclass, field -from pathlib import Path from typing import Optional from torch.utils.data import Dataset, Subset from torchvision.transforms import Compose from cupbearer.data.transforms import Transform -from cupbearer.utils.scripts import load_config from cupbearer.utils.utils import BaseConfig @@ -101,6 +99,27 
@@ def get_test_split(self) -> "DatasetConfig": # to the full dataset on build. +# def split_dataset(dataset: Dataset, *fractions: float) -> list[Subset]: +# if not fractions: +# raise ValueError("At least one fraction must be provided.") +# if not all(0 <= f <= 1 for f in fractions): +# raise ValueError("Fractions must be between 0 and 1.") +# if not sum(fractions) == 1: +# fractions = fractions + (1 - sum(fractions),) + +# total = len(dataset) + +# markers = [int(total * fraction) for fraction in fractions] + +# subsets = [] +# current_start = 0 +# for marker in markers: +# subsets.append(Subset(dataset, range(current_start, current_start + marker))) +# current_start += marker +# assert current_start == total +# return subsets + + def split_dataset_cfg(cfg: DatasetConfig, *fractions: float) -> list[SubsetConfig]: if not fractions: raise ValueError("At least one fraction must be provided.") @@ -133,57 +152,29 @@ def __getitem__(self, index): return self.transform(sample) -@dataclass -class TrainDataFromRun(DatasetConfig): - path: Path - - def get_test_split(self) -> DatasetConfig: - return self.cfg.get_test_split() - - def __post_init__(self): - self._cfg = None - - @property - def cfg(self): - if self._cfg is None: - # It's important we cache this, not mainly for performance reasons, - # but because otherwise we'd get different instances every time. - # Mostly that would be fine, but e.g. 
the Wanet backdoor transform - # actually has state not captured by its fields - # (it's not a "real" dataclass) - self._cfg = load_config(self.path, "train_data", DatasetConfig) - - return self._cfg - - @property - def num_classes(self): - return self.cfg.num_classes - - def _build(self) -> Dataset: - return self.cfg._build() - - def get_transforms(self) -> list[Transform]: - transforms = self.cfg.get_transforms() + super().get_transforms() - return transforms - - class MixedData(Dataset): def __init__( self, normal: Dataset, anomalous: Dataset, - normal_weight: float = 0.5, + normal_weight: Optional[float] = 0.5, return_anomaly_labels: bool = True, ): self.normal_data = normal self.anomalous_data = anomalous self.normal_weight = normal_weight self.return_anomaly_labels = return_anomaly_labels - self._length = min( - int(len(normal) / normal_weight), int(len(anomalous) / (1 - normal_weight)) - ) - self.normal_len = int(self._length * normal_weight) - self.anomalous_len = self._length - self.normal_len + if normal_weight is None: + self.normal_len = len(normal) + self.anomalous_len = len(anomalous) + self._length = self.normal_len + self.anomalous_len + else: + self._length = min( + int(len(normal) / normal_weight), + int(len(anomalous) / (1 - normal_weight)), + ) + self.normal_len = int(self._length * normal_weight) + self.anomalous_len = self._length - self.normal_len def __len__(self): return self._length diff --git a/src/cupbearer/data/adversarial.py b/src/cupbearer/data/adversarial.py index efad5001..7847c1c4 100644 --- a/src/cupbearer/data/adversarial.py +++ b/src/cupbearer/data/adversarial.py @@ -1,5 +1,4 @@ import os -from dataclasses import dataclass from pathlib import Path from typing import Optional @@ -9,41 +8,62 @@ from matplotlib import pyplot as plt from torch.utils.data import DataLoader, Dataset, Subset -from cupbearer.models import StoredModel from cupbearer.utils import utils -from . 
import DatasetConfig, TrainDataFromRun +class AdversarialExampleDataset(Dataset): + def __init__(self, advexes: torch.Tensor, labels: torch.Tensor): + self.advexes = advexes + self.labels = labels + + @classmethod + def from_file(cls, filepath: Path, num_examples=None): + data = utils.load(filepath) + assert isinstance(data, dict) + advexes = data["adv_inputs"] + labels = data["labels"] + + if num_examples is None: + num_examples = len(advexes) + if len(advexes) < num_examples: + raise ValueError( + f"Only {len(advexes)} adversarial examples exist, " + f"but {num_examples} were requested" + ) + + return cls(advexes[:num_examples], labels[:num_examples]) + + def __len__(self): + return len(self.advexes) -def make_adversarial_example( - path: Path, - filename: str, + def __getitem__(self, idx): + # Labels are the original ones. We need to return them mainly for implementation + # reasons: for eval, normal and anomalous data will be batched together, so + # since the normal data includes labels, the anomalous one needs to as well. + # TODO: Probably detectors should just never have access to labels during evals + # (none of the current ones make use of them anyway). If a detector needs them, + # it should use the model-generated labels, not ground truth ones. 
+        return self.advexes[idx], int(self.labels[idx])
+
+
+def make_adversarial_examples(
+    model: torch.nn.Module,
+    dataset: Dataset,
+    save_path: Path | str,
     batch_size: int = 128,
     eps: float = 8 / 255,
     max_examples: Optional[int] = None,
     success_threshold: float = 0.1,
     steps: int = 40,
-    use_test_data: bool = False,
-):
-    save_path = path / f"{filename}.pt"
+) -> AdversarialExampleDataset:
+    save_path = Path(save_path).with_suffix(".pt")
     if os.path.exists(save_path):
         logger.info("Adversarial examples already exist, skipping attack")
-        return
-    else:
-        logger.info(
-            "Adversarial examples not found, running attack with default settings"
-        )
-
-    model_cfg = StoredModel(path=path)
-    data_cfg = TrainDataFromRun(path=path)
-    if use_test_data:
-        data_cfg = data_cfg.get_test_split()
+        return AdversarialExampleDataset.from_file(save_path, num_examples=max_examples)
 
-    dataset = data_cfg.build()
     if max_examples:
         dataset = Subset(dataset, range(max_examples))
-    image, _ = dataset[0]
-    model = model_cfg.build_model(input_shape=image.shape)
+
     dataloader = DataLoader(
         dataset,
         batch_size=batch_size,
@@ -57,6 +77,8 @@ def make_adversarial_example(
         # N.B. rob_acc is in percent while success_threshold is not
         if rob_acc > 100 * success_threshold:
+            # Make sure we delete the unsuccessful data so we don't load it later
+            save_path.unlink()
             raise RuntimeError(
                 "Attack failed, new accuracy is"
                 f" {rob_acc}% > {100 * success_threshold}%."
@@ -74,67 +96,6 @@ def make_adversarial_example( except IndexError: pass plt.tight_layout() - plt.savefig(path / "adv_examples.pdf") - - -@dataclass -class AdversarialExampleConfig(DatasetConfig): - path: Path - attack_batch_size: int = 128 - success_threshold: float = 0.1 - steps: int = 40 - eps: float = 8 / 255 - use_test_data: bool = False - - def _build(self) -> Dataset: - filename = f"adv_examples_{'test' if self.use_test_data else 'train'}" - make_adversarial_example( - path=self.path, - filename=filename, - batch_size=self.attack_batch_size, - eps=self.eps, - max_examples=self.max_size, - success_threshold=self.success_threshold, - steps=self.steps, - use_test_data=self.use_test_data, - ) - - return AdversarialExampleDataset( - filepath=self.path / filename, num_examples=self.max_size - ) - - @property - def num_classes(self): - data_cfg = TrainDataFromRun(path=self.path) - return data_cfg.num_classes - - -class AdversarialExampleDataset(Dataset): - def __init__(self, filepath: Path, num_examples=None): - data = utils.load(filepath) - assert isinstance(data, dict) - self.examples = data["adv_inputs"] - self.labels = data["labels"] + plt.savefig(save_path.with_suffix(".pdf")) - if num_examples is None: - num_examples = len(self.examples) - self.num_examples = num_examples - if len(self.examples) < num_examples: - raise ValueError( - f"Only {len(self.examples)} adversarial examples exist, " - f"but {num_examples} were requested" - ) - - def __len__(self): - return self.num_examples - - def __getitem__(self, idx): - if idx >= self.num_examples: - raise IndexError(f"Index {idx} is out of range") - # Labels are the original ones. We need to return them mainly for implementation - # reasons: for eval, normal and anomalous data will be batched together, so - # since the normal data includes labels, the anomalous one needs to as well. 
- # TODO: Probably detectors should just never have access to labels during evals - # (none of the current ones make use of them anyway). If a detector needs them, - # it should use the model-generated labels, not ground truth ones. - return self.examples[idx], int(self.labels[idx]) + return AdversarialExampleDataset.from_file(save_path) diff --git a/src/cupbearer/data/backdoor_data.py b/src/cupbearer/data/backdoor_data.py deleted file mode 100644 index 44e79c4b..00000000 --- a/src/cupbearer/data/backdoor_data.py +++ /dev/null @@ -1,35 +0,0 @@ -# This needs to be in a separate file from backdoors.py because of circularity issues -# with the config groups. See __init__.py. -from dataclasses import dataclass - -from cupbearer.data import DatasetConfig -from cupbearer.data.backdoors import Backdoor -from cupbearer.data.transforms import Transform - - -@dataclass -class BackdoorData(DatasetConfig): - original: DatasetConfig - backdoor: Backdoor - - def get_test_split(self) -> DatasetConfig: - return BackdoorData( - original=self.original.get_test_split(), backdoor=self.backdoor - ) - - @property - def num_classes(self): - return self.original.num_classes - - def get_transforms(self) -> list[Transform]: - # We can't set this in __post_init__, since then the backdoor would be part of - # transforms in the config that's stored to disk. If we then load this config, - # another backdoor would be added to the transforms. 
- transforms = [] - transforms += self.original.get_transforms() - transforms += super().get_transforms() - transforms += [self.backdoor] - return transforms - - def _build(self): - return self.original._build() diff --git a/src/cupbearer/data/backdoors.py b/src/cupbearer/data/backdoors.py index 417e3488..04bb161a 100644 --- a/src/cupbearer/data/backdoors.py +++ b/src/cupbearer/data/backdoors.py @@ -6,8 +6,9 @@ import torch import torch.nn.functional as F from loguru import logger +from torch.utils.data import Dataset -from ._shared import Transform +from ._shared import Transform, TransformDataset @dataclass @@ -34,6 +35,15 @@ def __call__(self, sample: Tuple[torch.Tensor, int]) -> Tuple[torch.Tensor, int] return self.inject_backdoor(img), self.target_class +class BackdoorDataset(TransformDataset): + """Just a wrapper around TransformDataset with aliases and more specific types.""" + + def __init__(self, original: Dataset, backdoor: Backdoor): + super().__init__(dataset=original, transform=backdoor) + self.original = original + self.backdoor = backdoor + + @dataclass class CornerPixelBackdoor(Backdoor): """Adds a white/red pixel to the specified corner of the image and sets the target. diff --git a/src/cupbearer/data/pytorch.py b/src/cupbearer/data/pytorch.py index 804ce80e..43366d17 100644 --- a/src/cupbearer/data/pytorch.py +++ b/src/cupbearer/data/pytorch.py @@ -1,11 +1,9 @@ -import dataclasses from dataclasses import dataclass from torch.utils.data import Dataset from cupbearer.utils.utils import get_object, mutable_field -from . import DatasetConfig from .transforms import ( RandomCrop, RandomHorizontalFlip, @@ -17,29 +15,28 @@ @dataclass(kw_only=True) -class PytorchConfig(DatasetConfig): +class PytorchDataset(Dataset): name: str - # This is an abstractproperty on the parent class, but it's a bit more - # convenient to just make it a field here. 
- num_classes: int train: bool = True - transforms: dict[str, Transform] = mutable_field({"to_tensor": ToTensor()}) + transforms: list[Transform] = mutable_field([ToTensor()]) default_augmentations: bool = True - def get_test_split(self) -> DatasetConfig: - if self.train: - # TODO: this will keep the augmentations around, - # which we probably don't want? - return dataclasses.replace(self, train=False) - else: - raise ValueError("This dataset is already a test split.") - def __post_init__(self): - super().__post_init__() if self.default_augmentations and self.train: # Defaults from WaNet https://openreview.net/pdf?id=eEn8KTtJOx - self.transforms["random_crop"] = RandomCrop(p=0.8, padding=5) - self.transforms["random_rotation"] = RandomRotation(p=0.5, degrees=10) + self.transforms.append(RandomCrop(p=0.8, padding=5)) + self.transforms.append(RandomRotation(p=0.5, degrees=10)) + + self._dataset = self._build() + + def __len__(self): + return len(self._dataset) + + def __getitem__(self, index): + sample = self._dataset[index] + for transform in self.transforms: + sample = transform(sample) + return sample @property def _dataset_kws(self): @@ -57,31 +54,31 @@ def _build(self) -> Dataset: @dataclass -class MNIST(PytorchConfig): +class MNIST(PytorchDataset): name: str = "torchvision.datasets.MNIST" num_classes: int = 10 @dataclass -class CIFAR10(PytorchConfig): +class CIFAR10(PytorchDataset): name: str = "torchvision.datasets.CIFAR10" num_classes: int = 10 def __post_init__(self): super().__post_init__() if self.default_augmentations and self.train: - self.transforms["random_horizontal_flip"] = RandomHorizontalFlip(p=0.5) + self.transforms.append(RandomHorizontalFlip(p=0.5)) @dataclass -class GTSRB(PytorchConfig): +class GTSRB(PytorchDataset): name: str = "torchvision.datasets.GTSRB" num_classes: int = 43 - transforms: dict[str, Transform] = mutable_field( - { - "resize": Resize(size=(32, 32)), - "to_tensor": ToTensor(), - } + transforms: list[Transform] = mutable_field( 
+ [ + Resize(size=(32, 32)), + ToTensor(), + ] ) @property diff --git a/src/cupbearer/data/toy_ambiguous_features.py b/src/cupbearer/data/toy_ambiguous_features.py index dbbd176d..bd3c334c 100644 --- a/src/cupbearer/data/toy_ambiguous_features.py +++ b/src/cupbearer/data/toy_ambiguous_features.py @@ -1,21 +1,6 @@ -from dataclasses import dataclass - import numpy as np from torch.utils.data import Dataset -from ._shared import DatasetConfig - - -@dataclass -class ToyFeaturesConfig(DatasetConfig): - correlated: bool = True - size: int = 1000 - noise: float = 0.1 - num_classes: int = 2 - - def _build(self): - return ToyDataset(self.size, self.correlated, self.noise) - class ToyDataset(Dataset): def __init__(self, size: int, correlated: bool, noise: float): diff --git a/src/cupbearer/data/transforms.py b/src/cupbearer/data/transforms.py index 92a21144..6b15cde3 100644 --- a/src/cupbearer/data/transforms.py +++ b/src/cupbearer/data/transforms.py @@ -5,11 +5,8 @@ import torch import torchvision.transforms.functional as F -from cupbearer.utils.utils import BaseConfig - -@dataclass -class Transform(BaseConfig, ABC): +class Transform(ABC): @abstractmethod def __call__(self, sample): pass @@ -23,7 +20,6 @@ def load(self, basepath): pass -@dataclass class AdaptedTransform(Transform, ABC): """Adapt a transform designed to work on inputs to work on img, label pairs.""" @@ -51,8 +47,6 @@ def __call__(self, sample): return (img, *rest) -# Needs to be a dataclass to make simple_parsing's serialization work correctly. 
-@dataclass class ToTensor(AdaptedTransform): def __img_call__(self, img): out = F.to_tensor(img) diff --git a/src/cupbearer/detectors/__init__.py b/src/cupbearer/detectors/__init__.py index 04ea74ab..2da3d794 100644 --- a/src/cupbearer/detectors/__init__.py +++ b/src/cupbearer/detectors/__init__.py @@ -1,13 +1,11 @@ # ruff: noqa: F401 from .abstraction import AbstractionDetectorConfig from .anomaly_detector import AnomalyDetector -from .config import DetectorConfig, StoredDetector -from .finetuning import FinetuningConfig +from .finetuning import FinetuningAnomalyDetector from .statistical import ( - DebugMahalanobisConfig, - DebugQuantumEntropyConfig, - DebugSpectralSignatureConfig, - MahalanobisConfig, - QuantumEntropyConfig, - SpectralSignatureConfig, + ActivationCovarianceTrainConfig, + MahalanobisDetector, + MahalanobisTrainConfig, + QuantumEntropyDetector, + SpectralSignatureDetector, ) diff --git a/src/cupbearer/detectors/abstraction/__init__.py b/src/cupbearer/detectors/abstraction/__init__.py index 0c8f9075..ba48e172 100644 --- a/src/cupbearer/detectors/abstraction/__init__.py +++ b/src/cupbearer/detectors/abstraction/__init__.py @@ -5,7 +5,6 @@ from cupbearer.utils.train import TrainConfig from cupbearer.utils.utils import BaseConfig -from ..config import DetectorConfig from .abstraction import ( Abstraction, AutoencoderAbstraction, @@ -45,7 +44,7 @@ def build(self, model: HookedModel) -> AutoencoderAbstraction: @dataclass -class AbstractionDetectorConfig(DetectorConfig): +class AbstractionDetectorConfig: abstraction: AbstractionConfig = field( default_factory=LocallyConsistentAbstractionConfig ) diff --git a/src/cupbearer/detectors/anomaly_detector.py b/src/cupbearer/detectors/anomaly_detector.py index 4fbb8724..75c5569e 100644 --- a/src/cupbearer/detectors/anomaly_detector.py +++ b/src/cupbearer/detectors/anomaly_detector.py @@ -21,11 +21,9 @@ class AnomalyDetector(ABC): def __init__( self, - model: HookedModel, max_batch_size: int = 4096, save_path: 
Optional[Path | str] = None, ): - self.model = model # For storing the original detector variables when finetuning self._original_variables = None self.max_batch_size = max_batch_size @@ -33,6 +31,16 @@ def __init__( self.trained = False + def set_model(self, model: HookedModel): + # This is separate from __init__ because we want to be able to set the model + # automatically based on the task, instead of letting the user pass it in. + # On the other hand, it's separate from train() because we might need to set + # the model even when just using the detector for inference. + # + # Subclasses can implement more complex logic here. + self.model = model + self.trained = False + @abstractmethod def train( self, @@ -42,7 +50,7 @@ def train( num_classes: int, train_config: utils.BaseConfig, ): - """Train the anomaly detector with the given datasets. + """Train the anomaly detector with the given datasets on the given model. At least one of trusted_data or untrusted_data must be provided. """ @@ -153,6 +161,8 @@ def eval( if not self.save_path: return + self.save_path.mkdir(parents=True, exist_ok=True) + # Everything from here is just saving metrics and creating figures # (which we skip if they aren't going to be saved anyway). 
with open(self.save_path / "eval.json", "w") as f: @@ -223,25 +233,35 @@ def load_weights(self, path: str | Path): self._set_trained_variables(utils.load(path)) +def default_activation_name_func(model): + return model.default_names + + class ActivationBasedDetector(AnomalyDetector): """AnomalyDetector using activations.""" def __init__( self, - model: HookedModel, - activation_name_func: Callable[[HookedModel], Collection[str]] | None = None, + activation_name_func: str + | Callable[[HookedModel], Collection[str]] + | None = None, max_batch_size: int = 4096, save_path: Path | str | None = None, ): - super().__init__( - model=model, max_batch_size=max_batch_size, save_path=save_path - ) + super().__init__(max_batch_size=max_batch_size, save_path=save_path) + if activation_name_func is None: + activation_name_func = default_activation_name_func + elif isinstance(activation_name_func, str): + activation_name_func = utils.get_object(activation_name_func) + + assert callable(activation_name_func) # make type checker happy - def activation_name_func(model): - return model.default_names + self.activation_name_func = activation_name_func - self.activation_names = activation_name_func(model) + def set_model(self, model: HookedModel): + super().set_model(model) + self.activation_names = self.activation_name_func(model) def get_activations(self, batch): inputs = utils.inputs_from_batch(batch) diff --git a/src/cupbearer/detectors/config.py b/src/cupbearer/detectors/config.py deleted file mode 100644 index c04f163c..00000000 --- a/src/cupbearer/detectors/config.py +++ /dev/null @@ -1,57 +0,0 @@ -from abc import ABC, abstractmethod -from collections.abc import Collection -from dataclasses import dataclass, field -from pathlib import Path -from typing import Callable, Optional - -from loguru import logger - -from cupbearer.detectors.anomaly_detector import AnomalyDetector -from cupbearer.models.models import HookedModel -from cupbearer.utils.scripts import load_config -from 
cupbearer.utils.train import TrainConfig -from cupbearer.utils.utils import BaseConfig, get_object - - -@dataclass(kw_only=True) -class DetectorConfig(BaseConfig, ABC): - train: TrainConfig = field(default_factory=TrainConfig) - - @abstractmethod - def build(self, model: HookedModel, save_dir: Path | None) -> AnomalyDetector: - pass - - -# TODO: this feels like unnecessary indirection, can maybe integrate this elsewhere -@dataclass(kw_only=True) -class ActivationBasedDetectorConfig(DetectorConfig): - name_func: Optional[str] = None - - def resolve_name_func(self) -> Callable[[HookedModel], Collection[str]] | None: - if isinstance(self.name_func, str): - return get_object(self.name_func) - return self.name_func - - -@dataclass(kw_only=True) -class StoredDetector(DetectorConfig): - path: Path - - def build(self, model, save_dir) -> AnomalyDetector: - detector_cfg = load_config(self.path, "detector", DetectorConfig) - if isinstance(detector_cfg, StoredDetector) and detector_cfg.path == self.path: - raise RuntimeError( - f"It looks like the detector you're trying to load from {self.path} " - "is a stored detector pointing to itself. This probably means " - "a configuration file is broken." - ) - detector = detector_cfg.build(model, save_dir) - try: - detector.load_weights(self.path / "detector") - except FileNotFoundError: - logger.warning( - f"Didn't find weights for detector from {self.path}. " - "This is normal if the detector doesn't have learned parameters." 
- ) - - return detector diff --git a/src/cupbearer/detectors/finetuning.py b/src/cupbearer/detectors/finetuning.py index 24ff4bbd..d0f2e014 100644 --- a/src/cupbearer/detectors/finetuning.py +++ b/src/cupbearer/detectors/finetuning.py @@ -1,20 +1,21 @@ import copy import warnings -from dataclasses import dataclass import torch import torch.nn.functional as F from cupbearer.detectors.anomaly_detector import AnomalyDetector -from cupbearer.detectors.config import DetectorConfig from cupbearer.scripts._shared import Classifier from cupbearer.utils import utils from cupbearer.utils.train import TrainConfig class FinetuningAnomalyDetector(AnomalyDetector): - def __init__(self, model, max_batch_size, save_path): - super().__init__(model, max_batch_size, save_path) + def __init__(self, max_batch_size, save_path): + super().__init__(max_batch_size, save_path) + + def set_model(self, model): + super().set_model(model) # We might as well make a copy here already, since whether we'll train this # detector or load weights for inference, we'll need to copy in both cases. 
self.finetuned_model = copy.deepcopy(self.model) @@ -92,13 +93,3 @@ def _get_trained_variables(self, saving: bool = False): def _set_trained_variables(self, variables): self.finetuned_model.load_state_dict(variables) - - -@dataclass -class FinetuningConfig(DetectorConfig): - def build(self, model, save_dir) -> FinetuningAnomalyDetector: - return FinetuningAnomalyDetector( - model=model, - max_batch_size=self.train.max_batch_size, - save_path=save_dir, - ) diff --git a/src/cupbearer/detectors/statistical/__init__.py b/src/cupbearer/detectors/statistical/__init__.py index 736c2b11..76bc19a2 100644 --- a/src/cupbearer/detectors/statistical/__init__.py +++ b/src/cupbearer/detectors/statistical/__init__.py @@ -1,7 +1,4 @@ -from dataclasses import dataclass, field - -from cupbearer.detectors.config import ActivationBasedDetectorConfig - +# flake8: noqa from .mahalanobis_detector import MahalanobisDetector from .que_detector import QuantumEntropyDetector from .spectral_detector import SpectralSignatureDetector @@ -11,65 +8,3 @@ DebugMahalanobisTrainConfig, MahalanobisTrainConfig, ) - - -@dataclass -class MahalanobisConfig(ActivationBasedDetectorConfig): - train: MahalanobisTrainConfig = field(default_factory=MahalanobisTrainConfig) - - def build(self, model, save_dir) -> MahalanobisDetector: - return MahalanobisDetector( - model=model, - activation_name_func=self.resolve_name_func(), - max_batch_size=self.train.max_batch_size, - save_path=save_dir, - ) - - -@dataclass -class DebugMahalanobisConfig(MahalanobisConfig): - train: MahalanobisTrainConfig = field(default_factory=DebugMahalanobisTrainConfig) - - -@dataclass -class SpectralSignatureConfig(ActivationBasedDetectorConfig): - train: ActivationCovarianceTrainConfig = field( - default_factory=ActivationCovarianceTrainConfig - ) - - def build(self, model, save_dir) -> SpectralSignatureDetector: - return SpectralSignatureDetector( - model=model, - activation_name_func=self.resolve_name_func(), - 
max_batch_size=self.train.max_batch_size, - save_path=save_dir, - ) - - -@dataclass -class DebugSpectralSignatureConfig(SpectralSignatureConfig): - train: ActivationCovarianceTrainConfig = field( - default_factory=DebugActivationCovarianceTrainConfig - ) - - -@dataclass -class QuantumEntropyConfig(ActivationBasedDetectorConfig): - train: ActivationCovarianceTrainConfig = field( - default_factory=ActivationCovarianceTrainConfig - ) - - def build(self, model, save_dir) -> QuantumEntropyDetector: - return QuantumEntropyDetector( - model=model, - activation_name_func=self.resolve_name_func(), - max_batch_size=self.train.max_batch_size, - save_path=save_dir, - ) - - -@dataclass -class DebugQuantumEntropyConfig(QuantumEntropyConfig): - train: ActivationCovarianceTrainConfig = field( - default_factory=DebugActivationCovarianceTrainConfig - ) diff --git a/src/cupbearer/models/__init__.py b/src/cupbearer/models/__init__.py index 1c847846..f1585f0e 100644 --- a/src/cupbearer/models/__init__.py +++ b/src/cupbearer/models/__init__.py @@ -1,91 +1,19 @@ -from abc import ABC, abstractmethod -from dataclasses import dataclass +# ruff: noqa: F401 from pathlib import Path import torch -from cupbearer.utils.scripts import load_config -from cupbearer.utils.utils import BaseConfig, mutable_field - from .hooked_model import HookedModel -from .models import CNN, MLP, PreActBlock, PreActResNet - - -@dataclass(kw_only=True) -class ModelConfig(BaseConfig, ABC): - @abstractmethod - def build_model(self, input_shape: list[int] | tuple[int]) -> HookedModel: - pass - - -@dataclass -class StoredModel(ModelConfig): - path: Path - - def build_model(self, input_shape) -> HookedModel: - model_cfg = load_config(self.path, "model", ModelConfig) - model = model_cfg.build_model(input_shape) - - # Our convention is that LightningModules store the actual pytorch model - # as a `model` attribute. We use the last checkpoint (generated via the - # save_last=True option to the ModelCheckpoint callback). 
- state_dict = torch.load(self.path / "checkpoints" / "last.ckpt")["state_dict"] - # We want the state_dict for the 'model' submodule, so remove - # the 'model.' prefix from the keys. - state_dict = {k[6:]: v for k, v in state_dict.items() if k.startswith("model.")} - assert isinstance(model, torch.nn.Module) - model.load_state_dict(state_dict) - return model - - -@dataclass -class MLPConfig(ModelConfig): - output_dim: int = 10 - hidden_dims: list[int] = mutable_field([256, 256]) - - def build_model(self, input_shape: list[int] | tuple[int]) -> HookedModel: - return MLP( - input_shape=input_shape, - output_dim=self.output_dim, - hidden_dims=self.hidden_dims, - ) - - -@dataclass -class DebugMLPConfig(MLPConfig): - # TODO: we need at least two layers here because abstractions currently - # only work in that case. Abstraction implementation should be fixed. - # Additionally, we make network with some width to reduce chance that all - # neurons are dead. - hidden_dims: list[int] = mutable_field([5, 5]) - - -@dataclass -class CNNConfig(ModelConfig): - output_dim: int = 10 - channels: list[int] = mutable_field([32, 64]) - dense_dims: list[int] = mutable_field([256, 256]) - - def build_model(self, input_shape: list[int] | tuple[int]) -> HookedModel: - return CNN( - input_shape=input_shape, - output_dim=self.output_dim, - channels=self.channels, - dense_dims=self.dense_dims, - ) - - -@dataclass -class DebugCNNConfig(CNNConfig): - channels: list[int] = mutable_field([2]) - dense_dims: list[int] = mutable_field([2]) - - -@dataclass -class ResnetConfig(ModelConfig): - output_dim: int = 10 - # ResNet18 default: - num_blocks: list[int] = mutable_field([2, 2, 2, 2]) - - def build_model(self, input_shape) -> HookedModel: - return PreActResNet(PreActBlock, self.num_blocks, num_classes=self.output_dim) +from .models import CNN, MLP, PreActResNet + + +def load(model: HookedModel, path: Path | str): + path = Path(path) + # Our convention is that LightningModules store the actual 
pytorch model + # as a `model` attribute. We use the last checkpoint (generated via the + # save_last=True option to the ModelCheckpoint callback). + state_dict = torch.load(path / "checkpoints" / "last.ckpt")["state_dict"] + # We want the state_dict for the 'model' submodule, so remove + # the 'model.' prefix from the keys. + state_dict = {k[6:]: v for k, v in state_dict.items() if k.startswith("model.")} + model.load_state_dict(state_dict) diff --git a/src/cupbearer/scripts/_shared.py b/src/cupbearer/scripts/_shared.py index a25e13d5..62729606 100644 --- a/src/cupbearer/scripts/_shared.py +++ b/src/cupbearer/scripts/_shared.py @@ -2,43 +2,29 @@ import torch from torchmetrics.classification import Accuracy -from cupbearer.models import HookedModel, ModelConfig +from cupbearer.models import HookedModel from cupbearer.utils.optimizers import OptimizerConfig class Classifier(L.LightningModule): def __init__( self, - model: ModelConfig | HookedModel, + model: HookedModel, num_classes: int, optim_cfg: OptimizerConfig, - input_shape: tuple[int, ...] | None = None, val_loader_names: list[str] | None = None, test_loader_names: list[str] | None = None, save_hparams: bool = True, ): super().__init__() - if isinstance(model, HookedModel) and save_hparams: - raise ValueError( - "Cannot save hyperparameters when model is already instantiated. " - "Either pass a ModelConfig or set save_hparams=False." - ) if save_hparams: - self.save_hyperparameters() + self.save_hyperparameters(ignore=["model"]) if val_loader_names is None: val_loader_names = [] if test_loader_names is None: test_loader_names = [] - if isinstance(model, HookedModel): - self.model = model - elif input_shape is None: - raise ValueError( - "Must provide input_shape when passing a ModelConfig " - "instead of an instantiated model." 
- ) - else: - self.model = model.build_model(input_shape=input_shape) + self.model = model self.optim_cfg = optim_cfg self.val_loader_names = val_loader_names self.test_loader_names = test_loader_names diff --git a/src/cupbearer/scripts/conf/eval_classifier_conf.py b/src/cupbearer/scripts/conf/eval_classifier_conf.py index fb17bbfb..10a365b1 100644 --- a/src/cupbearer/scripts/conf/eval_classifier_conf.py +++ b/src/cupbearer/scripts/conf/eval_classifier_conf.py @@ -1,13 +1,15 @@ from dataclasses import dataclass from typing import Optional -from cupbearer.data import DatasetConfig, TrainDataFromRun +from cupbearer.models import HookedModel from cupbearer.utils.scripts import ScriptConfig +from torch.utils.data import Dataset @dataclass(kw_only=True) class Config(ScriptConfig): - data: DatasetConfig | None = None + data: Dataset + model: HookedModel max_batches: Optional[int] = None max_batch_size: int = 2048 save_config: bool = False @@ -18,13 +20,6 @@ class Config(ScriptConfig): def __post_init__(self): if self.path is None: raise ValueError("Path must be set") - if self.data is None: - self.data = TrainDataFromRun(self.path) - - @property - def num_classes(self): - assert self.data is not None - return self.data.num_classes @dataclass diff --git a/src/cupbearer/scripts/conf/eval_detector_conf.py b/src/cupbearer/scripts/conf/eval_detector_conf.py index ca5f7d2c..33d6bbf8 100644 --- a/src/cupbearer/scripts/conf/eval_detector_conf.py +++ b/src/cupbearer/scripts/conf/eval_detector_conf.py @@ -1,19 +1,12 @@ from dataclasses import dataclass -from cupbearer.detectors import DetectorConfig, StoredDetector -from cupbearer.tasks import TaskConfig +from cupbearer.detectors import AnomalyDetector +from cupbearer.tasks import Task from cupbearer.utils.scripts import ScriptConfig @dataclass(kw_only=True) class Config(ScriptConfig): - task: TaskConfig - detector: DetectorConfig | None = None - save_config: bool = False + task: Task + detector: AnomalyDetector pbar: bool = False 
- - def __post_init__(self): - if self.detector is None: - if self.path is None: - raise ValueError("Path or detector must be set") - self.detector = StoredDetector(path=self.path) diff --git a/src/cupbearer/scripts/conf/train_classifier_conf.py b/src/cupbearer/scripts/conf/train_classifier_conf.py index b8209e61..5fcd3473 100644 --- a/src/cupbearer/scripts/conf/train_classifier_conf.py +++ b/src/cupbearer/scripts/conf/train_classifier_conf.py @@ -1,41 +1,36 @@ from dataclasses import dataclass, field -from cupbearer.data import BackdoorData, DatasetConfig, WanetBackdoor -from cupbearer.models import CNNConfig, MLPConfig, ModelConfig +from cupbearer.data import BackdoorDataset, WanetBackdoor +from cupbearer.models import HookedModel from cupbearer.utils.scripts import ScriptConfig from cupbearer.utils.train import DebugTrainConfig, TrainConfig +from torch.utils.data import Dataset @dataclass(kw_only=True) class Config(ScriptConfig): - model: ModelConfig + model: HookedModel train_config: TrainConfig = field(default_factory=TrainConfig) - train_data: DatasetConfig - val_data: dict[str, DatasetConfig] = field(default_factory=dict) + train_data: Dataset + num_classes: int + val_data: dict[str, Dataset] = field(default_factory=dict) # If True, returns the Lighting Trainer object (which has the model and a bunch # of other information, this may be useful when using interactively). # Otherwise (default), return only a dictionary of latest metrics, to avoid e.g. # submitit trying to pickle the entire Trainer object. return_trainer: bool = False - @property - def num_classes(self): - return self.train_data.num_classes - def __post_init__(self): super().__post_init__() - # HACK: Need to add new architectures here as they get implemented. 
- if isinstance(self.model, (MLPConfig, CNNConfig)): - self.model.output_dim = self.num_classes # For datasets that are not necessarily deterministic based only on # arguments, this is where validation sets are set to follow train_data - if isinstance(self.train_data, BackdoorData): + if isinstance(self.train_data, BackdoorDataset): for name, val_config in self.val_data.items(): # WanetBackdoor if ( isinstance(self.train_data.backdoor, WanetBackdoor) - and isinstance(val_config, BackdoorData) + and isinstance(val_config, BackdoorDataset) and isinstance(val_config.backdoor, WanetBackdoor) ): str_factor = ( diff --git a/src/cupbearer/scripts/conf/train_detector_conf.py b/src/cupbearer/scripts/conf/train_detector_conf.py index 0b51379c..84a362b0 100644 --- a/src/cupbearer/scripts/conf/train_detector_conf.py +++ b/src/cupbearer/scripts/conf/train_detector_conf.py @@ -1,11 +1,15 @@ from dataclasses import dataclass -from cupbearer.detectors import DetectorConfig -from cupbearer.tasks import TaskConfig +from cupbearer.detectors import AnomalyDetector +from cupbearer.tasks import Task from cupbearer.utils.scripts import ScriptConfig +from cupbearer.utils.train import TrainConfig +from cupbearer.utils.utils import BaseConfig, mutable_field @dataclass(kw_only=True) class Config(ScriptConfig): - task: TaskConfig - detector: DetectorConfig + task: Task + detector: AnomalyDetector + num_classes: int + train: BaseConfig = mutable_field(TrainConfig()) diff --git a/src/cupbearer/scripts/eval_classifier.py b/src/cupbearer/scripts/eval_classifier.py index 5a2b8259..021d866d 100644 --- a/src/cupbearer/scripts/eval_classifier.py +++ b/src/cupbearer/scripts/eval_classifier.py @@ -1,32 +1,34 @@ import json import lightning as L -from cupbearer.scripts._shared import Classifier -from cupbearer.utils.scripts import script from loguru import logger from torch.utils.data import DataLoader +from cupbearer.data import BackdoorDataset +from cupbearer.scripts._shared import Classifier +from 
cupbearer.utils.scripts import script + from .conf.eval_classifier_conf import Config @script def main(cfg: Config): - assert cfg.data is not None # make type checker happy assert cfg.path is not None # make type checker happy - for trafo in cfg.data.get_transforms(): - logger.debug(f"Loading transform: {trafo}") - trafo.load(cfg.path) + if isinstance(cfg.data, BackdoorDataset): + logger.debug(f"Loading transform: {cfg.data.backdoor}") + cfg.data.backdoor.load(cfg.path) - dataset = cfg.data.build() dataloader = DataLoader( - dataset, + cfg.data, batch_size=cfg.max_batch_size, shuffle=False, ) classifier = Classifier.load_from_checkpoint( - cfg.path / "checkpoints" / "last.ckpt", test_loader_names=["test"] + cfg.path / "checkpoints" / "last.ckpt", + model=cfg.model, + test_loader_names=["test"], ) trainer = L.Trainer( logger=False, diff --git a/src/cupbearer/scripts/eval_detector.py b/src/cupbearer/scripts/eval_detector.py index fe20b245..31217a7e 100644 --- a/src/cupbearer/scripts/eval_detector.py +++ b/src/cupbearer/scripts/eval_detector.py @@ -6,18 +6,12 @@ def main(cfg: Config): assert cfg.detector is not None # make type checker happy # Init - train_data = cfg.task.trusted_data.build() - test_data = cfg.task.test_data.build() - # train_data[0] is the first sample, which is (input, ...), so we need another [0] - example_input = train_data[0][0] - model = cfg.task.build_model(input_shape=example_input.shape) - detector = cfg.detector.build( - model=model, - save_dir=cfg.path, - ) + train_data = cfg.task.trusted_data + test_data = cfg.task.test_data + cfg.detector.set_model(cfg.task.model) # Evaluate detector - detector.eval( + cfg.detector.eval( train_dataset=train_data, test_dataset=test_data, pbar=cfg.pbar, diff --git a/src/cupbearer/scripts/train_classifier.py b/src/cupbearer/scripts/train_classifier.py index 5eb56eda..dce13a9e 100644 --- a/src/cupbearer/scripts/train_classifier.py +++ b/src/cupbearer/scripts/train_classifier.py @@ -4,6 +4,7 @@ import 
lightning as L from lightning.pytorch.callbacks import ModelCheckpoint +from cupbearer.data import BackdoorDataset from cupbearer.scripts._shared import Classifier from cupbearer.utils.scripts import script @@ -12,27 +13,20 @@ @script def main(cfg: Config) -> dict[str, Any] | L.Trainer: - dataset = cfg.train_data.build() - - train_loader = cfg.train_config.get_dataloader(dataset) + train_loader = cfg.train_config.get_dataloader(cfg.train_data) val_loaders = { - k: cfg.train_config.get_dataloader(v.build(), train=False) + k: cfg.train_config.get_dataloader(v, train=False) for k, v in cfg.val_data.items() } - # Store transforms to be used in training - if cfg.path: - for trafo in cfg.train_data.get_transforms(): - trafo.store(cfg.path) - - # Dataloader returns images and labels, only images get passed to model - images, _ = next(iter(train_loader)) - example_input = images[0] + # The WaNet backdoor (and maybe others in the future) has randomly generated state + # that needs to be stored if we want to load it later. 
+ if isinstance(cfg.train_data, BackdoorDataset): + cfg.train_data.backdoor.store(cfg.path) classifier = Classifier( model=cfg.model, - input_shape=example_input.shape, num_classes=cfg.num_classes, optim_cfg=cfg.train_config.optimizer, val_loader_names=list(val_loaders.keys()), diff --git a/src/cupbearer/scripts/train_detector.py b/src/cupbearer/scripts/train_detector.py index f8641e3e..fbc4151c 100644 --- a/src/cupbearer/scripts/train_detector.py +++ b/src/cupbearer/scripts/train_detector.py @@ -6,38 +6,19 @@ @script def main(cfg: Config): - trusted_data = untrusted_data = None + cfg.detector.set_model(cfg.task.model) - if cfg.task.allow_trusted: - trusted_data = cfg.task.trusted_data.build() - if len(trusted_data) == 0: - trusted_data = None - if cfg.task.allow_untrusted: - untrusted_data = cfg.task.untrusted_data.build() - if len(untrusted_data) == 0: - untrusted_data = None - - example_data = trusted_data or untrusted_data - if example_data is None: - raise ValueError( - f"{type(cfg.task).__name__} does not allow trusted nor untrusted data." 
- ) - # example_data[0] is the first sample, which is (input, ...), so we need another - # [0] index - example_input = example_data[0][0] - model = cfg.task.build_model(input_shape=example_input.shape) - detector = cfg.detector.build(model=model, save_dir=cfg.path) - - detector.train( - trusted_data=trusted_data, - untrusted_data=untrusted_data, - num_classes=cfg.task.num_classes, - train_config=cfg.detector.train, + cfg.detector.train( + trusted_data=cfg.task.trusted_data, + untrusted_data=cfg.task.untrusted_train_data, + num_classes=cfg.num_classes, + train_config=cfg.train, ) - if cfg.path: - detector.save_weights(cfg.path / "detector") + path = cfg.detector.save_path + if path: + cfg.detector.save_weights(path / "detector") eval_cfg = EvalDetectorConfig( - path=cfg.path, + detector=cfg.detector, task=cfg.task, seed=cfg.seed, ) diff --git a/src/cupbearer/tasks/__init__.py b/src/cupbearer/tasks/__init__.py index 09baff94..635f0049 100644 --- a/src/cupbearer/tasks/__init__.py +++ b/src/cupbearer/tasks/__init__.py @@ -1,5 +1,4 @@ # ruff: noqa: F401 -from ._config import CustomTask, TaskConfig -from .adversarial_examples import AdversarialExampleTask -from .backdoor_detection import BackdoorDetection -from .toy_features import ToyFeaturesTask +from ._config import Task +from .adversarial_examples import adversarial_examples +from .backdoor_detection import backdoor_detection diff --git a/src/cupbearer/tasks/_config.py b/src/cupbearer/tasks/_config.py index b6309f2d..d5cc8f38 100644 --- a/src/cupbearer/tasks/_config.py +++ b/src/cupbearer/tasks/_config.py @@ -1,216 +1,95 @@ -from abc import ABC, abstractmethod -from copy import deepcopy from dataclasses import dataclass -from typing import Optional +from typing import Callable, Optional -from cupbearer.data import ( - DatasetConfig, - MixedDataConfig, - split_dataset_cfg, -) -from cupbearer.models import ModelConfig +from torch.utils.data import Dataset, random_split + +from cupbearer.data import MixedData from 
cupbearer.models.models import HookedModel @dataclass(kw_only=True) -class TaskConfig(ABC): - # Proportion of clean data in untrusted datasets: - clean_test_weight: float = 0.5 - clean_train_weight: float = 0.5 - # Whether to allow using trusted and untrusted data for training: - allow_trusted: bool = True - allow_untrusted: bool = True - - max_train_size: Optional[int] = None - max_test_size: Optional[int] = None - - def __post_init__(self): - # We'll only actually instantiate these when we need them, in case relevant - # attributes get changed after initialization. - - # TODO: I think this is no longer necessary after the config refactor. - self._trusted_data: Optional[DatasetConfig] = None - self._untrusted_data: Optional[DatasetConfig] = None - self._test_data: Optional[MixedDataConfig] = None - self._model: Optional[ModelConfig] = None - - def _get_trusted_data(self) -> DatasetConfig: - raise NotImplementedError - - def _get_clean_untrusted_data(self) -> DatasetConfig: - raise NotImplementedError - - def _get_anomalous_data(self) -> DatasetConfig: - raise NotImplementedError - - # The following two methods don't need to be implemented, the task will use - # get_test_split() on the untrusted data by default. - def _get_clean_test_data(self) -> DatasetConfig: - raise NotImplementedError - - def _get_anomalous_test_data(self) -> DatasetConfig: - raise NotImplementedError - - def _get_model(self) -> ModelConfig: - raise NotImplementedError - - @property - def trusted_data(self) -> DatasetConfig: - """Clean data that may be used for training.""" - if not self.allow_trusted: - raise ValueError( - "Using trusted training data is not allowed for this task." 
- ) - if not self._trusted_data: - self._trusted_data = deepcopy(self._get_trusted_data()) - self._trusted_data.max_size = self.max_train_size - return self._trusted_data - - @property - def untrusted_data(self) -> DatasetConfig: - """A mix of clean and anomalous data that may be used for training.""" - if not self.allow_untrusted: - raise ValueError( - "Using untrusted training data is not allowed for this task." - ) - if not self._untrusted_data: - anomalous_data = self._get_anomalous_data() - clean_data = self._get_clean_untrusted_data() - self._untrusted_data = MixedDataConfig( - normal=clean_data, +class Task: + trusted_data: Dataset + untrusted_train_data: Optional[MixedData] = None + test_data: MixedData + model: HookedModel + + @classmethod + def from_separate_data( + cls, + model: HookedModel, + trusted_data: Dataset, + clean_test_data: Dataset, + anomalous_test_data: Dataset, + clean_untrusted_data: Optional[Dataset] = None, + anomalous_data: Optional[Dataset] = None, + clean_train_weight: Optional[float] = 0.5, + clean_test_weight: Optional[float] = 0.5, + ): + untrusted_train_data = None + if clean_untrusted_data and anomalous_data: + untrusted_train_data = MixedData( + normal=clean_untrusted_data, anomalous=anomalous_data, - normal_weight=self.clean_train_weight, - max_size=self.max_train_size, + normal_weight=clean_train_weight, return_anomaly_labels=False, ) - return self._untrusted_data - - def build_model(self, input_shape: list[int] | tuple[int]) -> HookedModel: - if not self._model: - self._model = self._get_model() - return self._model.build_model(input_shape) - - @property - def test_data(self) -> MixedDataConfig: - if not self._test_data: - try: - anomalous_data = self._get_anomalous_test_data() - clean_data = self._get_clean_test_data() - except NotImplementedError: - anomalous_data = self._get_anomalous_data().get_test_split() - clean_data = self._get_clean_untrusted_data().get_test_split() - self._test_data = MixedDataConfig( - 
normal=clean_data, - anomalous=anomalous_data, - normal_weight=self.clean_test_weight, - max_size=self.max_test_size, - ) - return self._test_data - - @property - def num_classes(self): - try: - return self.trusted_data.num_classes - except ValueError: - return self.untrusted_data.num_classes - - -@dataclass -class FuzzedTask(TaskConfig): - """A task where the anomalous inputs are some modified version of clean ones.""" - - trusted_fraction: float = 1.0 - - def __post_init__(self): - super().__post_init__() - - # First we get the base (unmodified) data and its test split. - train_data = self._get_base_data() - test_data = train_data.get_test_split() - # We split the training data up into three parts: - # 1. A `trusted_fraction` part will be used as trusted data. - # 2. Out of the remaining part, a `clean_untrusted_fraction` part will be used - # as clean untrusted data. - # 3. The rest will be used as anomalous training data. - ( - self._trusted_data, - self._clean_untrusted_data, - _anomalous_base, - ) = split_dataset_cfg( - train_data, - self.trusted_fraction, - # Using clean_train_weight here means we'll end up using all our data, - # since this is also what's used later in the MixedDataConfig. - (1 - self.trusted_fraction) * self.clean_train_weight, - (1 - self.trusted_fraction) * (1 - self.clean_train_weight), + test_data = MixedData( + normal=clean_test_data, + anomalous=anomalous_test_data, + normal_weight=clean_test_weight, ) - - # Similarly, we plit up the test data, except there is no trusted subset. 
- self._clean_test_data, _anomalous_test_base = split_dataset_cfg( - test_data, - self.clean_test_weight, + return Task( + trusted_data=trusted_data, + untrusted_train_data=untrusted_train_data, + test_data=test_data, + model=model, ) - self._anomalous_data = self.fuzz(_anomalous_base) - self._anomalous_test_data = self.fuzz(_anomalous_test_base) - - @abstractmethod - def fuzz(self, data: DatasetConfig) -> DatasetConfig: - pass - - @abstractmethod - def _get_base_data(self) -> DatasetConfig: - pass - - def _get_trusted_data(self) -> DatasetConfig: - return self._trusted_data - - def _get_clean_untrusted_data(self) -> DatasetConfig: - return self._clean_untrusted_data - - def _get_anomalous_data(self) -> DatasetConfig: - return self._anomalous_data - - def _get_clean_test_data(self) -> DatasetConfig: - return self._clean_test_data - - def _get_anomalous_test_data(self) -> DatasetConfig: - return self._anomalous_test_data - - -@dataclass(kw_only=True) -class CustomTask(TaskConfig): - """A fully customizable task config, where all datasets are specified directly.""" - - trusted_data: DatasetConfig - clean_untrusted_data: DatasetConfig - anomalous_data: DatasetConfig - model: ModelConfig - - def _get_clean_untrusted_data(self) -> DatasetConfig: - return self.clean_untrusted_data - - def _get_trusted_data(self) -> DatasetConfig: - return self.trusted_data - - def _get_anomalous_data(self) -> DatasetConfig: - return self.anomalous_data - - def _get_model(self) -> ModelConfig: - return self.model - - -@dataclass(kw_only=True) -class DebugTaskConfig(TaskConfig): - """Debug configs for specific tasks can inherit from this for convenience. - - Note that children should inherit this first, to make sure MRO picks up on - the overriden defaults below! 
- """ + @classmethod + def from_base_data( + cls, + model: HookedModel, + train_data: Dataset, + test_data: Dataset, + anomaly_func: Callable[[Dataset, bool], Dataset], + clean_untrusted_func: Optional[Callable[[Dataset], Dataset]] = None, + trusted_fraction: float = 1.0, + clean_train_weight: float = 0.5, + clean_test_weight: float = 0.5, + ): + if trusted_fraction == 1.0: + trusted_data = train_data + clean_untrusted_data = anomalous_data = None + else: + untrusted_fraction = 1 - trusted_fraction + train_fractions = ( + trusted_fraction, + untrusted_fraction * clean_train_weight, + untrusted_fraction * (1 - clean_train_weight), + ) + trusted_data, clean_untrusted_data, anomalous_data = random_split( + train_data, train_fractions + ) - # Needs to be at least two because otherwise Mahalanobis distance scores are - # NaN. - max_train_size: int = 2 - # Needs to be at least two so it can contain both normal and anomalous data. - max_test_size: int = 2 + if clean_untrusted_func: + clean_untrusted_data = clean_untrusted_func(clean_untrusted_data) + # Second argument to anomaly_func is whether this is training data + anomalous_data = anomaly_func(anomalous_data, True) + + test_fractions = (clean_test_weight, 1 - clean_test_weight) + clean_test_data, anomalous_test_data = random_split(test_data, test_fractions) + + if clean_untrusted_func: + clean_test_data = clean_untrusted_func(clean_test_data) + anomalous_test_data = anomaly_func(anomalous_test_data, False) + + return Task.from_separate_data( + model=model, + trusted_data=trusted_data, + clean_untrusted_data=clean_untrusted_data, + anomalous_data=anomalous_data, + clean_test_data=clean_test_data, + anomalous_test_data=anomalous_test_data, + ) diff --git a/src/cupbearer/tasks/adversarial_examples.py b/src/cupbearer/tasks/adversarial_examples.py index 907967d2..c9bd23cb 100644 --- a/src/cupbearer/tasks/adversarial_examples.py +++ b/src/cupbearer/tasks/adversarial_examples.py @@ -1,49 +1,34 @@ -import math -from 
dataclasses import dataclass from pathlib import Path -from cupbearer.data import AdversarialExampleConfig, DatasetConfig, TrainDataFromRun -from cupbearer.models import ModelConfig, StoredModel - -from ._config import DebugTaskConfig, TaskConfig - - -@dataclass -class AdversarialExampleTask(TaskConfig): - path: Path - attack_batch_size: int = 128 - success_threshold: float = 0.1 - steps: int = 40 - eps: float = 8 / 255 - - def _get_clean_data(self, train: bool) -> DatasetConfig: - if train: - return TrainDataFromRun(path=self.path) - else: - return TrainDataFromRun(path=self.path).get_test_split() - - def _get_anomalous_data(self, train: bool) -> DatasetConfig: - max_size = None - if self.max_test_size: - # This isn't strictly necessary, but it lets us avoid generating more - # adversarial examples than needed. - max_size = math.ceil(self.max_test_size * (1 - self.clean_test_weight)) - return AdversarialExampleConfig( - path=self.path, - max_size=max_size, - attack_batch_size=self.attack_batch_size, - success_threshold=self.success_threshold, - steps=self.steps, - eps=self.eps, - use_test_data=not train, - ) - - def _get_model(self) -> ModelConfig: - return StoredModel(path=self.path) - - -@dataclass(kw_only=True) -class DebugAdversarialExampleTask(DebugTaskConfig, AdversarialExampleTask): - attack_batch_size: int = 1 - success_threshold: float = 1.0 - steps: int = 1 +from torch.utils.data import Dataset + +from cupbearer.data import make_adversarial_examples +from cupbearer.models import HookedModel + +from ._config import Task + + +def adversarial_examples( + model: HookedModel, + train_data: Dataset, + test_data: Dataset, + cache_path: Path, + trusted_fraction: float = 1.0, + clean_train_weight: float = 0.5, + clean_test_weight: float = 0.5, + **kwargs, +) -> Task: + return Task.from_base_data( + model=model, + train_data=train_data, + test_data=test_data, + anomaly_func=lambda dataset, train: make_adversarial_examples( + model, + dataset, + cache_path / 
f"advexes_{'train' if train else 'test'}", + **kwargs, + ), + trusted_fraction=trusted_fraction, + clean_train_weight=clean_train_weight, + clean_test_weight=clean_test_weight, + ) diff --git a/src/cupbearer/tasks/backdoor_detection.py b/src/cupbearer/tasks/backdoor_detection.py index cec9fdcc..d0e94b62 100644 --- a/src/cupbearer/tasks/backdoor_detection.py +++ b/src/cupbearer/tasks/backdoor_detection.py @@ -1,41 +1,38 @@ -from dataclasses import dataclass -from pathlib import Path - -from cupbearer.data import DatasetConfig -from cupbearer.data.backdoor_data import BackdoorData -from cupbearer.models import ModelConfig, StoredModel -from cupbearer.utils.scripts import load_config - -from ._config import DebugTaskConfig, FuzzedTask - - -@dataclass(kw_only=True) -class BackdoorDetection(FuzzedTask): - path: Path - no_load: bool = False - - def __post_init__(self): - backdoor_data = load_config(self.path, "train_data", BackdoorData) - self._original = backdoor_data.original - self._backdoor = backdoor_data.backdoor - self._backdoor.p_backdoor = 1.0 - - if not self.no_load: - self._backdoor.load(self.path) - - # Call this only now that _original and _backdoor are set. 
- super().__post_init__() - - def _get_base_data(self) -> DatasetConfig: - return self._original - - def fuzz(self, data: DatasetConfig) -> DatasetConfig: - return BackdoorData(original=data, backdoor=self._backdoor) - - def _get_model(self) -> ModelConfig: - return StoredModel(path=self.path) - - -@dataclass -class DebugBackdoorDetection(DebugTaskConfig, BackdoorDetection): - pass +from torch.utils.data import Dataset + +from cupbearer.data import Backdoor, BackdoorDataset +from cupbearer.models import HookedModel + +from ._config import Task + + +def backdoor_detection( + model: HookedModel, + train_data: Dataset, + test_data: Dataset, + backdoor: Backdoor, + trusted_fraction: float = 1.0, + clean_train_weight: float = 0.5, + clean_test_weight: float = 0.5, +): + assert backdoor.p_backdoor == 1.0, ( + "Your anomalous data is not pure backdoor data, " + "this is probably unintentional." + ) + + # TODO: for WaNet, we currently expect the user to load the control grid. + # (Otherwise we'd have to always take in a path here, and also when working + # in a notebook it might just be easier to pass in the existing backdoor object.) + # But we should somehow check somewhere that it's loaded to avoid silent errors. 
+ + return Task.from_base_data( + model=model, + train_data=train_data, + test_data=test_data, + anomaly_func=lambda dataset, _: BackdoorDataset( + original=dataset, backdoor=backdoor + ), + trusted_fraction=trusted_fraction, + clean_train_weight=clean_train_weight, + clean_test_weight=clean_test_weight, + ) diff --git a/src/cupbearer/tasks/toy_features.py b/src/cupbearer/tasks/toy_features.py deleted file mode 100644 index 7a3f5c61..00000000 --- a/src/cupbearer/tasks/toy_features.py +++ /dev/null @@ -1,27 +0,0 @@ -from dataclasses import dataclass -from pathlib import Path - -from cupbearer.data.toy_ambiguous_features import ToyFeaturesConfig -from cupbearer.models import StoredModel - -from ._config import DebugTaskConfig, TaskConfig - - -@dataclass -class ToyFeaturesTask(TaskConfig): - path: Path - noise: float = 0.1 - - def _init_train_data(self): - self._train_data = ToyFeaturesConfig(correlated=True, noise=self.noise) - - def _get_anomalous_test_data(self): - return ToyFeaturesConfig(correlated=False, noise=self.noise) - - def _init_model(self): - self._model = StoredModel(path=self.path) - - -@dataclass -class DebugToyFeaturesTask(DebugTaskConfig, ToyFeaturesTask): - pass diff --git a/src/cupbearer/utils/scripts.py b/src/cupbearer/utils/scripts.py index e3fde2ef..488254be 100644 --- a/src/cupbearer/utils/scripts.py +++ b/src/cupbearer/utils/scripts.py @@ -1,11 +1,9 @@ import functools from dataclasses import dataclass from pathlib import Path -from typing import Any, Callable, Optional, Type, TypeVar +from typing import Any, Callable, Optional, TypeVar -import simple_parsing from cupbearer.utils.utils import BaseConfig -from loguru import logger @dataclass(kw_only=True) @@ -30,44 +28,19 @@ def run_script(cfg: ConfigType): def save_cfg(cfg: ScriptConfig, save_config: bool = True): - if cfg.path: - cfg.path.mkdir(parents=True, exist_ok=True) - if save_config: - # TODO: replace this with cfg.save if/when that exposes save_dc_types. 
- # Note that we need save_dc_types here even though `BaseConfig` already - # enables that, since `save` calls `to_dict` directly, not `obj.to_dict`. - simple_parsing.helpers.serialization.serializable.save( - cfg, - cfg.path / "config.yaml", - save_dc_types=True, - sort_keys=False, - ) + # if cfg.path: + # cfg.path.mkdir(parents=True, exist_ok=True) + # if save_config: + # # TODO: replace this with cfg.save if/when that exposes save_dc_types. + # # Note that we need save_dc_types here even though `BaseConfig` already + # # enables that, since `save` calls `to_dict` directly, not `obj.to_dict`. + # simple_parsing.helpers.serialization.serializable.save( + # cfg, + # cfg.path / "config.yaml", + # save_dc_types=True, + # sort_keys=False, + # ) + pass T = TypeVar("T") - - -def load_config( - path: str | Path, - name: Optional[str] = None, - expected_type: Type[T] = ScriptConfig, -) -> T: - logger.debug(f"Loading config '{name}' from {path}") - path = Path(path) - cfg = ScriptConfig.load(path / "config.yaml", drop_extra_fields=False) - - if name is None: - if not isinstance(cfg, expected_type): - raise ValueError(f"Expected config to be a {expected_type}, got {cfg}") - - return cfg - - if not hasattr(cfg, name): - raise ValueError(f"Expected {name} to be in config, got {cfg}") - - sub_cfg = getattr(cfg, name) - - if not isinstance(sub_cfg, expected_type): - raise ValueError(f"Expected {name} to be a {expected_type}, got {sub_cfg}") - - return sub_cfg From 51e6a25da56dcc493aa6426cc625e981cabbfb50 Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Thu, 29 Feb 2024 23:54:58 -0800 Subject: [PATCH 06/25] Remove unused DatasetConfigs --- src/cupbearer/data/__init__.py | 9 +- src/cupbearer/data/_shared.py | 177 +-------------------------------- 2 files changed, 2 insertions(+), 184 deletions(-) diff --git a/src/cupbearer/data/__init__.py b/src/cupbearer/data/__init__.py index 706d0ae8..1747c96c 100644 --- a/src/cupbearer/data/__init__.py +++ b/src/cupbearer/data/__init__.py @@ 
-1,12 +1,5 @@ # ruff: noqa: F401 -from ._shared import ( - DatasetConfig, - MixedData, - MixedDataConfig, - SubsetConfig, - TransformDataset, - split_dataset_cfg, -) +from ._shared import MixedData, TransformDataset from .adversarial import AdversarialExampleDataset, make_adversarial_examples from .backdoors import ( Backdoor, diff --git a/src/cupbearer/data/_shared.py b/src/cupbearer/data/_shared.py index fedbdce4..ac516e53 100644 --- a/src/cupbearer/data/_shared.py +++ b/src/cupbearer/data/_shared.py @@ -1,140 +1,8 @@ -from abc import ABC, abstractproperty -from dataclasses import dataclass, field from typing import Optional -from torch.utils.data import Dataset, Subset -from torchvision.transforms import Compose +from torch.utils.data import Dataset from cupbearer.data.transforms import Transform -from cupbearer.utils.utils import BaseConfig - - -@dataclass(kw_only=True) -class DatasetConfig(BaseConfig, ABC): - # Only the values of the transforms dict are used, but simple_parsing doesn't - # support lists of dataclasses, which is why we use a dict. One advantage - # of this is also that it's easier to override specific transforms. - # TODO: We should probably make this a list now that we're abandoning CLI. - transforms: dict[str, Transform] = field(default_factory=dict) - max_size: Optional[int] = None - - @abstractproperty - def num_classes(self) -> int: # type: ignore - pass - - def get_test_split(self) -> "DatasetConfig": - # Not every dataset will define this - raise NotImplementedError - - def get_transforms(self) -> list[Transform]: - """Return a list of transforms that should be applied to this dataset. - - Most subclasses won't need to override this, since it just returns - the transforms field by default. But in some cases, we need to apply custom - processing to this that can't be handled in __post_init__ (see BackdoorData - for an example). 
- """ - return list(self.transforms.values()) - - def build(self) -> Dataset: - """Create an instance of the Dataset described by this config.""" - dataset = self._build() - transform = Compose(self.get_transforms()) - dataset = TransformDataset(dataset, transform) - if self.max_size: - assert self.max_size <= len(dataset) - dataset = Subset(dataset, range(self.max_size)) - return dataset - - def _build(self) -> Dataset: - # Not an abstractmethod because e.g. TestDataConfig overrides build() instead. - raise NotImplementedError - - -@dataclass -class SubsetConfig(DatasetConfig): - full_dataset: DatasetConfig - start_fraction: float = 0.0 - end_fraction: float = 1.0 - - def __post_init__(self): - super().__post_init__() - if self.max_size: - raise ValueError( - "max_size should be set on the full dataset, not the subset." - ) - if self.start_fraction > self.end_fraction: - raise ValueError( - f"{self.start_fraction=} must be less than or equal " - f"to {self.end_fraction=}." - ) - if self.start_fraction < 0 or self.end_fraction > 1: - raise ValueError( - "Fractions must be between 0 and 1, " - f"got {self.start_fraction} and {self.end_fraction}." - ) - if self.transforms: - raise ValueError( - "Transforms should be applied to the full dataset, not the subset." - ) - - def _build(self) -> Dataset: - full = self.full_dataset.build() - start = int(self.start_fraction * len(full)) - end = int(self.end_fraction * len(full)) - return Subset(full, range(start, end)) - - @property - def num_classes(self) -> int: # type: ignore - return self.full_dataset.num_classes - - def get_test_split(self) -> "DatasetConfig": - return SubsetConfig( - full_dataset=self.full_dataset.get_test_split(), - start_fraction=self.start_fraction, - end_fraction=self.end_fraction, - ) - - # Mustn't inherit get_transforms() from full_dataset, they're already applied - # to the full dataset on build. 
- - -# def split_dataset(dataset: Dataset, *fractions: float) -> list[Subset]: -# if not fractions: -# raise ValueError("At least one fraction must be provided.") -# if not all(0 <= f <= 1 for f in fractions): -# raise ValueError("Fractions must be between 0 and 1.") -# if not sum(fractions) == 1: -# fractions = fractions + (1 - sum(fractions),) - -# total = len(dataset) - -# markers = [int(total * fraction) for fraction in fractions] - -# subsets = [] -# current_start = 0 -# for marker in markers: -# subsets.append(Subset(dataset, range(current_start, current_start + marker))) -# current_start += marker -# assert current_start == total -# return subsets - - -def split_dataset_cfg(cfg: DatasetConfig, *fractions: float) -> list[SubsetConfig]: - if not fractions: - raise ValueError("At least one fraction must be provided.") - if not all(0 <= f <= 1 for f in fractions): - raise ValueError("Fractions must be between 0 and 1.") - if not sum(fractions) == 1: - fractions = fractions + (1 - sum(fractions),) - - subsets = [] - current_start = 0.0 - for fraction in fractions: - subsets.append(SubsetConfig(cfg, current_start, current_start + fraction)) - current_start += fraction - assert current_start == 1.0 - return subsets class TransformDataset(Dataset): @@ -188,46 +56,3 @@ def __getitem__(self, index): if self.return_anomaly_labels: return self.anomalous_data[index - self.normal_len], 1 return self.anomalous_data[index - self.normal_len] - - -@dataclass -class MixedDataConfig(DatasetConfig): - normal: DatasetConfig - anomalous: DatasetConfig - normal_weight: float = 0.5 - return_anomaly_labels: bool = True - - def get_test_split(self) -> "MixedDataConfig": - return MixedDataConfig( - normal=self.normal.get_test_split(), - anomalous=self.anomalous.get_test_split(), - normal_weight=self.normal_weight, - return_anomaly_labels=self.return_anomaly_labels, - ) - - @property - def num_classes(self): - assert (n := self.normal.num_classes) == self.anomalous.num_classes - return 
n - - def build(self) -> MixedData: - # We need to override this method because max_size needs to be applied in a - # different way: TestDataMix just has normal data first and then anomalous data, - # if we just used a Subset with indices 1...n, we'd get an incorrect ratio. - normal = self.normal.build() - anomalous = self.anomalous.build() - if self.max_size: - normal_size = int(self.max_size * self.normal_weight) - normal_size = min(len(normal), normal_size) - normal = Subset(normal, range(normal_size)) - anomalous_size = self.max_size - normal_size - anomalous_size = min(len(anomalous), anomalous_size) - anomalous = Subset(anomalous, range(anomalous_size)) - dataset = MixedData( - normal, anomalous, self.normal_weight, self.return_anomaly_labels - ) - # We don't want to return a TransformDataset here. Transforms should be applied - # directly to the normal and anomalous data. - if self.transforms: - raise ValueError("Transforms are not supported for TestDataConfig.") - return dataset From 48f8292f788df5acb546b40ed207d811fd87813b Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Thu, 29 Feb 2024 23:57:12 -0800 Subject: [PATCH 07/25] Rename task file More appropriate now to not call it `_config.py` --- src/cupbearer/tasks/__init__.py | 2 +- src/cupbearer/tasks/adversarial_examples.py | 2 +- src/cupbearer/tasks/backdoor_detection.py | 2 +- src/cupbearer/tasks/{_config.py => task.py} | 0 4 files changed, 3 insertions(+), 3 deletions(-) rename src/cupbearer/tasks/{_config.py => task.py} (100%) diff --git a/src/cupbearer/tasks/__init__.py b/src/cupbearer/tasks/__init__.py index 635f0049..9fe5b58f 100644 --- a/src/cupbearer/tasks/__init__.py +++ b/src/cupbearer/tasks/__init__.py @@ -1,4 +1,4 @@ # ruff: noqa: F401 -from ._config import Task from .adversarial_examples import adversarial_examples from .backdoor_detection import backdoor_detection +from .task import Task diff --git a/src/cupbearer/tasks/adversarial_examples.py b/src/cupbearer/tasks/adversarial_examples.py 
index c9bd23cb..ee593558 100644 --- a/src/cupbearer/tasks/adversarial_examples.py +++ b/src/cupbearer/tasks/adversarial_examples.py @@ -5,7 +5,7 @@ from cupbearer.data import make_adversarial_examples from cupbearer.models import HookedModel -from ._config import Task +from .task import Task def adversarial_examples( diff --git a/src/cupbearer/tasks/backdoor_detection.py b/src/cupbearer/tasks/backdoor_detection.py index d0e94b62..51942285 100644 --- a/src/cupbearer/tasks/backdoor_detection.py +++ b/src/cupbearer/tasks/backdoor_detection.py @@ -3,7 +3,7 @@ from cupbearer.data import Backdoor, BackdoorDataset from cupbearer.models import HookedModel -from ._config import Task +from .task import Task def backdoor_detection( diff --git a/src/cupbearer/tasks/_config.py b/src/cupbearer/tasks/task.py similarity index 100% rename from src/cupbearer/tasks/_config.py rename to src/cupbearer/tasks/task.py From 79b51ecf9fe57067d5e6552826102e8087f13381 Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Fri, 1 Mar 2024 20:50:58 -0800 Subject: [PATCH 08/25] WIP on removing ScriptConfig and TrainConfig --- src/cupbearer/scripts/__init__.py | 5 +- src/cupbearer/scripts/_shared.py | 7 +- .../scripts/conf/eval_classifier_conf.py | 30 ----- .../scripts/conf/eval_detector_conf.py | 12 -- .../scripts/conf/train_classifier_conf.py | 47 -------- .../scripts/conf/train_detector_conf.py | 15 --- src/cupbearer/scripts/eval_classifier.py | 39 +++--- src/cupbearer/scripts/eval_detector.py | 24 ++-- src/cupbearer/scripts/train_classifier.py | 111 ++++++++++++++---- src/cupbearer/scripts/train_detector.py | 37 ++++-- src/cupbearer/utils/scripts.py | 37 ++---- 11 files changed, 163 insertions(+), 201 deletions(-) delete mode 100644 src/cupbearer/scripts/conf/eval_classifier_conf.py delete mode 100644 src/cupbearer/scripts/conf/eval_detector_conf.py delete mode 100644 src/cupbearer/scripts/conf/train_classifier_conf.py delete mode 100644 src/cupbearer/scripts/conf/train_detector_conf.py diff --git 
a/src/cupbearer/scripts/__init__.py b/src/cupbearer/scripts/__init__.py index 51003cff..1666400e 100644 --- a/src/cupbearer/scripts/__init__.py +++ b/src/cupbearer/scripts/__init__.py @@ -1,8 +1,5 @@ # ruff: noqa: F401 -from .conf.eval_classifier_conf import Config as EvalClassifierConfig -from .conf.eval_detector_conf import Config as EvalDetectorConfig -from .conf.train_classifier_conf import Config as TrainClassifierConfig -from .conf.train_detector_conf import Config as TrainDetectorConfig +from ._shared import Classifier from .eval_classifier import main as eval_classifier from .eval_detector import main as eval_detector from .train_classifier import main as train_classifier diff --git a/src/cupbearer/scripts/_shared.py b/src/cupbearer/scripts/_shared.py index 62729606..539d88d1 100644 --- a/src/cupbearer/scripts/_shared.py +++ b/src/cupbearer/scripts/_shared.py @@ -3,7 +3,6 @@ from torchmetrics.classification import Accuracy from cupbearer.models import HookedModel -from cupbearer.utils.optimizers import OptimizerConfig class Classifier(L.LightningModule): @@ -11,7 +10,7 @@ def __init__( self, model: HookedModel, num_classes: int, - optim_cfg: OptimizerConfig, + lr: float, val_loader_names: list[str] | None = None, test_loader_names: list[str] | None = None, save_hparams: bool = True, @@ -25,7 +24,7 @@ def __init__( test_loader_names = [] self.model = model - self.optim_cfg = optim_cfg + self.lr = lr self.val_loader_names = val_loader_names self.test_loader_names = test_loader_names self.train_accuracy = Accuracy(task="multiclass", num_classes=num_classes) @@ -81,4 +80,4 @@ def on_validation_epoch_end(self): self.log(f"{name}/acc_epoch", self.val_accuracy[i]) def configure_optimizers(self): - return self.optim_cfg.get_optimizer(self.parameters()) + return torch.optim.Adam(self.parameters(), lr=self.lr) diff --git a/src/cupbearer/scripts/conf/eval_classifier_conf.py b/src/cupbearer/scripts/conf/eval_classifier_conf.py deleted file mode 100644 index 
10a365b1..00000000 --- a/src/cupbearer/scripts/conf/eval_classifier_conf.py +++ /dev/null @@ -1,30 +0,0 @@ -from dataclasses import dataclass -from typing import Optional - -from cupbearer.models import HookedModel -from cupbearer.utils.scripts import ScriptConfig -from torch.utils.data import Dataset - - -@dataclass(kw_only=True) -class Config(ScriptConfig): - data: Dataset - model: HookedModel - max_batches: Optional[int] = None - max_batch_size: int = 2048 - save_config: bool = False - pbar: bool = True - wandb: bool = False - log_every_n_steps: Optional[int] = None - - def __post_init__(self): - if self.path is None: - raise ValueError("Path must be set") - - -@dataclass -class DebugConfig(Config): - max_batches: int = 1 - max_batch_size: int = 2 - wandb: bool = False - log_every_n_steps: int = 1 diff --git a/src/cupbearer/scripts/conf/eval_detector_conf.py b/src/cupbearer/scripts/conf/eval_detector_conf.py deleted file mode 100644 index 33d6bbf8..00000000 --- a/src/cupbearer/scripts/conf/eval_detector_conf.py +++ /dev/null @@ -1,12 +0,0 @@ -from dataclasses import dataclass - -from cupbearer.detectors import AnomalyDetector -from cupbearer.tasks import Task -from cupbearer.utils.scripts import ScriptConfig - - -@dataclass(kw_only=True) -class Config(ScriptConfig): - task: Task - detector: AnomalyDetector - pbar: bool = False diff --git a/src/cupbearer/scripts/conf/train_classifier_conf.py b/src/cupbearer/scripts/conf/train_classifier_conf.py deleted file mode 100644 index 5fcd3473..00000000 --- a/src/cupbearer/scripts/conf/train_classifier_conf.py +++ /dev/null @@ -1,47 +0,0 @@ -from dataclasses import dataclass, field - -from cupbearer.data import BackdoorDataset, WanetBackdoor -from cupbearer.models import HookedModel -from cupbearer.utils.scripts import ScriptConfig -from cupbearer.utils.train import DebugTrainConfig, TrainConfig -from torch.utils.data import Dataset - - -@dataclass(kw_only=True) -class Config(ScriptConfig): - model: HookedModel - 
train_config: TrainConfig = field(default_factory=TrainConfig) - train_data: Dataset - num_classes: int - val_data: dict[str, Dataset] = field(default_factory=dict) - # If True, returns the Lighting Trainer object (which has the model and a bunch - # of other information, this may be useful when using interactively). - # Otherwise (default), return only a dictionary of latest metrics, to avoid e.g. - # submitit trying to pickle the entire Trainer object. - return_trainer: bool = False - - def __post_init__(self): - super().__post_init__() - - # For datasets that are not necessarily deterministic based only on - # arguments, this is where validation sets are set to follow train_data - if isinstance(self.train_data, BackdoorDataset): - for name, val_config in self.val_data.items(): - # WanetBackdoor - if ( - isinstance(self.train_data.backdoor, WanetBackdoor) - and isinstance(val_config, BackdoorDataset) - and isinstance(val_config.backdoor, WanetBackdoor) - ): - str_factor = ( - val_config.backdoor.warping_strength - / self.train_data.backdoor.warping_strength - ) - val_config.backdoor.control_grid = ( - str_factor * self.train_data.backdoor.control_grid - ) - - -@dataclass -class DebugConfig(Config): - train_config: DebugTrainConfig = field(default_factory=DebugTrainConfig) diff --git a/src/cupbearer/scripts/conf/train_detector_conf.py b/src/cupbearer/scripts/conf/train_detector_conf.py deleted file mode 100644 index 84a362b0..00000000 --- a/src/cupbearer/scripts/conf/train_detector_conf.py +++ /dev/null @@ -1,15 +0,0 @@ -from dataclasses import dataclass - -from cupbearer.detectors import AnomalyDetector -from cupbearer.tasks import Task -from cupbearer.utils.scripts import ScriptConfig -from cupbearer.utils.train import TrainConfig -from cupbearer.utils.utils import BaseConfig, mutable_field - - -@dataclass(kw_only=True) -class Config(ScriptConfig): - task: Task - detector: AnomalyDetector - num_classes: int - train: BaseConfig = mutable_field(TrainConfig()) diff 
--git a/src/cupbearer/scripts/eval_classifier.py b/src/cupbearer/scripts/eval_classifier.py index 021d866d..d30d1316 100644 --- a/src/cupbearer/scripts/eval_classifier.py +++ b/src/cupbearer/scripts/eval_classifier.py @@ -1,41 +1,48 @@ import json +from pathlib import Path +from typing import Optional import lightning as L from loguru import logger -from torch.utils.data import DataLoader +from torch.utils.data import DataLoader, Dataset from cupbearer.data import BackdoorDataset +from cupbearer.models import HookedModel from cupbearer.scripts._shared import Classifier from cupbearer.utils.scripts import script -from .conf.eval_classifier_conf import Config - @script -def main(cfg: Config): - assert cfg.path is not None # make type checker happy - - if isinstance(cfg.data, BackdoorDataset): - logger.debug(f"Loading transform: {cfg.data.backdoor}") - cfg.data.backdoor.load(cfg.path) +def main( + data: Dataset, + model: HookedModel, + path: Path | str, + max_batches: Optional[int] = None, + max_batch_size: int = 2048, +): + path = Path(path) + + if isinstance(data, BackdoorDataset): + logger.debug(f"Loading transform: {data.backdoor}") + data.backdoor.load(path) dataloader = DataLoader( - cfg.data, - batch_size=cfg.max_batch_size, + data, + batch_size=max_batch_size, shuffle=False, ) classifier = Classifier.load_from_checkpoint( - cfg.path / "checkpoints" / "last.ckpt", - model=cfg.model, + path / "checkpoints" / "last.ckpt", + model=model, test_loader_names=["test"], ) trainer = L.Trainer( logger=False, - default_root_dir=cfg.path, - limit_test_batches=cfg.max_batches, + default_root_dir=path, + limit_test_batches=max_batches, ) metrics = trainer.test(classifier, [dataloader]) - with open(cfg.path / "eval.json", "w") as f: + with open(path / "eval.json", "w") as f: json.dump(metrics, f) diff --git a/src/cupbearer/scripts/eval_detector.py b/src/cupbearer/scripts/eval_detector.py index 31217a7e..b20774ad 100644 --- a/src/cupbearer/scripts/eval_detector.py +++ 
b/src/cupbearer/scripts/eval_detector.py @@ -1,18 +1,18 @@ -from cupbearer.scripts.conf.eval_detector_conf import Config +from cupbearer.detectors import AnomalyDetector +from cupbearer.tasks import Task from cupbearer.utils.scripts import script @script -def main(cfg: Config): - assert cfg.detector is not None # make type checker happy - # Init - train_data = cfg.task.trusted_data - test_data = cfg.task.test_data - cfg.detector.set_model(cfg.task.model) +def main( + task: Task, + detector: AnomalyDetector, + pbar: bool = False, +): + detector.set_model(task.model) - # Evaluate detector - cfg.detector.eval( - train_dataset=train_data, - test_dataset=test_data, - pbar=cfg.pbar, + detector.eval( + train_dataset=task.trusted_data, + test_dataset=task.test_data, + pbar=pbar, ) diff --git a/src/cupbearer/scripts/train_classifier.py b/src/cupbearer/scripts/train_classifier.py index dce13a9e..05b8f3f2 100644 --- a/src/cupbearer/scripts/train_classifier.py +++ b/src/cupbearer/scripts/train_classifier.py @@ -1,49 +1,118 @@ import warnings +from pathlib import Path from typing import Any import lightning as L +from lightning.pytorch import loggers from lightning.pytorch.callbacks import ModelCheckpoint +from torch.utils.data import DataLoader -from cupbearer.data import BackdoorDataset +from cupbearer.models import HookedModel from cupbearer.scripts._shared import Classifier from cupbearer.utils.scripts import script -from .conf.train_classifier_conf import Config - @script -def main(cfg: Config) -> dict[str, Any] | L.Trainer: - train_loader = cfg.train_config.get_dataloader(cfg.train_data) +def main( + model: HookedModel, + train_loader: DataLoader, + num_classes: int, + path: Path | str, + lr: float = 1e-3, + val_loaders: DataLoader | dict[str, DataLoader] | None = None, + # If True, returns the Lightning Trainer object (which has the model and a bunch + # of other information, this may be useful when using interactively).
+ # Otherwise (default), return only a dictionary of latest metrics, to avoid e.g. + # submitit trying to pickle the entire Trainer object. + return_trainer: bool = False, + wandb: bool = False, + **trainer_kwargs, +) -> dict[str, Any] | L.Trainer: + path = Path(path) + + if trainer_kwargs is None: + trainer_kwargs = {} + if val_loaders is None: + val_loaders = {} + elif isinstance(val_loaders, DataLoader): + val_loaders = {"val": val_loaders} - val_loaders = { - k: cfg.train_config.get_dataloader(v, train=False) - for k, v in cfg.val_data.items() - } + # arguments, this is where validation sets are set to follow train_data + # TODO: we could get weird bugs here if e.g. train_data is a Subset of some + # BackdoorDataset. + # if isinstance(train_data, BackdoorDataset): + # for name, val_config in val_data.items(): + # # WanetBackdoor + # if ( + # isinstance(train_data.backdoor, WanetBackdoor) + # and isinstance(val_config, BackdoorDataset) + # and isinstance(val_config.backdoor, WanetBackdoor) + # ): + # str_factor = ( + # val_config.backdoor.warping_strength + # / train_data.backdoor.warping_strength + # ) + # val_config.backdoor.control_grid = ( + # str_factor * train_data.backdoor.control_grid + # ) - # The WaNet backdoor (and maybe others in the future) has randomly generated state - # that needs to be stored if we want to load it later. - if isinstance(cfg.train_data, BackdoorDataset): - cfg.train_data.backdoor.store(cfg.path) + # # The WaNet backdoor (and maybe others in the future) has randomly generated state + # # that needs to be stored if we want to load it later. 
+ # if isinstance(train_data, BackdoorDataset): + # train_data.backdoor.store(path) classifier = Classifier( - model=cfg.model, - num_classes=cfg.num_classes, - optim_cfg=cfg.train_config.optimizer, + model=model, + num_classes=num_classes, + lr=lr, val_loader_names=list(val_loaders.keys()), ) + callbacks = trainer_kwargs.pop("callbacks", []) + # TODO: once we do longer training runs we'll want to have multiple # checkpoints, potentially based on validation loss - callbacks = cfg.train_config.callbacks - if cfg.path: + if ( + path + # If the user already provided a custom checkpoint config, we'll use that: + and not any(isinstance(c, ModelCheckpoint) for c in callbacks) + # If the user explicitly disabled checkpointing, we don't want to override that: + and trainer_kwargs.get("enable_checkpointing", True) + ): callbacks.append( ModelCheckpoint( - dirpath=cfg.path / "checkpoints", + dirpath=path / "checkpoints", save_last=True, ) ) - trainer = cfg.train_config.get_trainer(callbacks=callbacks, path=cfg.path) + # Define metrics logger + # TODO: make adjustable and set config correctly + if wandb: + metrics_logger = loggers.WandbLogger(project="cupbearer") + metrics_logger.experiment.config.update(trainer_kwargs) + metrics_logger.experiment.config.update( + { + "model": repr(model), + "train_data": repr(train_loader.dataset), + "batch_size": train_loader.batch_size, + } + ) + elif path: + metrics_logger = loggers.TensorBoardLogger( + save_dir=path, + name="", + version="", + sub_dir="tensorboard", + ) + else: + metrics_logger = None + + trainer = L.Trainer( + default_root_dir=path, logger=metrics_logger, callbacks=callbacks, + **trainer_kwargs, + ) + with warnings.catch_warnings(): if not val_loaders: warnings.filterwarnings( @@ -59,7 +128,7 @@ def main(cfg: Config) -> dict[str, Any] | L.Trainer: val_dataloaders=list(val_loaders.values()) or None, ) - if cfg.return_trainer: + if return_trainer: return trainer else: return trainer.logged_metrics diff --git a/src/cupbearer/scripts/train_detector.py
b/src/cupbearer/scripts/train_detector.py index fbc4151c..d7dfb00d 100644 --- a/src/cupbearer/scripts/train_detector.py +++ b/src/cupbearer/scripts/train_detector.py @@ -1,25 +1,36 @@ +from cupbearer.detectors import AnomalyDetector +from cupbearer.tasks import Task from cupbearer.utils.scripts import script +from cupbearer.utils.train import TrainConfig +from cupbearer.utils.utils import BaseConfig from . import EvalDetectorConfig, eval_detector -from .conf.train_detector_conf import Config @script -def main(cfg: Config): - cfg.detector.set_model(cfg.task.model) +def main( + task: Task, + detector: AnomalyDetector, + num_classes: int, + train: BaseConfig | None = None, + seed: int = 0, +): + if train is None: + train = TrainConfig() + detector.set_model(task.model) - cfg.detector.train( - trusted_data=cfg.task.trusted_data, - untrusted_data=cfg.task.untrusted_train_data, - num_classes=cfg.num_classes, - train_config=cfg.train, + detector.train( + trusted_data=task.trusted_data, + untrusted_data=task.untrusted_train_data, + num_classes=num_classes, + train_config=train, ) - path = cfg.detector.save_path + path = detector.save_path if path: - cfg.detector.save_weights(path / "detector") + detector.save_weights(path / "detector") eval_cfg = EvalDetectorConfig( - detector=cfg.detector, - task=cfg.task, - seed=cfg.seed, + detector=detector, + task=task, + seed=seed, ) eval_detector(eval_cfg) diff --git a/src/cupbearer/utils/scripts.py b/src/cupbearer/utils/scripts.py index 488254be..246d46e8 100644 --- a/src/cupbearer/utils/scripts.py +++ b/src/cupbearer/utils/scripts.py @@ -1,33 +1,19 @@ -import functools -from dataclasses import dataclass -from pathlib import Path -from typing import Any, Callable, Optional, TypeVar - -from cupbearer.utils.utils import BaseConfig - - -@dataclass(kw_only=True) -class ScriptConfig(BaseConfig): - seed: int = 0 - path: Optional[Path] = None - save_config: bool = True - - -ConfigType = TypeVar("ConfigType", bound=ScriptConfig) +from 
typing import Callable def script( - script_fn: Callable[[ConfigType], Any], -) -> Callable[[ConfigType], Any]: - @functools.wraps(script_fn) - def run_script(cfg: ConfigType): - save_cfg(cfg, save_config=cfg.save_config) - return script_fn(cfg) + script_fn: Callable, +) -> Callable: + # @functools.wraps(script_fn) + # def run_script(cfg: ConfigType): + # save_cfg(cfg, save_config=cfg.save_config) + # return script_fn(cfg) - return run_script + # return run_script + return script_fn -def save_cfg(cfg: ScriptConfig, save_config: bool = True): +def save_cfg(cfg, save_config: bool = True): # if cfg.path: # cfg.path.mkdir(parents=True, exist_ok=True) # if save_config: @@ -41,6 +27,3 @@ def save_cfg(cfg: ScriptConfig, save_config: bool = True): # sort_keys=False, # ) pass - - -T = TypeVar("T") From bdd56fbcc3ec315eb045818aa45ac00b8f75eeb1 Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Sat, 2 Mar 2024 12:50:42 -0800 Subject: [PATCH 09/25] Remove backdoor loading/storing logic I think we should let the user handle this and just have big warning flags around WaNet---making sure we always do this correctly automatically seems nearly impossible so better to be explicit about that --- src/cupbearer/data/backdoors.py | 53 ++++++++++------------- src/cupbearer/data/transforms.py | 8 ---- src/cupbearer/scripts/eval_classifier.py | 6 --- src/cupbearer/scripts/train_classifier.py | 24 ---------- 4 files changed, 23 insertions(+), 68 deletions(-) diff --git a/src/cupbearer/data/backdoors.py b/src/cupbearer/data/backdoors.py index 04bb161a..6751235b 100644 --- a/src/cupbearer/data/backdoors.py +++ b/src/cupbearer/data/backdoors.py @@ -1,7 +1,8 @@ import os from abc import ABC from dataclasses import dataclass -from typing import Optional, Tuple +from pathlib import Path +from typing import Tuple import torch import torch.nn.functional as F @@ -91,28 +92,23 @@ def inject_backdoor(self, img: torch.Tensor): return img -@dataclass +@dataclass(kw_only=True) class 
WanetBackdoor(Backdoor): """Implements trigger transform from "Wanet - Imperceptible Warping-based Backdoor Attack" by Anh Tuan Nguyen and Anh Tuan Tran, ICLR, 2021.""" + # Path to load control grid from, or None to generate a new one. + # Deliberately non-optional to avoid accidentally generating a new grid! + path: Path | str | None p_noise: float = 0.0 # Probability of non-backdoor warping control_grid_width: int = 4 # Side length of unscaled warping field warping_strength: float = 0.5 # Strength of warping effect grid_rescale: float = 1.0 # Factor to rescale grid from warping effect - _control_grid: Optional[ - tuple[ - list[list[float]], - list[list[float]], - ] - ] = None # Used for reproducibility, typically not set manually def __post_init__(self): super().__post_init__() self._warping_field = None - - # Init control_grid so that it is saved in config - self.control_grid + self._control_grid = None assert 0 <= self.p_noise <= 1, "Probability must be between 0 and 1" assert ( @@ -121,7 +117,10 @@ def __post_init__(self): @property def control_grid(self) -> torch.Tensor: - if self._control_grid is None: + if self._control_grid is not None: + return self._control_grid + + if self.path is None: logger.debug("Generating new control grid for warping field.") control_grid_shape = (2, self.control_grid_width, self.control_grid_width) control_grid = 2 * torch.rand(*control_grid_shape) - 1 @@ -129,7 +128,14 @@ def control_grid(self) -> torch.Tensor: control_grid = control_grid * self.warping_strength self.control_grid = control_grid else: - control_grid = torch.tensor(self._control_grid) + logger.debug( + f"Loading control grid from {self._get_savefile_fullpath(self.path)}" + ) + control_grid = torch.load(self._get_savefile_fullpath(self.path)) + if control_grid.shape[-1] != self.control_grid_width: + logger.warning("Control grid width updated from load.") + self.control_grid_width = control_grid.shape[-1] + self.control_grid = control_grid control_grid_shape = (2,
self.control_grid_width, self.control_grid_width) assert control_grid.shape == control_grid_shape @@ -143,8 +149,7 @@ def control_grid(self, control_grid: torch.Tensor): if control_grid.shape != control_grid_shape: raise ValueError("Control grid shape is incompatible.") - # We keep self._control_grid serializable - self._control_grid = tuple(control_grid.tolist()) + self._control_grid = control_grid @property def warping_field(self) -> torch.Tensor: @@ -177,21 +182,9 @@ def init_warping_field(self, px: int, py: int): def _get_savefile_fullpath(basepath): return os.path.join(basepath, "wanet_backdoor.pt") - def store(self, basepath): - super().store(basepath) - logger.debug(f"Storing control grid to {self._get_savefile_fullpath(basepath)}") - torch.save(self.control_grid, self._get_savefile_fullpath(basepath)) - - def load(self, basepath): - super().load(basepath) - logger.debug( - f"Loading control grid from {self._get_savefile_fullpath(basepath)}" - ) - control_grid = torch.load(self._get_savefile_fullpath(basepath)) - if control_grid.shape[-1] != self.control_grid_width: - logger.warning("Control grid width updated from load.") - self.control_grid_width = control_grid.shape[-1] - self.control_grid = control_grid + def store(self, path: Path | str): + logger.debug(f"Storing control grid to {self._get_savefile_fullpath(path)}") + torch.save(self.control_grid, self._get_savefile_fullpath(path)) def _warp(self, img: torch.Tensor, warping_field: torch.Tensor) -> torch.Tensor: if img.ndim == 3: diff --git a/src/cupbearer/data/transforms.py b/src/cupbearer/data/transforms.py index 6b15cde3..ebaae3cb 100644 --- a/src/cupbearer/data/transforms.py +++ b/src/cupbearer/data/transforms.py @@ -11,14 +11,6 @@ class Transform(ABC): def __call__(self, sample): pass - def store(self, basepath): - """Save transform state to reproduce instance later.""" - pass - - def load(self, basepath): - """Load transform state to reproduce stored instance.""" - pass - class 
AdaptedTransform(Transform, ABC): """Adapt a transform designed to work on inputs to work on img, label pairs.""" diff --git a/src/cupbearer/scripts/eval_classifier.py b/src/cupbearer/scripts/eval_classifier.py index d30d1316..ac0a42c3 100644 --- a/src/cupbearer/scripts/eval_classifier.py +++ b/src/cupbearer/scripts/eval_classifier.py @@ -3,10 +3,8 @@ from typing import Optional import lightning as L -from loguru import logger from torch.utils.data import DataLoader, Dataset -from cupbearer.data import BackdoorDataset from cupbearer.models import HookedModel from cupbearer.scripts._shared import Classifier from cupbearer.utils.scripts import script @@ -22,10 +20,6 @@ def main( ): path = Path(path) - if isinstance(data, BackdoorDataset): - logger.debug(f"Loading transform: {data.backdoor}") - data.backdoor.load(path) - dataloader = DataLoader( data, batch_size=max_batch_size, diff --git a/src/cupbearer/scripts/train_classifier.py b/src/cupbearer/scripts/train_classifier.py index 05b8f3f2..c55a4fc4 100644 --- a/src/cupbearer/scripts/train_classifier.py +++ b/src/cupbearer/scripts/train_classifier.py @@ -37,30 +37,6 @@ def main( elif isinstance(val_loaders, DataLoader): val_loaders = {"val": val_loaders} - # arguments, this is where validation sets are set to follow train_data - # TODO: we could get weird bugs here if e.g. train_data is a Subset of some - # BackdoorDataset. 
- # if isinstance(train_data, BackdoorDataset): - # for name, val_config in val_data.items(): - # # WanetBackdoor - # if ( - # isinstance(train_data.backdoor, WanetBackdoor) - # and isinstance(val_config, BackdoorDataset) - # and isinstance(val_config.backdoor, WanetBackdoor) - # ): - # str_factor = ( - # val_config.backdoor.warping_strength - # / train_data.backdoor.warping_strength - # ) - # val_config.backdoor.control_grid = ( - # str_factor * train_data.backdoor.control_grid - # ) - - # # The WaNet backdoor (and maybe others in the future) has randomly generated state - # # that needs to be stored if we want to load it later. - # if isinstance(train_data, BackdoorDataset): - # train_data.backdoor.store(path) - classifier = Classifier( model=model, num_classes=num_classes, From 62e618a8294bbfd3f31953e723a5d3c497f1d97e Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Sat, 2 Mar 2024 14:55:12 -0800 Subject: [PATCH 10/25] Remove TrainConfig Abstractions and tests are still very broken --- src/cupbearer/detectors/__init__.py | 2 - .../detectors/abstraction/__init__.py | 8 +- .../abstraction/abstraction_detector.py | 20 ++-- src/cupbearer/detectors/anomaly_detector.py | 7 +- src/cupbearer/detectors/finetuning.py | 13 ++- .../detectors/statistical/__init__.py | 6 -- .../statistical/mahalanobis_detector.py | 13 ++- .../detectors/statistical/que_detector.py | 9 +- .../statistical/spectral_detector.py | 3 +- .../detectors/statistical/statistical.py | 93 +++------------- src/cupbearer/scripts/train_classifier.py | 5 +- src/cupbearer/scripts/train_detector.py | 20 +--- src/cupbearer/utils/__init__.py | 2 - src/cupbearer/utils/optimizers.py | 19 ---- src/cupbearer/utils/train.py | 100 ------------------ src/cupbearer/utils/utils.py | 23 ---- 16 files changed, 51 insertions(+), 292 deletions(-) delete mode 100644 src/cupbearer/utils/optimizers.py delete mode 100644 src/cupbearer/utils/train.py diff --git a/src/cupbearer/detectors/__init__.py 
b/src/cupbearer/detectors/__init__.py index 2da3d794..775b2dbe 100644 --- a/src/cupbearer/detectors/__init__.py +++ b/src/cupbearer/detectors/__init__.py @@ -3,9 +3,7 @@ from .anomaly_detector import AnomalyDetector from .finetuning import FinetuningAnomalyDetector from .statistical import ( - ActivationCovarianceTrainConfig, MahalanobisDetector, - MahalanobisTrainConfig, QuantumEntropyDetector, SpectralSignatureDetector, ) diff --git a/src/cupbearer/detectors/abstraction/__init__.py b/src/cupbearer/detectors/abstraction/__init__.py index ba48e172..33519c79 100644 --- a/src/cupbearer/detectors/abstraction/__init__.py +++ b/src/cupbearer/detectors/abstraction/__init__.py @@ -2,8 +2,6 @@ from dataclasses import dataclass, field from cupbearer.models import HookedModel -from cupbearer.utils.train import TrainConfig -from cupbearer.utils.utils import BaseConfig from .abstraction import ( Abstraction, @@ -19,7 +17,7 @@ # let users specify a path to a python function that gets called # to construct the abstraction. (With get_default_abstraction being the default.) 
@dataclass -class AbstractionConfig(BaseConfig, ABC): +class AbstractionConfig(ABC): size_reduction: int = 4 @abstractmethod @@ -48,13 +46,13 @@ class AbstractionDetectorConfig: abstraction: AbstractionConfig = field( default_factory=LocallyConsistentAbstractionConfig ) - train: TrainConfig = field(default_factory=TrainConfig) + max_batch_size: int = 4096 def build(self, model, save_dir) -> AbstractionDetector: abstraction = self.abstraction.build(model) return AbstractionDetector( model=model, abstraction=abstraction, - max_batch_size=self.train.max_batch_size, + max_batch_size=self.max_batch_size, save_path=save_dir, ) diff --git a/src/cupbearer/detectors/abstraction/abstraction_detector.py b/src/cupbearer/detectors/abstraction/abstraction_detector.py index df49ddea..cec80e4f 100644 --- a/src/cupbearer/detectors/abstraction/abstraction_detector.py +++ b/src/cupbearer/detectors/abstraction/abstraction_detector.py @@ -15,8 +15,6 @@ ActivationBasedDetector, ) from cupbearer.models import HookedModel -from cupbearer.utils.optimizers import OptimizerConfig -from cupbearer.utils.train import TrainConfig def per_layer(func: Callable): @@ -94,14 +92,14 @@ def __init__( self, get_activations: Callable[[torch.Tensor], tuple[Any, dict[str, torch.Tensor]]], abstraction: Abstraction, - optim_cfg: OptimizerConfig, + lr: float = 1e-3, ): super().__init__() self.save_hyperparameters(ignore=["get_activations", "abstraction"]) self.get_activations = get_activations self.abstraction = abstraction - self.optim_cfg = optim_cfg + self.lr = lr def _shared_step(self, batch): _, activations = self.get_activations(batch) @@ -118,7 +116,7 @@ def training_step(self, batch, batch_idx): def configure_optimizers(self): # Note we only optimize over the abstraction parameters, the model is frozen - return self.optim_cfg.get_optimizer(self.abstraction.parameters()) + return torch.optim.Adam(self.abstraction.parameters(), lr=self.lr) class AbstractionDetector(ActivationBasedDetector): @@ -150,7 
+148,9 @@ def train( untrusted_data, *, num_classes: int, - train_config: TrainConfig, + lr: float = 1e-3, + batch_size: int = 64, + **trainer_kwargs, ): if trusted_data is None: raise ValueError("Abstraction detector requires trusted training data.") @@ -160,10 +160,12 @@ def train( module = AbstractionModule( self.get_activations, self.abstraction, - optim_cfg=train_config.optimizer, + lr=lr, ) - train_loader = train_config.get_dataloader(trusted_data) + train_loader = torch.utils.data.DataLoader( + trusted_data, batch_size=batch_size, shuffle=True + ) # TODO: implement validation data # val_loaders = { @@ -186,7 +188,7 @@ def train( # (which seems tricky to do manually). module.model = self.model - trainer = train_config.get_trainer(path=self.save_path) + trainer = L.Trainer(default_root_dir=self.save_path, **trainer_kwargs) trainer.fit( model=module, train_dataloaders=train_loader, diff --git a/src/cupbearer/detectors/anomaly_detector.py b/src/cupbearer/detectors/anomaly_detector.py index 75c5569e..c97db6f1 100644 --- a/src/cupbearer/detectors/anomaly_detector.py +++ b/src/cupbearer/detectors/anomaly_detector.py @@ -43,12 +43,7 @@ def set_model(self, model: HookedModel): @abstractmethod def train( - self, - trusted_data: Dataset | None, - untrusted_data: Dataset | None, - *, - num_classes: int, - train_config: utils.BaseConfig, + self, trusted_data: Dataset | None, untrusted_data: Dataset | None, **kwargs ): """Train the anomaly detector with the given datasets on the given model. 
diff --git a/src/cupbearer/detectors/finetuning.py b/src/cupbearer/detectors/finetuning.py index d0f2e014..50b224dd 100644 --- a/src/cupbearer/detectors/finetuning.py +++ b/src/cupbearer/detectors/finetuning.py @@ -1,13 +1,14 @@ import copy import warnings +import lightning as L import torch import torch.nn.functional as F +from torch.utils.data import DataLoader from cupbearer.detectors.anomaly_detector import AnomalyDetector from cupbearer.scripts._shared import Classifier from cupbearer.utils import utils -from cupbearer.utils.train import TrainConfig class FinetuningAnomalyDetector(AnomalyDetector): @@ -26,22 +27,24 @@ def train( untrusted_data, *, num_classes: int, - train_config: TrainConfig, + lr: float = 1e-3, + batch_size: int = 64, + **trainer_kwargs, ): if trusted_data is None: raise ValueError("Finetuning detector requires trusted training data.") classifier = Classifier( self.finetuned_model, num_classes=num_classes, - optim_cfg=train_config.optimizer, + lr=lr, save_hparams=False, ) # Create a DataLoader for the clean dataset - clean_loader = train_config.get_dataloader(trusted_data) + clean_loader = DataLoader(trusted_data, batch_size=batch_size, shuffle=True) # Finetune the model on the clean dataset - trainer = train_config.get_trainer(path=self.save_path) + trainer = L.Trainer(default_root_dir=self.save_path, **trainer_kwargs) with warnings.catch_warnings(): warnings.filterwarnings( "ignore", diff --git a/src/cupbearer/detectors/statistical/__init__.py b/src/cupbearer/detectors/statistical/__init__.py index 76bc19a2..9b56b1c5 100644 --- a/src/cupbearer/detectors/statistical/__init__.py +++ b/src/cupbearer/detectors/statistical/__init__.py @@ -2,9 +2,3 @@ from .mahalanobis_detector import MahalanobisDetector from .que_detector import QuantumEntropyDetector from .spectral_detector import SpectralSignatureDetector -from .statistical import ( - ActivationCovarianceTrainConfig, - DebugActivationCovarianceTrainConfig, - DebugMahalanobisTrainConfig, - 
MahalanobisTrainConfig, -) diff --git a/src/cupbearer/detectors/statistical/mahalanobis_detector.py b/src/cupbearer/detectors/statistical/mahalanobis_detector.py index b91d4287..9dad4343 100644 --- a/src/cupbearer/detectors/statistical/mahalanobis_detector.py +++ b/src/cupbearer/detectors/statistical/mahalanobis_detector.py @@ -3,22 +3,21 @@ from cupbearer.detectors.statistical.helpers import mahalanobis from cupbearer.detectors.statistical.statistical import ( ActivationCovarianceBasedDetector, - MahalanobisTrainConfig, ) class MahalanobisDetector(ActivationCovarianceBasedDetector): - use_trusted: bool = True - - def post_covariance_training(self, train_config: MahalanobisTrainConfig): + def post_covariance_training( + self, rcond: float = 1e-5, relative: bool = False, **kwargs + ): self.inv_covariances = { - k: torch.linalg.pinv(C, rcond=train_config.rcond, hermitian=True) + k: torch.linalg.pinv(C, rcond=rcond, hermitian=True) for k, C in self.covariances.items() } self.inv_diag_covariances = None - if train_config.relative: + if relative: self.inv_diag_covariances = { - k: torch.where(torch.diag(C) > train_config.rcond, 1 / torch.diag(C), 0) + k: torch.where(torch.diag(C) > rcond, 1 / torch.diag(C), 0) for k, C in self.covariances.items() } diff --git a/src/cupbearer/detectors/statistical/que_detector.py b/src/cupbearer/detectors/statistical/que_detector.py index 7bc8dd15..1207161e 100644 --- a/src/cupbearer/detectors/statistical/que_detector.py +++ b/src/cupbearer/detectors/statistical/que_detector.py @@ -3,14 +3,11 @@ from cupbearer.detectors.statistical.helpers import quantum_entropy from cupbearer.detectors.statistical.statistical import ( ActivationCovarianceBasedDetector, - ActivationCovarianceTrainConfig, ) class QuantumEntropyDetector(ActivationCovarianceBasedDetector): - use_trusted: bool = True - - def post_covariance_training(self, train_config: ActivationCovarianceTrainConfig): + def post_covariance_training(self, rcond: float = 1e-5, **kwargs): 
whitening_matrices = {} for k, cov in self.covariances.items(): # Compute decomposition @@ -18,9 +15,7 @@ def post_covariance_training(self, train_config: ActivationCovarianceTrainConfig # Zero entries corresponding to eigenvalues smaller than rcond vals_rsqrt = eigs.eigenvalues.rsqrt() - vals_rsqrt[ - eigs.eigenvalues < train_config.rcond * eigs.eigenvalues.max() - ] = 0 + vals_rsqrt[eigs.eigenvalues < rcond * eigs.eigenvalues.max()] = 0 # PCA whitening # following https://doi.org/10.1080/00031305.2016.1277159 diff --git a/src/cupbearer/detectors/statistical/spectral_detector.py b/src/cupbearer/detectors/statistical/spectral_detector.py index 7774721d..5a79f630 100644 --- a/src/cupbearer/detectors/statistical/spectral_detector.py +++ b/src/cupbearer/detectors/statistical/spectral_detector.py @@ -2,7 +2,6 @@ from cupbearer.detectors.statistical.statistical import ( ActivationCovarianceBasedDetector, - ActivationCovarianceTrainConfig, ) @@ -15,7 +14,7 @@ class SpectralSignatureDetector(ActivationCovarianceBasedDetector): use_trusted: bool = False - def post_covariance_training(self, train_config: ActivationCovarianceTrainConfig): + def post_covariance_training(self, **kwargs): # Calculate top right singular vectors from covariance matrices self.top_singular_vectors = { k: torch.linalg.eigh(cov).eigenvectors[:, -1] diff --git a/src/cupbearer/detectors/statistical/statistical.py b/src/cupbearer/detectors/statistical/statistical.py index 2e686777..031bbdb6 100644 --- a/src/cupbearer/detectors/statistical/statistical.py +++ b/src/cupbearer/detectors/statistical/statistical.py @@ -1,73 +1,15 @@ from abc import ABC, abstractmethod -from dataclasses import dataclass import torch -from torch.utils.data import DataLoader, Dataset +from torch.utils.data import DataLoader from tqdm import tqdm from cupbearer.detectors.anomaly_detector import ActivationBasedDetector from cupbearer.detectors.statistical.helpers import update_covariance -from cupbearer.utils.utils import 
BaseConfig - - -@dataclass -class StatisticalTrainConfig(BaseConfig, ABC): - max_batches: int = 0 - batch_size: int = 4096 - max_batch_size: int = 4096 - pbar: bool = True - num_workers: int = 0 - # robust: bool = False # TODO spectre uses - # https://www.semanticscholar.org/paper/Being-Robust-(in-High-Dimensions)-Can-Be-Practical-Diakonikolas-Kamath/2a6de51d86f13e9eb7efa85491682dad0ccd65e8?utm_source=direct_link - - def get_dataloader(self, dataset: Dataset, train=True): - if train: - return DataLoader( - dataset, - batch_size=self.batch_size, - shuffle=True, - num_workers=self.num_workers, - persistent_workers=self.num_workers > 0, - ) - else: - return DataLoader( - dataset, - batch_size=self.batch_size, - shuffle=False, - ) - - -@dataclass -class DebugStatisticalTrainConfig(StatisticalTrainConfig): - max_batches: int = 3 - batch_size: int = 5 - max_batch_size: int = 5 - - -@dataclass -class ActivationCovarianceTrainConfig(StatisticalTrainConfig): - rcond: float = 1e-5 - - -@dataclass -class DebugActivationCovarianceTrainConfig( - DebugStatisticalTrainConfig, ActivationCovarianceTrainConfig -): - pass - - -@dataclass -class MahalanobisTrainConfig(ActivationCovarianceTrainConfig): - relative: bool = False - - -@dataclass -class DebugMahalanobisTrainConfig(DebugStatisticalTrainConfig, MahalanobisTrainConfig): - pass class StatisticalDetector(ActivationBasedDetector, ABC): - use_trusted: bool + use_trusted: bool = True @abstractmethod def init_variables(self, activation_sizes: dict[str, torch.Size]): @@ -82,8 +24,10 @@ def train( trusted_data, untrusted_data, *, - num_classes: int, - train_config: StatisticalTrainConfig, + batch_size: int = 1024, + pbar: bool = True, + max_batches: int | None = None, + **kwargs, ): # Common for statistical methods is that the training does not require # gradients, but instead computes summary statistics or similar @@ -101,7 +45,8 @@ def train( ) data = untrusted_data - data_loader = train_config.get_dataloader(data) + # No reason to 
shuffle, we're just computing statistics + data_loader = DataLoader(data, batch_size=batch_size, shuffle=False) example_batch = next(iter(data_loader)) _, example_activations = self.get_activations(example_batch) @@ -109,11 +54,11 @@ def train( activation_sizes = {k: v[0].size() for k, v in example_activations.items()} self.init_variables(activation_sizes) - if train_config.pbar: + if pbar: data_loader = tqdm(data_loader) for i, batch in enumerate(data_loader): - if train_config.max_batches and i >= train_config.max_batches: + if max_batches and i >= max_batches: break _, activations = self.get_activations(batch) self.batch_update(activations) @@ -142,22 +87,12 @@ def batch_update(self, activations: dict[str, torch.Tensor]): ) @abstractmethod - def post_covariance_training(self, train_config: ActivationCovarianceTrainConfig): + def post_covariance_training(self, **kwargs): pass - def train( - self, - trusted_data, - untrusted_data, - *, - num_classes: int, - train_config: ActivationCovarianceTrainConfig, - ): + def train(self, trusted_data, untrusted_data, **kwargs): super().train( - trusted_data=trusted_data, - untrusted_data=untrusted_data, - num_classes=num_classes, - train_config=train_config, + trusted_data=trusted_data, untrusted_data=untrusted_data, **kwargs ) # Post process @@ -167,4 +102,4 @@ def train( if any(torch.count_nonzero(C) == 0 for C in self.covariances.values()): raise RuntimeError("All zero covariance matrix detected.") - self.post_covariance_training(train_config=train_config) + self.post_covariance_training(**kwargs) diff --git a/src/cupbearer/scripts/train_classifier.py b/src/cupbearer/scripts/train_classifier.py index c55a4fc4..39303c35 100644 --- a/src/cupbearer/scripts/train_classifier.py +++ b/src/cupbearer/scripts/train_classifier.py @@ -84,10 +84,7 @@ def main( else: metrics_logger = None - trainer = L.Trainer( - default_root_dir=path, - **trainer_kwargs, - ) + trainer = L.Trainer(default_root_dir=path, **trainer_kwargs) with 
warnings.catch_warnings(): if not val_loaders: diff --git a/src/cupbearer/scripts/train_detector.py b/src/cupbearer/scripts/train_detector.py index d7dfb00d..f9392d74 100644 --- a/src/cupbearer/scripts/train_detector.py +++ b/src/cupbearer/scripts/train_detector.py @@ -1,36 +1,24 @@ from cupbearer.detectors import AnomalyDetector from cupbearer.tasks import Task from cupbearer.utils.scripts import script -from cupbearer.utils.train import TrainConfig -from cupbearer.utils.utils import BaseConfig -from . import EvalDetectorConfig, eval_detector +from . import eval_detector @script def main( task: Task, detector: AnomalyDetector, - num_classes: int, - train: BaseConfig | None = None, - seed: int = 0, + **train_kwargs, ): - if train is None: - train = TrainConfig() detector.set_model(task.model) detector.train( trusted_data=task.trusted_data, untrusted_data=task.untrusted_train_data, - num_classes=num_classes, - train_config=train, + **train_kwargs, ) path = detector.save_path if path: detector.save_weights(path / "detector") - eval_cfg = EvalDetectorConfig( - detector=detector, - task=task, - seed=seed, - ) - eval_detector(eval_cfg) + eval_detector(detector=detector, task=task, pbar=True) diff --git a/src/cupbearer/utils/__init__.py b/src/cupbearer/utils/__init__.py index 5ca825cb..1d326396 100644 --- a/src/cupbearer/utils/__init__.py +++ b/src/cupbearer/utils/__init__.py @@ -1,4 +1,2 @@ # ruff: noqa: F401 -from .optimizers import OptimizerConfig -from .train import DebugTrainConfig, TrainConfig from .utils import inputs_from_batch, load, save diff --git a/src/cupbearer/utils/optimizers.py b/src/cupbearer/utils/optimizers.py deleted file mode 100644 index 3adf4fbf..00000000 --- a/src/cupbearer/utils/optimizers.py +++ /dev/null @@ -1,19 +0,0 @@ -from dataclasses import dataclass - -import torch - -from cupbearer.utils.utils import BaseConfig - - -@dataclass -class OptimizerConfig(BaseConfig): - name: str = "adam" - lr: float = 1e-3 - - def get_optimizer(self, params) 
-> torch.optim.Optimizer: - if self.name == "adam": - return torch.optim.Adam(params, lr=self.lr) - elif self.name == "sgd": - return torch.optim.SGD(params, lr=self.lr) - else: - raise ValueError(f"Unknown optimizer {self.name}") diff --git a/src/cupbearer/utils/train.py b/src/cupbearer/utils/train.py deleted file mode 100644 index a87b9b6d..00000000 --- a/src/cupbearer/utils/train.py +++ /dev/null @@ -1,100 +0,0 @@ -from dataclasses import asdict, dataclass, field -from pathlib import Path -from typing import Optional - -import lightning as L -from lightning.pytorch import callbacks, loggers -from torch.utils.data import DataLoader, Dataset - -from cupbearer.utils.optimizers import OptimizerConfig -from cupbearer.utils.utils import BaseConfig - - -@dataclass(kw_only=True) -class TrainConfig(BaseConfig): - num_epochs: int = 10 - batch_size: int = 128 - max_batch_size: int = 2048 - optimizer: OptimizerConfig = field(default_factory=OptimizerConfig) - num_workers: int = 0 - pin_memory: bool = True - max_steps: int = -1 - check_val_every_n_epoch: int = 1 - pbar: bool = False - log_every_n_steps: Optional[int] = None - wandb: bool = False - devices: int | list[int] | str = "auto" - accelerator: str = "auto" - precision: int | str = 32 - monitor_device_stats: bool = False - profiler: Optional[str] = None - - @property - def callbacks(self): - callback_list = [] - if self.monitor_device_stats: - callback_list.append(callbacks.DeviceStatsMonitor(cpu_stats=True)) - - return callback_list - - def get_dataloader(self, dataset: Dataset, train=True): - if train: - return DataLoader( - dataset, - batch_size=self.batch_size, - shuffle=True, - num_workers=self.num_workers, - persistent_workers=self.num_workers > 0, - pin_memory=self.pin_memory, - ) - else: - return DataLoader( - dataset, - batch_size=self.max_batch_size, - shuffle=False, - ) - - # We deliberately don't make the `path` argument optional, since that makes it - # easy to forget passing it on (and this will likely 
only be used in internal - # code anyway). - def get_trainer(self, path: Path | None, **kwargs): - # Define metrics logger - if self.wandb: - metrics_logger = loggers.WandbLogger(project="abstractions") - metrics_logger.experiment.config.update(asdict(self)) - if path: - metrics_logger = loggers.TensorBoardLogger( - save_dir=path, - name="", - version="", - sub_dir="tensorboard", - ) - else: - metrics_logger = None - - trainer_kwargs = dict( - max_epochs=self.num_epochs, - max_steps=self.max_steps, - callbacks=self.callbacks, - logger=metrics_logger, - default_root_dir=path, - check_val_every_n_epoch=self.check_val_every_n_epoch, - enable_progress_bar=self.pbar, - log_every_n_steps=self.log_every_n_steps, - devices=self.devices, - accelerator=self.accelerator, - precision=self.precision, - profiler=self.profiler, - ) - trainer_kwargs.update(kwargs) # override defaults if given - return L.Trainer(**trainer_kwargs) - - -@dataclass(kw_only=True) -class DebugTrainConfig(TrainConfig): - num_epochs: int = 1 - max_steps: int = 1 - max_batch_size: int = 2 - wandb: bool = False - batch_size: int = 2 - log_every_n_steps: int = 1 diff --git a/src/cupbearer/utils/utils.py b/src/cupbearer/utils/utils.py index aa6013f3..7d519c14 100644 --- a/src/cupbearer/utils/utils.py +++ b/src/cupbearer/utils/utils.py @@ -4,12 +4,10 @@ import functools import importlib import pickle -from dataclasses import dataclass from pathlib import Path from typing import Iterable, TypeVar, Union import torch -from simple_parsing.helpers import serialization SUFFIX = ".pt" TYPE_PREFIX = "__TYPE__:" @@ -139,27 +137,6 @@ def dict_field(): return dataclasses.field(default_factory=dict) -@dataclass(kw_only=True) -class BaseConfig(serialization.serializable.Serializable): - def __post_init__(self): - pass - - def to_dict( - self, - dict_factory: type[dict] = dict, - recurse: bool = True, - save_dc_types: bool = True, - ) -> dict: - # This is the only change we make: default is for save_dc_types to be False. 
- # Instead, we always pass `True`. (We don't want the default elsewhere - # to get passed here and override this.) - # We could pass save_dc_types to `save`, but that doesn't propagate into - # lists of dataclasses. - return serialization.serializable.to_dict( - self, dict_factory, recurse, save_dc_types=True - ) - - def get_object(path: str): """Get an object from a string. From 94c54ed78e0134ec3037b77ec2d8a9afb4587ea3 Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Sat, 2 Mar 2024 15:10:28 -0800 Subject: [PATCH 11/25] Adjust abstractions --- .../detectors/abstraction/__init__.py | 52 +------------------ .../detectors/abstraction/abstraction.py | 2 + .../abstraction/abstraction_detector.py | 26 +--------- 3 files changed, 5 insertions(+), 75 deletions(-) diff --git a/src/cupbearer/detectors/abstraction/__init__.py b/src/cupbearer/detectors/abstraction/__init__.py index 33519c79..563f8d5f 100644 --- a/src/cupbearer/detectors/abstraction/__init__.py +++ b/src/cupbearer/detectors/abstraction/__init__.py @@ -1,7 +1,4 @@ -from abc import ABC, abstractmethod -from dataclasses import dataclass, field - -from cupbearer.models import HookedModel +# ruff: noqa: F401 from .abstraction import ( Abstraction, @@ -9,50 +6,3 @@ LocallyConsistentAbstraction, ) from .abstraction_detector import AbstractionDetector - - -# This is all unnessarily verbose right now, it's a remnant from when we had -# robust optimization for abstractions and I experimented with some variations. -# Leaving it like this for now, but ultimately, the way to go is probably to just -# let users specify a path to a python function that gets called -# to construct the abstraction. (With get_default_abstraction being the default.) 
-@dataclass -class AbstractionConfig(ABC): - size_reduction: int = 4 - - @abstractmethod - def build(self, model: HookedModel) -> Abstraction: - pass - - -class LocallyConsistentAbstractionConfig(AbstractionConfig): - def build(self, model: HookedModel) -> LocallyConsistentAbstraction: - return LocallyConsistentAbstraction.get_default( - model, - self.size_reduction, - ) - - -class AutoencoderAbstractionConfig(AbstractionConfig): - def build(self, model: HookedModel) -> AutoencoderAbstraction: - return AutoencoderAbstraction.get_default( - model, - self.size_reduction, - ) - - -@dataclass -class AbstractionDetectorConfig: - abstraction: AbstractionConfig = field( - default_factory=LocallyConsistentAbstractionConfig - ) - max_batch_size: int = 4096 - - def build(self, model, save_dir) -> AbstractionDetector: - abstraction = self.abstraction.build(model) - return AbstractionDetector( - model=model, - abstraction=abstraction, - max_batch_size=self.max_batch_size, - save_path=save_dir, - ) diff --git a/src/cupbearer/detectors/abstraction/abstraction.py b/src/cupbearer/detectors/abstraction/abstraction.py index 53587d77..766b33b0 100644 --- a/src/cupbearer/detectors/abstraction/abstraction.py +++ b/src/cupbearer/detectors/abstraction/abstraction.py @@ -54,6 +54,8 @@ def visit(node): class Abstraction(nn.Module): + # TODO: I think we should likely get rid of get_default and instead just have some + # informal collection of helper functions for building reasonable abstractions. 
@classmethod @abstractmethod def get_default(cls, model: HookedModel, size_reduction: int) -> Abstraction: diff --git a/src/cupbearer/detectors/abstraction/abstraction_detector.py b/src/cupbearer/detectors/abstraction/abstraction_detector.py index cec80e4f..83098eb8 100644 --- a/src/cupbearer/detectors/abstraction/abstraction_detector.py +++ b/src/cupbearer/detectors/abstraction/abstraction_detector.py @@ -14,7 +14,6 @@ from cupbearer.detectors.anomaly_detector import ( ActivationBasedDetector, ) -from cupbearer.models import HookedModel def per_layer(func: Callable): @@ -60,12 +59,7 @@ def compute_cosine_losses(input: torch.Tensor, target: torch.Tensor) -> torch.Te @per_layer def compute_kl_losses(input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: - return F.kl_div( - input, - target, - reduction="none", - log_target=True, - ).sum(dim=1) + return F.kl_div(input, target, reduction="none", log_target=True).sum(dim=1) def compute_losses( @@ -92,10 +86,9 @@ def __init__( self, get_activations: Callable[[torch.Tensor], tuple[Any, dict[str, torch.Tensor]]], abstraction: Abstraction, - lr: float = 1e-3, + lr: float, ): super().__init__() - self.save_hyperparameters(ignore=["get_activations", "abstraction"]) self.get_activations = get_activations self.abstraction = abstraction @@ -124,7 +117,6 @@ class AbstractionDetector(ActivationBasedDetector): def __init__( self, - model: HookedModel, abstraction: Abstraction, max_batch_size: int = 4096, save_path: str | Path | None = None, @@ -132,22 +124,16 @@ def __init__( self.abstraction = abstraction names = list(abstraction.tau_maps.keys()) super().__init__( - model, activation_name_func=lambda _: names, max_batch_size=max_batch_size, save_path=save_path, ) - @property - def should_train_on_clean_data(self) -> bool: - return True - def train( self, trusted_data, untrusted_data, *, - num_classes: int, lr: float = 1e-3, batch_size: int = 64, **trainer_kwargs, @@ -168,14 +154,6 @@ def train( ) # TODO: implement validation 
data - # val_loaders = { - # k: train_config.get_dataloader(v.build, train=False) - # for k, v in self.val_data.items() - # } - # checkpoint_callback = ModelCheckpoint( - # dirpath=self.save_path, - # filename="detector", - # ) self.model.eval() # We don't need gradients for base model parameters: From 4c7e0c2bfd0538149b095a72eda3382cb3077ea7 Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Sat, 2 Mar 2024 16:59:46 -0800 Subject: [PATCH 12/25] Remove loggers I think we haven't been using these for a while --- src/cupbearer/utils/logger.py | 66 ----------------------------------- 1 file changed, 66 deletions(-) delete mode 100644 src/cupbearer/utils/logger.py diff --git a/src/cupbearer/utils/logger.py b/src/cupbearer/utils/logger.py deleted file mode 100644 index 4e33f271..00000000 --- a/src/cupbearer/utils/logger.py +++ /dev/null @@ -1,66 +0,0 @@ -from abc import ABC -from typing import Any, Dict, Mapping, Optional - - -class Logger(ABC): - """Base class for all metric loggers. - - Subclasses need to override either `_log_scalar` or `log_metrics`. 
- """ - - def _log_scalar(self, name: str, value: Any, step: int, **kwargs): - raise NotImplementedError - - def log_metrics(self, metrics: Mapping[str, Any], step: int): - for name, value in metrics.items(): - self._log_scalar(name, value, step) - - def close(self): - pass - - -class DummyLogger(Logger): - def _log_scalar(self, name: str, value: Any, step: int, **kwargs): - pass - - -class ClearMLLogger(Logger): - def __init__(self, project_name: str, task_name: str): - super().__init__() - # Import here instead of at the top so this isn't a hard dependency - from clearml import Task - - # Don't seed anything here, that should be handled elsewhere - Task.set_random_seed(None) - self.task = Task.init(project_name=project_name, task_name=task_name) - self.logger = self.task.get_logger() - - def _log_scalar(self, name: str, value: Any, step: int, **kwargs): - # ClearML takes a name for a plot and then separately a name - # for the series in that plot. For now, we just make an extra - # plot for every series. 
- return self.logger.report_scalar(name, name, value, step) - - def close(self): - self.task.close() - - -class WandbLogger(Logger): - def __init__( - self, - project_name: str, - task_name: Optional[str] = None, - config: Optional[Dict[str, Any]] = None, - **kwargs, - ): - super().__init__() - import wandb - - wandb.init(project=project_name, name=task_name, config=config, **kwargs) - self.logger = wandb - - def log_metrics(self, metrics: Dict[str, Any], step: int): - return self.logger.log(metrics, step) - - def close(self): - self.logger.finish() From 6809a7e71d1f655a447864e7021d20957b706934 Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Sat, 2 Mar 2024 17:18:30 -0800 Subject: [PATCH 13/25] Fix bugs and tests Tests all pass now; I removed one or two that aren't applicable anymore (notably checking whether WaNet loads correctly out of the box) --- src/cupbearer/data/backdoors.py | 7 +- src/cupbearer/detectors/__init__.py | 2 +- .../detectors/abstraction/abstraction.py | 2 +- .../detectors/statistical/statistical.py | 4 +- src/cupbearer/scripts/eval_classifier.py | 6 +- src/cupbearer/scripts/train_classifier.py | 47 ++-- src/cupbearer/scripts/train_detector.py | 2 - src/cupbearer/tasks/adversarial_examples.py | 2 +- tests/test_data.py | 251 +++++------------- tests/test_detectors.py | 28 +- tests/test_pipeline.py | 236 ++++++++-------- 11 files changed, 220 insertions(+), 367 deletions(-) diff --git a/src/cupbearer/data/backdoors.py b/src/cupbearer/data/backdoors.py index 6751235b..c4f1298b 100644 --- a/src/cupbearer/data/backdoors.py +++ b/src/cupbearer/data/backdoors.py @@ -110,6 +110,11 @@ def __post_init__(self): self._warping_field = None self._control_grid = None + # Load or generate control grid; important to do this now before we might + # create multiple workers---we wouldn't want to generate different random + # control grids in each one. 
+ self.control_grid + assert 0 <= self.p_noise <= 1, "Probability must be between 0 and 1" assert ( 0 <= self.p_noise + self.p_backdoor <= 1 @@ -120,7 +125,7 @@ def control_grid(self) -> torch.Tensor: if self._control_grid is not None: return self._control_grid - if self.path: + if self.path is None: logger.debug("Generating new control grid for warping field.") control_grid_shape = (2, self.control_grid_width, self.control_grid_width) control_grid = 2 * torch.rand(*control_grid_shape) - 1 diff --git a/src/cupbearer/detectors/__init__.py b/src/cupbearer/detectors/__init__.py index 775b2dbe..0cd36fec 100644 --- a/src/cupbearer/detectors/__init__.py +++ b/src/cupbearer/detectors/__init__.py @@ -1,5 +1,5 @@ # ruff: noqa: F401 -from .abstraction import AbstractionDetectorConfig +from .abstraction import AbstractionDetector from .anomaly_detector import AnomalyDetector from .finetuning import FinetuningAnomalyDetector from .statistical import ( diff --git a/src/cupbearer/detectors/abstraction/abstraction.py b/src/cupbearer/detectors/abstraction/abstraction.py index 766b33b0..bbeb421d 100644 --- a/src/cupbearer/detectors/abstraction/abstraction.py +++ b/src/cupbearer/detectors/abstraction/abstraction.py @@ -195,7 +195,7 @@ def get_mlp_abstraction( return cls(tau_maps, steps) -class AutoencoderAbstraction(nn.Module): +class AutoencoderAbstraction(Abstraction): def __init__( self, tau_maps: dict[str, nn.Module], # encoders diff --git a/src/cupbearer/detectors/statistical/statistical.py b/src/cupbearer/detectors/statistical/statistical.py index 031bbdb6..ee25eb88 100644 --- a/src/cupbearer/detectors/statistical/statistical.py +++ b/src/cupbearer/detectors/statistical/statistical.py @@ -26,7 +26,7 @@ def train( *, batch_size: int = 1024, pbar: bool = True, - max_batches: int | None = None, + max_steps: int | None = None, **kwargs, ): # Common for statistical methods is that the training does not require @@ -58,7 +58,7 @@ def train( data_loader = tqdm(data_loader) for i, 
batch in enumerate(data_loader): - if max_batches and i >= max_batches: + if max_steps and i >= max_steps: break _, activations = self.get_activations(batch) self.batch_update(activations) diff --git a/src/cupbearer/scripts/eval_classifier.py b/src/cupbearer/scripts/eval_classifier.py index ac0a42c3..dd08aacf 100644 --- a/src/cupbearer/scripts/eval_classifier.py +++ b/src/cupbearer/scripts/eval_classifier.py @@ -7,22 +7,20 @@ from cupbearer.models import HookedModel from cupbearer.scripts._shared import Classifier -from cupbearer.utils.scripts import script -@script def main( data: Dataset, model: HookedModel, path: Path | str, max_batches: Optional[int] = None, - max_batch_size: int = 2048, + batch_size: int = 2048, ): path = Path(path) dataloader = DataLoader( data, - batch_size=max_batch_size, + batch_size=batch_size, shuffle=False, ) diff --git a/src/cupbearer/scripts/train_classifier.py b/src/cupbearer/scripts/train_classifier.py index 39303c35..aaed8fc3 100644 --- a/src/cupbearer/scripts/train_classifier.py +++ b/src/cupbearer/scripts/train_classifier.py @@ -9,10 +9,8 @@ from cupbearer.models import HookedModel from cupbearer.scripts._shared import Classifier -from cupbearer.utils.scripts import script -@script def main( model: HookedModel, train_loader: DataLoader, @@ -49,9 +47,8 @@ def main( # TODO: once we do longer training runs we'll want to have multiple # checkpoints, potentially based on validation loss if ( - path # If the user already provided a custom checkpoint config, we'll use that: - and not any(isinstance(c, ModelCheckpoint) for c in callbacks) + not any(isinstance(c, ModelCheckpoint) for c in callbacks) # If the user explicitly disabled checkpointing, we don't want to override that: and trainer_kwargs.get("enable_checkpointing", True) ): @@ -62,27 +59,31 @@ def main( ) ) + trainer_kwargs["callbacks"] = callbacks + # Define metrics logger # TODO: make adjustable and set config correctly - if wandb: - metrics_logger = 
loggers.WandbLogger(project="cupbearer") - metrics_logger.experiment.config.update(trainer_kwargs) - metrics_logger.experiment.config.update( - { - "model": repr(model), - "train_data": repr(train_loader.dataset), - "batch_size": train_loader.batch_size, - } - ) - if path: - metrics_logger = loggers.TensorBoardLogger( - save_dir=path, - name="", - version="", - sub_dir="tensorboard", - ) - else: - metrics_logger = None + if "logger" not in trainer_kwargs: + if wandb: + metrics_logger = loggers.WandbLogger(project="cupbearer") + metrics_logger.experiment.config.update(trainer_kwargs) + metrics_logger.experiment.config.update( + { + "model": repr(model), + "train_data": repr(train_loader.dataset), + "batch_size": train_loader.batch_size, + } + ) + elif path: + metrics_logger = loggers.TensorBoardLogger( + save_dir=path, + name="", + version="", + sub_dir="tensorboard", + ) + else: + metrics_logger = None + trainer_kwargs["logger"] = metrics_logger trainer = L.Trainer(default_root_dir=path, **trainer_kwargs) diff --git a/src/cupbearer/scripts/train_detector.py b/src/cupbearer/scripts/train_detector.py index f9392d74..350d12ff 100644 --- a/src/cupbearer/scripts/train_detector.py +++ b/src/cupbearer/scripts/train_detector.py @@ -1,11 +1,9 @@ from cupbearer.detectors import AnomalyDetector from cupbearer.tasks import Task -from cupbearer.utils.scripts import script from . 
import eval_detector -@script def main( task: Task, detector: AnomalyDetector, diff --git a/src/cupbearer/tasks/adversarial_examples.py b/src/cupbearer/tasks/adversarial_examples.py index ee593558..496deab8 100644 --- a/src/cupbearer/tasks/adversarial_examples.py +++ b/src/cupbearer/tasks/adversarial_examples.py @@ -25,7 +25,7 @@ def adversarial_examples( anomaly_func=lambda dataset, train: make_adversarial_examples( model, dataset, - cache_path / f"advexes_{'train' if train else 'test'}", + cache_path / f"adversarial_examples_{'train' if train else 'test'}", **kwargs, ), trusted_fraction=trusted_fraction, diff --git a/tests/test_data.py b/tests/test_data.py index 66a31b5f..b323b89d 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -1,3 +1,4 @@ +import functools from dataclasses import dataclass import numpy as np @@ -22,17 +23,6 @@ def __getitem__(self, index): return self.value -@dataclass -class DummyConfig(data.DatasetConfig): - length: int - value: str - # Doesn't apply or matter - num_classes: int = 0 - - def _build(self) -> Dataset: - return DummyDataset(self.length, self.value) - - class DummyImageData(Dataset): def __init__(self, length: int, num_classes: int, shape: tuple[int, int]): self.length = length @@ -45,7 +35,7 @@ def __init__(self, length: int, num_classes: int, shape: tuple[int, int]): dtype=torch.float32, # Move channel dimension to front ).permute(2, 0, 1) - # Need any seed so that labels are (somewhat) consitent over instances + # Need any seed so that labels are (somewhat) consistent over instances self._rng = np.random.default_rng(seed=5965) def __len__(self): @@ -57,16 +47,6 @@ def __getitem__(self, index) -> tuple[torch.Tensor, int]: return self.img, self._rng.integers(self.num_classes) -@dataclass -class DummyImageConfig(data.DatasetConfig): - length: int - num_classes: int = 10 - shape: tuple[int, int] = (8, 12) - - def _build(self) -> Dataset: - return DummyImageData(self.length, self.num_classes, self.shape) - - 
######################### # Tests for TestDataMix ######################### @@ -87,21 +67,6 @@ def mixed_dataset(clean_dataset, anomalous_dataset): return data.MixedData(clean_dataset, anomalous_dataset) -@pytest.fixture -def clean_config(): - return DummyConfig(9, "a") - - -@pytest.fixture -def anomalous_config(): - return DummyConfig(7, "b") - - -@pytest.fixture -def mixed_config(clean_config, anomalous_config): - return data.MixedDataConfig(clean_config, anomalous_config) - - def test_len(mixed_dataset): assert len(mixed_dataset) == 14 assert mixed_dataset.normal_len == mixed_dataset.anomalous_len == 7 @@ -127,96 +92,50 @@ def test_uneven_weight(clean_dataset, anomalous_dataset): assert mixed_data[i] == ("b", 1) -def test_simple_mixed_build(mixed_config): - mixed_data = mixed_config.build() - assert len(mixed_data) == 14 - assert mixed_data.normal_len == mixed_data.anomalous_len == 7 - for i in range(7): - assert mixed_data[i] == ("a", 0) - for i in range(7, 14): - assert mixed_data[i] == ("b", 1) - - -def test_mixed_max_size(clean_config, anomalous_config): - # Just some random big enough numbers: - clean_config.length = 105 - anomalous_config.length = 97 - # These max sizes shouldn't affect anything, but why not throw them into the mix. - clean_config.max_size = 51 - anomalous_config.max_size = 23 - # The actual mixed dataset we build now is the same as before: 10 datapoints, - # 3 normal and 7 anomalous. 
- mixed_config = data.MixedDataConfig(clean_config, anomalous_config) - mixed_config.max_size = 10 - mixed_config.normal_weight = 0.3 - mixed_data = mixed_config.build() - - assert len(mixed_data) == 10 - assert mixed_data.normal_len == 3 - assert mixed_data.anomalous_len == 7 - for i in range(3): - assert mixed_data[i] == ("a", 0) - for i in range(3, 10): - assert mixed_data[i] == ("b", 1) - - ####################### # Tests for Backdoors ####################### @pytest.fixture -def clean_image_config(): - return DummyImageConfig(9) +def clean_image_dataset(): + return DummyImageData(9, 10, (8, 12)) @pytest.fixture( params=[ data.backdoors.CornerPixelBackdoor, data.backdoors.NoiseBackdoor, - data.backdoors.WanetBackdoor, + functools.partial(data.backdoors.WanetBackdoor, path=None), ] ) -def BackdoorConfig(request): +def backdoor_type(request): return request.param -def test_backdoor_relabeling(clean_image_config, BackdoorConfig): - clean_image_config.num_classes = 2**63 - 1 +def test_backdoor_relabeling(clean_image_dataset, backdoor_type): target_class = 1 - data_config = data.BackdoorData( - original=clean_image_config, - backdoor=BackdoorConfig( - p_backdoor=1.0, - target_class=target_class, - ), + dataset = data.BackdoorDataset( + original=clean_image_dataset, + backdoor=backdoor_type(p_backdoor=1.0, target_class=target_class), ) - for img, label in data_config.build(): + for img, label in dataset: assert label == target_class -def test_backdoor_img_changes(clean_image_config, BackdoorConfig): - clean_config = data.BackdoorData( - original=clean_image_config, - backdoor=BackdoorConfig( - p_backdoor=0.0, - ), +def test_backdoor_img_changes(clean_image_dataset, backdoor_type): + clean_data = data.BackdoorDataset( + original=clean_image_dataset, backdoor=backdoor_type(p_backdoor=0.0) ) - anomalous_config = data.BackdoorData( - original=clean_image_config, - backdoor=BackdoorConfig( - p_backdoor=1.0, - ), + anomalous_data = data.BackdoorDataset( + 
original=clean_image_dataset, backdoor=backdoor_type(p_backdoor=1.0) ) - for clean_sample, (anomalous_img, _) in zip( - clean_config.build(), - anomalous_config.build(), - ): + for clean_sample, (anomalous_img, _) in zip(clean_data, anomalous_data): clean_img, _ = clean_sample # Check that something has changed - assert clean_img is not anomalous_config.backdoor(clean_sample)[0] - assert torch.any(clean_img != anomalous_config.backdoor(clean_sample)[0]) + assert clean_img is not anomalous_data.backdoor(clean_sample)[0] + assert torch.any(clean_img != anomalous_data.backdoor(clean_sample)[0]) assert torch.any(clean_img != anomalous_img) # Check that pixel values still in valid range @@ -231,26 +150,30 @@ def test_backdoor_img_changes(clean_image_config, BackdoorConfig): ) -def test_wanet_backdoor(clean_image_config): - clean_image_config.num_classes = 2**63 - 1 - target_class = 1 - clean_config = data.BackdoorData( - original=clean_image_config, +def test_wanet_backdoor(clean_image_dataset): + # Pick a target class outside the actual range so we can later tell whether it + # was set correctly. 
+ target_class = 10_000 + clean_data = data.BackdoorDataset( + original=clean_image_dataset, backdoor=data.backdoors.WanetBackdoor( + path=None, p_backdoor=0.0, target_class=target_class, ), ) - anomalous_config = data.BackdoorData( - original=clean_image_config, + anomalous_data = data.BackdoorDataset( + original=clean_image_dataset, backdoor=data.backdoors.WanetBackdoor( + path=None, p_backdoor=1.0, target_class=target_class, ), ) - noise_config = data.BackdoorData( - original=clean_image_config, + noise_data = data.BackdoorDataset( + original=clean_image_dataset, backdoor=data.backdoors.WanetBackdoor( + path=None, p_backdoor=0.0, p_noise=1.0, target_class=target_class, @@ -260,12 +183,9 @@ def test_wanet_backdoor(clean_image_config): (clean_img, clean_label), (anoma_img, anoma_label), (noise_img, noise_label), - ) in zip( - clean_config.build(), - anomalous_config.build(), - noise_config.build(), - ): - # Check labels + ) in zip(clean_data, anomalous_data, noise_data): + # Check labels. Our target class is outside the valid range, + # so no chance it got randomly chosen. 
assert clean_label != target_class assert anoma_label == target_class assert noise_label != target_class @@ -285,27 +205,17 @@ def test_wanet_backdoor(clean_image_config): def test_wanet_backdoor_on_multiple_workers( - clean_image_config, + clean_image_dataset, ): - clean_image_config.num_classes = 1 - target_class = 1 - anomalous_config = data.BackdoorData( - original=clean_image_config, - backdoor=data.backdoors.WanetBackdoor( - p_backdoor=1.0, - p_noise=0.0, - target_class=target_class, - ), - ) - data_loader = DataLoader( - dataset=anomalous_config.build(), - num_workers=2, - batch_size=1, + anomalous_data = data.BackdoorDataset( + original=clean_image_dataset, + backdoor=data.backdoors.WanetBackdoor(path=None, p_backdoor=1.0, p_noise=0.0), ) + data_loader = DataLoader(dataset=anomalous_data, num_workers=2, batch_size=1) imgs = [img for img_batch, label_batch in data_loader for img in img_batch] assert all(torch.allclose(imgs[0], img) for img in imgs) - clean_image = clean_image_config.build().dataset.img + clean_image = clean_image_dataset.img assert not any(torch.allclose(clean_image, img) for img in imgs) @@ -325,19 +235,16 @@ def augmentation(request): return request.param -def test_augmentation(clean_image_config, augmentation): +def test_augmentation(clean_image_dataset, augmentation): # See that augmentation does something unless dud - for img, label in clean_image_config.build(): + for img, label in clean_image_dataset: aug_img, aug_label = augmentation((img, label)) assert label == aug_label assert not torch.allclose(aug_img, img) # Try with multiple workers and batches data_loader = DataLoader( - dataset=clean_image_config.build(), - num_workers=2, - batch_size=3, - drop_last=False, + dataset=clean_image_dataset, num_workers=2, batch_size=3, drop_last=False ) for img, label in data_loader: aug_img, aug_label = augmentation((img, label)) @@ -352,50 +259,48 @@ def test_augmentation(clean_image_config, augmentation): assert torch.all(aug_img == img) -def 
test_random_crop(clean_image_config): +def test_random_crop(clean_image_dataset): fill_val = 2.75 augmentation = data.RandomCrop( padding=100, # huge padding so that chance of no change is small fill=fill_val, ) - for img, label in clean_image_config.build(): + for img, label in clean_image_dataset: aug_img, aug_label = augmentation((img, label)) assert torch.any(aug_img == fill_val) @dataclass -class DummyPytorchImageConfig(data.PytorchConfig): +class DummyPytorchDataset(data.PytorchDataset): name: str = "dummy" length: int = 32 num_classes: int = 10 shape: tuple[int, int] = (8, 12) + default_augmentations: bool = True - def get_transforms(self): - transforms = super().get_transforms() - assert isinstance(transforms[0], data.transforms.ToTensor) - return transforms[1:] + def __post_init__(self): + # Because our data are already tensors, we need to disable the default ToTensor + assert len(self.transforms) == 1 + assert isinstance(self.transforms[0], data.transforms.ToTensor) + self.transforms = [] + # Now call super to add the augmentations + super().__post_init__() def _build(self) -> Dataset: return DummyImageData(self.length, self.num_classes, self.shape) -@pytest.fixture -def pytorch_data_config(): - return DummyPytorchImageConfig() - - -def test_pytorch_dataset_transforms(pytorch_data_config, BackdoorConfig): - for (_img, _label), (img, label) in zip( - pytorch_data_config._build(), pytorch_data_config.build() - ): +def test_pytorch_dataset_transforms(): + pytorch_dataset = DummyPytorchDataset() + for (_img, _label), (img, label) in zip(pytorch_dataset._build(), pytorch_dataset): assert _label == label assert _img.size() == img.size() assert _img is not img, "Transforms does not seem to have been applied" - transforms = pytorch_data_config.get_transforms() + transforms = pytorch_dataset.transforms transform_typereps = [repr(type(t)) for t in transforms] augmentation_used = False - for trafo in pytorch_data_config.get_transforms(): + for trafo in transforms: # 
Check that transform is unique in list assert transforms.count(trafo) == 1 assert transform_typereps.count(repr(type(trafo))) == 1 @@ -409,38 +314,8 @@ def test_pytorch_dataset_transforms(pytorch_data_config, BackdoorConfig): assert not augmentation_used, "Transform applied after augmentation" assert augmentation_used - # Test for BackdoorData - data_config = data.BackdoorData( - original=pytorch_data_config, - backdoor=BackdoorConfig(), - ) - transforms = data_config.get_transforms() - transform_typereps = [repr(type(t)) for t in transforms] - augmentation_used = False - backdoor_used = False - for trafo in data_config.get_transforms(): - # Check that transform is unique in list - assert transforms.count(trafo) == 1 - assert transform_typereps.count(repr(type(trafo))) == 1 - - # Check transform types - assert not backdoor_used, "Multiple backdoors in transforms" - assert isinstance(trafo, data.transforms.Transform) - if isinstance(trafo, data.transforms.ProbabilisticTransform): - augmentation_used = True - elif isinstance(trafo, data.backdoors.Backdoor): - backdoor_used = True - else: - assert not augmentation_used, "Transform applied after augmentation" - assert augmentation_used - assert backdoor_used - -def test_no_augmentations(BackdoorConfig): - pytorch_data_config = DummyPytorchImageConfig(default_augmentations=False) - data_config = data.BackdoorData( - original=pytorch_data_config, - backdoor=BackdoorConfig(), - ) - for trafo in data_config.get_transforms(): +def test_no_augmentations(): + dataset = DummyPytorchDataset(default_augmentations=False) + for trafo in dataset.transforms: assert not isinstance(trafo, data.transforms.ProbabilisticTransform) diff --git a/tests/test_detectors.py b/tests/test_detectors.py index 5d53a2b4..caaddc9a 100644 --- a/tests/test_detectors.py +++ b/tests/test_detectors.py @@ -4,7 +4,6 @@ import torch from cupbearer.detectors.statistical import ( MahalanobisDetector, - MahalanobisTrainConfig, QuantumEntropyDetector, 
SpectralSignatureDetector, ) @@ -39,23 +38,22 @@ ], ) class TestTrainedStatisticalDetectors: - # Currently MahalanobisTrainConfig works for all statistical detectors - train_config = MahalanobisTrainConfig( - batch_size=16, - rcond=1e-5, - ) + rcond: float = 1e-5 def train_detector(self, dataset, Model, Detector, **kwargs): example_input, _ = next(iter(dataset)) + detector = Detector() model = Model(input_shape=example_input.shape, output_dim=7) - detector = Detector(model=model) + detector.set_model(model) detector.train( # Just make sure all detectors get the data they need: trusted_data=dataset, untrusted_data=dataset, num_classes=7, - train_config=self.train_config, + batch_size=16, + rcond=self.rcond, + max_steps=1, ) return detector @@ -87,10 +85,8 @@ def test_inverse_covariance_matrices(self, dataset, Model): assert inv_cov.size() == cov.size() # Check that inverse is (pseudo) inverse - rank = torch.linalg.matrix_rank(cov, rtol=self.train_config.rcond) - assert ( - torch.linalg.matrix_rank(inv_cov, rtol=self.train_config.rcond) == rank - ) + rank = torch.linalg.matrix_rank(cov, rtol=self.rcond) + assert torch.linalg.matrix_rank(inv_cov, rtol=self.rcond) == rank # TODO I'm uncertain which tolerances to use here, this is a # guesstimate based on some of the computations that are done and @@ -111,12 +107,10 @@ def test_whitening_matrices(self, dataset, Model): assert W.size() == cov.size() # Check that Whitening matrix computes (pseudo) inverse - rank = torch.linalg.matrix_rank(cov, rtol=self.train_config.rcond) - assert torch.linalg.matrix_rank(W, rtol=self.train_config.rcond) == rank + rank = torch.linalg.matrix_rank(cov, rtol=self.rcond) + assert torch.linalg.matrix_rank(W, rtol=self.rcond) == rank inv_cov = W @ W.mT - assert ( - torch.linalg.matrix_rank(inv_cov, rtol=self.train_config.rcond) == rank - ) + assert torch.linalg.matrix_rank(inv_cov, rtol=self.rcond) == rank # TODO I'm uncertain which tolerances to use here, this is a # guesstimate based on some 
of the computations that are done and diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index 4fdad0ea..c9033417 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -1,17 +1,7 @@ import pytest import torch from cupbearer import data, detectors, models, tasks -from cupbearer.scripts import ( - eval_classifier, - train_classifier, - train_detector, -) -from cupbearer.scripts.conf import ( - eval_classifier_conf, - train_classifier_conf, - train_detector_conf, -) -from cupbearer.utils.train import DebugTrainConfig +from cupbearer.scripts import eval_classifier, train_classifier, train_detector # Ignore warnings about num_workers pytestmark = pytest.mark.filterwarnings( @@ -24,87 +14,131 @@ @pytest.fixture(scope="module") -def backdoor_classifier_path(module_tmp_path): +def model(): + return models.MLP(input_shape=(1, 28, 28), hidden_dims=[5, 5], output_dim=10) + + +@pytest.fixture(scope="module") +def mnist(): + # 10 samples will be plenty for all our tests + return torch.utils.data.Subset(data.MNIST(train=False), range(10)) + + +@pytest.fixture +def backdoor_task(model, mnist): + return tasks.backdoor_detection( + model=model, + train_data=mnist, + test_data=mnist, + backdoor=data.CornerPixelBackdoor(), + # For detectors that need untrusted data + trusted_fraction=0.5, + ) + + +@pytest.fixture(scope="module") +def backdoor_classifier_path(model, mnist, module_tmp_path): """Trains a backdoored classifier and returns the path to the run directory.""" - cfg = train_classifier_conf.DebugConfig( - train_data=data.BackdoorData( - original=data.MNIST(), backdoor=data.CornerPixelBackdoor() - ), - model=models.DebugMLPConfig(), + dataset = data.BackdoorDataset(original=mnist, backdoor=data.CornerPixelBackdoor()) + train_loader = torch.utils.data.DataLoader(dataset, batch_size=2) + train_classifier( + train_loader=train_loader, + model=model, + num_classes=10, path=module_tmp_path, + max_steps=1, + logger=False, ) - train_classifier(cfg) - assert 
(module_tmp_path / "config.yaml").is_file() assert (module_tmp_path / "checkpoints" / "last.ckpt").is_file() - assert (module_tmp_path / "tensorboard").is_dir() return module_tmp_path @pytest.mark.slow -def test_eval_classifier(backdoor_classifier_path): - cfg = eval_classifier_conf.DebugConfig( - path=backdoor_classifier_path, data=data.MNIST(train=False) +def test_eval_classifier(model, mnist, backdoor_classifier_path): + # Test model loading once here; other tests will just use whatever state the model + # happens to have at that point instead of constantly loading the trained version. + models.load(model, backdoor_classifier_path) + + eval_classifier( + data=mnist, + model=model, + path=backdoor_classifier_path, + max_batches=1, + batch_size=2, ) - eval_classifier(cfg) - assert (backdoor_classifier_path / "eval.json").is_file() @pytest.mark.slow -def test_train_abstraction_corner_backdoor(backdoor_classifier_path, tmp_path): - cfg = train_detector_conf.Config( - task=tasks.BackdoorDetection(path=backdoor_classifier_path), - detector=detectors.AbstractionDetectorConfig(train=DebugTrainConfig()), - path=tmp_path, +def test_train_abstraction_corner_backdoor(model, backdoor_task, tmp_path): + train_detector( + task=backdoor_task, + detector=detectors.AbstractionDetector( + abstraction=detectors.abstraction.LocallyConsistentAbstraction.get_default( + model, size_reduction=2 + ), + max_batch_size=2, + save_path=tmp_path, + ), + batch_size=2, + max_steps=1, ) - train_detector(cfg) - assert (tmp_path / "config.yaml").is_file() assert (tmp_path / "detector.pt").is_file() assert (tmp_path / "histogram.pdf").is_file() assert (tmp_path / "eval.json").is_file() - assert (tmp_path / "tensorboard").is_dir() - @pytest.mark.slow -def test_train_autoencoder_corner_backdoor(backdoor_classifier_path, tmp_path): - cfg = train_detector_conf.Config( - task=tasks.BackdoorDetection(path=backdoor_classifier_path), - detector=detectors.AbstractionDetectorConfig( - 
train=DebugTrainConfig(), - abstraction=detectors.abstraction.AutoencoderAbstractionConfig(), +def test_train_autoencoder_corner_backdoor(model, backdoor_task, tmp_path): + train_detector( + task=backdoor_task, + detector=detectors.AbstractionDetector( + abstraction=detectors.abstraction.AutoencoderAbstraction.get_default( + model, size_reduction=2 + ), + max_batch_size=2, + save_path=tmp_path, ), - path=tmp_path, + batch_size=2, + max_steps=1, ) - train_detector(cfg) - assert (tmp_path / "config.yaml").is_file() assert (tmp_path / "detector.pt").is_file() assert (tmp_path / "histogram.pdf").is_file() assert (tmp_path / "eval.json").is_file() - assert (tmp_path / "tensorboard").is_dir() - @pytest.mark.slow -def test_train_mahalanobis_advex(backdoor_classifier_path, tmp_path): - # This test doesn't need a backdoored classifier, but we already have one - # and it doesn't hurt, so reusing it makes execution faster. - cfg = train_detector_conf.Config( - task=tasks.adversarial_examples.DebugAdversarialExampleTask( - path=backdoor_classifier_path +def test_train_mahalanobis_advex(model, mnist, tmp_path): + train_detector( + task=tasks.adversarial_examples( + model, + train_data=mnist, + test_data=mnist, + cache_path=tmp_path, + batch_size=2, + max_examples=2, + # Success threshold=1.0 means it's fine even if the classifier gets 100% + # accuracy after the attack---we don't want to error out because of this. 
+ success_threshold=1.0, + steps=1, ), - detector=detectors.DebugMahalanobisConfig(), - path=tmp_path, + detector=detectors.MahalanobisDetector( + max_batch_size=2, + save_path=tmp_path, + ), + batch_size=2, + max_steps=1, ) - train_detector(cfg) - assert (backdoor_classifier_path / "adv_examples_train.pt").is_file() - assert (backdoor_classifier_path / "adv_examples.pdf").is_file() - assert (tmp_path / "config.yaml").is_file() + # Note: we don't expect train samples to exist since we have no untrusted train data + assert not (tmp_path / "adversarial_examples_train.pt").is_file() + assert not (tmp_path / "adversarial_examples_train.pdf").is_file() + assert (tmp_path / "adversarial_examples_test.pt").is_file() + assert (tmp_path / "adversarial_examples_test.pdf").is_file() assert (tmp_path / "detector.pt").is_file() # Eval outputs: assert (tmp_path / "histogram.pdf").is_file() @@ -115,25 +149,19 @@ def test_train_mahalanobis_advex(backdoor_classifier_path, tmp_path): @pytest.mark.parametrize( "detector_type", [ - detectors.DebugMahalanobisConfig, - detectors.DebugSpectralSignatureConfig, - detectors.DebugQuantumEntropyConfig, + detectors.MahalanobisDetector, + detectors.SpectralSignatureDetector, + detectors.QuantumEntropyDetector, ], ) -def test_train_statistical_backdoor(backdoor_classifier_path, tmp_path, detector_type): - cfg = train_detector_conf.Config( - task=tasks.backdoor_detection.DebugBackdoorDetection( - # Need some untrusted data for SpectralSignatureConfig - path=backdoor_classifier_path, - trusted_fraction=0.5, - ), - detector=detector_type(), - path=tmp_path, +def test_train_statistical_backdoor(tmp_path, backdoor_task, detector_type): + train_detector( + task=backdoor_task, + detector=detector_type(max_batch_size=2, save_path=tmp_path), + batch_size=2, + max_steps=1, ) - train_detector(cfg) - - assert (tmp_path / "config.yaml").is_file() assert (tmp_path / "detector.pt").is_file() # Eval outputs: assert (tmp_path / "histogram.pdf").is_file() @@ 
-141,63 +169,17 @@ def test_train_statistical_backdoor(backdoor_classifier_path, tmp_path, detector @pytest.mark.slow -def test_finetuning_detector(backdoor_classifier_path, tmp_path): - cfg = train_detector_conf.Config( - task=tasks.BackdoorDetection(path=backdoor_classifier_path), - detector=detectors.finetuning.FinetuningConfig(train=DebugTrainConfig()), - path=tmp_path, +def test_finetuning_detector(backdoor_task, tmp_path): + train_detector( + task=backdoor_task, + detector=detectors.FinetuningAnomalyDetector( + max_batch_size=2, save_path=tmp_path + ), + num_classes=10, + batch_size=2, + max_steps=1, ) - train_detector(cfg) - assert (tmp_path / "config.yaml").is_file() assert (tmp_path / "detector.pt").is_file() assert (tmp_path / "histogram.pdf").is_file() assert (tmp_path / "eval.json").is_file() - - assert (tmp_path / "tensorboard").is_dir() - - -@pytest.mark.slow -def test_wanet(tmp_path): - cfg = train_classifier_conf.DebugConfig( - train_data=data.BackdoorData( - original=data.GTSRB(), backdoor=data.WanetBackdoor() - ), - model=models.DebugMLPConfig(), - path=tmp_path / "wanet", - val_data={ - "backdoor": data.BackdoorData( - original=data.GTSRB(), backdoor=data.WanetBackdoor() - ) - }, - train_config=DebugTrainConfig(num_workers=1), - ) - train_classifier(cfg) - - assert (tmp_path / "wanet" / "config.yaml").is_file() - assert (tmp_path / "wanet" / "checkpoints" / "last.ckpt").is_file() - assert (tmp_path / "wanet" / "tensorboard").is_dir() - - # Checks mostly to make the type checker happy for the allclose assert - assert isinstance(cfg.val_data["backdoor"], data.BackdoorData) - assert isinstance(cfg.val_data["backdoor"].backdoor, data.WanetBackdoor) - assert isinstance(cfg.train_data, data.BackdoorData) - assert isinstance(cfg.train_data.backdoor, data.WanetBackdoor) - assert torch.allclose( - cfg.val_data["backdoor"].backdoor.control_grid, - cfg.train_data.backdoor.control_grid, - ) - - # Check that from_run can load WanetBackdoor properly - 
train_detector_cfg = train_detector_conf.Config( - task=tasks.backdoor_detection.DebugBackdoorDetection(path=tmp_path / "wanet"), - detector=detectors.DebugMahalanobisConfig(), - path=tmp_path / "wanet-mahalanobis", - ) - train_detector(train_detector_cfg) - assert isinstance(train_detector_cfg.task, tasks.BackdoorDetection) - assert isinstance(train_detector_cfg.task._backdoor, data.WanetBackdoor) - assert torch.allclose( - train_detector_cfg.task._backdoor.control_grid, - cfg.train_data.backdoor.control_grid, - ) From 6f0e4724d305a896ddf20d7dcc7c29b7756b0cc0 Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Sat, 2 Mar 2024 17:41:05 -0800 Subject: [PATCH 14/25] Move save_path and max_batch_size arguments I think it doesn't make much sense intuitively to have them be arguments to the detector --- .../abstraction/abstraction_detector.py | 16 +++------ src/cupbearer/detectors/anomaly_detector.py | 35 +++++++++---------- src/cupbearer/detectors/finetuning.py | 7 ++-- src/cupbearer/scripts/eval_detector.py | 6 ++++ src/cupbearer/scripts/train_detector.py | 19 +++++++--- tests/test_pipeline.py | 27 +++++++------- 6 files changed, 59 insertions(+), 51 deletions(-) diff --git a/src/cupbearer/detectors/abstraction/abstraction_detector.py b/src/cupbearer/detectors/abstraction/abstraction_detector.py index 83098eb8..b89a5a4d 100644 --- a/src/cupbearer/detectors/abstraction/abstraction_detector.py +++ b/src/cupbearer/detectors/abstraction/abstraction_detector.py @@ -115,24 +115,16 @@ def configure_optimizers(self): class AbstractionDetector(ActivationBasedDetector): """Anomaly detector based on an abstraction.""" - def __init__( - self, - abstraction: Abstraction, - max_batch_size: int = 4096, - save_path: str | Path | None = None, - ): + def __init__(self, abstraction: Abstraction): self.abstraction = abstraction names = list(abstraction.tau_maps.keys()) - super().__init__( - activation_name_func=lambda _: names, - max_batch_size=max_batch_size, - save_path=save_path, - ) + 
super().__init__(activation_name_func=lambda _: names) def train( self, trusted_data, untrusted_data, + save_path: Path | str, *, lr: float = 1e-3, batch_size: int = 64, @@ -166,7 +158,7 @@ def train( # (which seems tricky to do manually). module.model = self.model - trainer = L.Trainer(default_root_dir=self.save_path, **trainer_kwargs) + trainer = L.Trainer(default_root_dir=save_path, **trainer_kwargs) trainer.fit( model=module, train_dataloaders=train_loader, diff --git a/src/cupbearer/detectors/anomaly_detector.py b/src/cupbearer/detectors/anomaly_detector.py index c97db6f1..f1e917c1 100644 --- a/src/cupbearer/detectors/anomaly_detector.py +++ b/src/cupbearer/detectors/anomaly_detector.py @@ -3,7 +3,7 @@ from collections.abc import Collection from contextlib import contextmanager from pathlib import Path -from typing import Callable, Optional +from typing import Callable import numpy as np import sklearn.metrics @@ -19,16 +19,9 @@ class AnomalyDetector(ABC): - def __init__( - self, - max_batch_size: int = 4096, - save_path: Optional[Path | str] = None, - ): + def __init__(self): # For storing the original detector variables when finetuning self._original_variables = None - self.max_batch_size = max_batch_size - self.save_path = None if save_path is None else Path(save_path) - self.trained = False def set_model(self, model: HookedModel): @@ -43,7 +36,11 @@ def set_model(self, model: HookedModel): @abstractmethod def train( - self, trusted_data: Dataset | None, untrusted_data: Dataset | None, **kwargs + self, + trusted_data: Dataset | None, + untrusted_data: Dataset | None, + save_path: Path | str | None, + **kwargs, ): """Train the anomaly detector with the given datasets on the given model. @@ -100,7 +97,9 @@ def eval( # to untrusted data then). 
train_dataset: Dataset, test_dataset: MixedData, + batch_size: int = 1024, histogram_percentile: float = 95, + save_path: Path | str | None = None, num_bins: int = 100, pbar: bool = False, ): @@ -110,7 +109,7 @@ def eval( test_loader = DataLoader( test_dataset, - batch_size=self.max_batch_size, + batch_size=batch_size, # For some methods, such as adversarial abstractions, it might matter how # normal/anomalous data is distributed into batches. In that case, we want # to mix them by default. @@ -153,14 +152,16 @@ def eval( bins = np.linspace(lower_lim, upper_lim, num_bins) - if not self.save_path: + if not save_path: return - self.save_path.mkdir(parents=True, exist_ok=True) + save_path = Path(save_path) + + save_path.mkdir(parents=True, exist_ok=True) # Everything from here is just saving metrics and creating figures # (which we skip if they aren't going to be saved anyway). - with open(self.save_path / "eval.json", "w") as f: + with open(save_path / "eval.json", "w") as f: json.dump(metrics, f) # Visualizations for anomaly scores @@ -176,7 +177,7 @@ def eval( plt.xlabel("Anomaly score") plt.ylabel("Frequency") plt.title("Anomaly score distribution") - plt.savefig(self.save_path / "histogram.pdf") + plt.savefig(save_path / "histogram.pdf") @abstractmethod def layerwise_scores(self, batch) -> dict[str, torch.Tensor]: @@ -240,10 +241,8 @@ def __init__( activation_name_func: str | Callable[[HookedModel], Collection[str]] | None = None, - max_batch_size: int = 4096, - save_path: Path | str | None = None, ): - super().__init__(max_batch_size=max_batch_size, save_path=save_path) + super().__init__() if activation_name_func is None: activation_name_func = default_activation_name_func diff --git a/src/cupbearer/detectors/finetuning.py b/src/cupbearer/detectors/finetuning.py index 50b224dd..8dfe14c0 100644 --- a/src/cupbearer/detectors/finetuning.py +++ b/src/cupbearer/detectors/finetuning.py @@ -1,5 +1,6 @@ import copy import warnings +from pathlib import Path import 
lightning as L import torch @@ -12,9 +13,6 @@ class FinetuningAnomalyDetector(AnomalyDetector): - def __init__(self, max_batch_size, save_path): - super().__init__(max_batch_size, save_path) - def set_model(self, model): super().set_model(model) # We might as well make a copy here already, since whether we'll train this @@ -25,6 +23,7 @@ def train( self, trusted_data, untrusted_data, + save_path: Path | str, *, num_classes: int, lr: float = 1e-3, @@ -44,7 +43,7 @@ def train( clean_loader = DataLoader(trusted_data, batch_size=batch_size, shuffle=True) # Finetune the model on the clean dataset - trainer = L.Trainer(default_root_dir=self.save_path, **trainer_kwargs) + trainer = L.Trainer(default_root_dir=save_path, **trainer_kwargs) with warnings.catch_warnings(): warnings.filterwarnings( "ignore", diff --git a/src/cupbearer/scripts/eval_detector.py b/src/cupbearer/scripts/eval_detector.py index b20774ad..8f94d267 100644 --- a/src/cupbearer/scripts/eval_detector.py +++ b/src/cupbearer/scripts/eval_detector.py @@ -1,3 +1,5 @@ +from pathlib import Path + from cupbearer.detectors import AnomalyDetector from cupbearer.tasks import Task from cupbearer.utils.scripts import script @@ -7,7 +9,9 @@ def main( task: Task, detector: AnomalyDetector, + save_path: Path | str | None, pbar: bool = False, + batch_size: int = 1024, ): detector.set_model(task.model) @@ -15,4 +19,6 @@ def main( train_dataset=task.trusted_data, test_dataset=task.test_data, pbar=pbar, + save_path=save_path, + batch_size=batch_size, ) diff --git a/src/cupbearer/scripts/train_detector.py b/src/cupbearer/scripts/train_detector.py index 350d12ff..30f6b722 100644 --- a/src/cupbearer/scripts/train_detector.py +++ b/src/cupbearer/scripts/train_detector.py @@ -1,3 +1,5 @@ +from pathlib import Path + from cupbearer.detectors import AnomalyDetector from cupbearer.tasks import Task @@ -7,6 +9,8 @@ def main( task: Task, detector: AnomalyDetector, + save_path: Path | str | None, + eval_batch_size: int = 1024, 
**train_kwargs, ): detector.set_model(task.model) @@ -14,9 +18,16 @@ def main( detector.train( trusted_data=task.trusted_data, untrusted_data=task.untrusted_train_data, + save_path=save_path, **train_kwargs, ) - path = detector.save_path - if path: - detector.save_weights(path / "detector") - eval_detector(detector=detector, task=task, pbar=True) + if save_path: + save_path = Path(save_path) + detector.save_weights(save_path / "detector") + eval_detector( + detector=detector, + task=task, + pbar=True, + batch_size=eval_batch_size, + save_path=save_path, + ) diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index c9033417..9f7d23e0 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -80,10 +80,10 @@ def test_train_abstraction_corner_backdoor(model, backdoor_task, tmp_path): abstraction=detectors.abstraction.LocallyConsistentAbstraction.get_default( model, size_reduction=2 ), - max_batch_size=2, - save_path=tmp_path, ), + save_path=tmp_path, batch_size=2, + eval_batch_size=2, max_steps=1, ) assert (tmp_path / "detector.pt").is_file() @@ -99,11 +99,11 @@ def test_train_autoencoder_corner_backdoor(model, backdoor_task, tmp_path): detector=detectors.AbstractionDetector( abstraction=detectors.abstraction.AutoencoderAbstraction.get_default( model, size_reduction=2 - ), - max_batch_size=2, - save_path=tmp_path, + ) ), batch_size=2, + eval_batch_size=2, + save_path=tmp_path, max_steps=1, ) assert (tmp_path / "detector.pt").is_file() @@ -127,11 +127,10 @@ def test_train_mahalanobis_advex(model, mnist, tmp_path): success_threshold=1.0, steps=1, ), - detector=detectors.MahalanobisDetector( - max_batch_size=2, - save_path=tmp_path, - ), + detector=detectors.MahalanobisDetector(), + save_path=tmp_path, batch_size=2, + eval_batch_size=2, max_steps=1, ) # Note: we don't expect train samples to exist since we have no untrusted train data @@ -157,8 +156,10 @@ def test_train_mahalanobis_advex(model, mnist, tmp_path): def 
test_train_statistical_backdoor(tmp_path, backdoor_task, detector_type): train_detector( task=backdoor_task, - detector=detector_type(max_batch_size=2, save_path=tmp_path), + detector=detector_type(), batch_size=2, + eval_batch_size=2, + save_path=tmp_path, max_steps=1, ) @@ -172,11 +173,11 @@ def test_train_statistical_backdoor(tmp_path, backdoor_task, detector_type): def test_finetuning_detector(backdoor_task, tmp_path): train_detector( task=backdoor_task, - detector=detectors.FinetuningAnomalyDetector( - max_batch_size=2, save_path=tmp_path - ), + detector=detectors.FinetuningAnomalyDetector(), + save_path=tmp_path, num_classes=10, batch_size=2, + eval_batch_size=2, max_steps=1, ) assert (tmp_path / "detector.pt").is_file() From ae98812f4cc01132f82f4691b55286b07104c333 Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Sat, 2 Mar 2024 17:42:17 -0800 Subject: [PATCH 15/25] Remove another unused file --- src/cupbearer/utils/custom_transforms.py | 20 -------------------- 1 file changed, 20 deletions(-) delete mode 100644 src/cupbearer/utils/custom_transforms.py diff --git a/src/cupbearer/utils/custom_transforms.py b/src/cupbearer/utils/custom_transforms.py deleted file mode 100644 index 0b09346e..00000000 --- a/src/cupbearer/utils/custom_transforms.py +++ /dev/null @@ -1,20 +0,0 @@ -from typing import Tuple - -# We use torch to generate random numbers, to keep things consistent -# with torchvision transforms. -from PIL.Image import Image - - -class AddInfoDict: - """Adds an info dict to the sample, in which other transforms can store information. - - This is meant to be used as the first transform, so that the info dict is - always present and other transforms can rely on it. - """ - - def __call__(self, sample: Tuple[Image, int]): - img, target = sample - # Some metrics need the original target (which CornerPixelToWhite changes). 
- # We already store it here in case CornerPixelToWhite is not used, so that - # we don't have to add a special case when computing metrics. - return img, target, {"original_target": target} From 31a79939fdeeb3b22b7ca976219570aae70736b3 Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Sat, 2 Mar 2024 17:49:12 -0800 Subject: [PATCH 16/25] Remove more unused code --- src/cupbearer/data/adversarial.py | 2 +- src/cupbearer/data/pytorch.py | 10 +- src/cupbearer/detectors/anomaly_detector.py | 2 +- src/cupbearer/detectors/finetuning.py | 4 +- src/cupbearer/scripts/eval_detector.py | 2 - src/cupbearer/utils/__init__.py | 121 ++++++++++++++- src/cupbearer/utils/scripts.py | 29 ---- src/cupbearer/utils/utils.py | 159 -------------------- 8 files changed, 128 insertions(+), 201 deletions(-) delete mode 100644 src/cupbearer/utils/scripts.py delete mode 100644 src/cupbearer/utils/utils.py diff --git a/src/cupbearer/data/adversarial.py b/src/cupbearer/data/adversarial.py index 7847c1c4..f9b4a672 100644 --- a/src/cupbearer/data/adversarial.py +++ b/src/cupbearer/data/adversarial.py @@ -8,7 +8,7 @@ from matplotlib import pyplot as plt from torch.utils.data import DataLoader, Dataset, Subset -from cupbearer.utils import utils +from cupbearer import utils class AdversarialExampleDataset(Dataset): diff --git a/src/cupbearer/data/pytorch.py b/src/cupbearer/data/pytorch.py index 43366d17..626ba2df 100644 --- a/src/cupbearer/data/pytorch.py +++ b/src/cupbearer/data/pytorch.py @@ -1,8 +1,8 @@ -from dataclasses import dataclass +from dataclasses import dataclass, field from torch.utils.data import Dataset -from cupbearer.utils.utils import get_object, mutable_field +from cupbearer.utils import get_object from .transforms import ( RandomCrop, @@ -18,7 +18,7 @@ class PytorchDataset(Dataset): name: str train: bool = True - transforms: list[Transform] = mutable_field([ToTensor()]) + transforms: list[Transform] = field(default_factory=lambda: [ToTensor()]) default_augmentations: bool = True 
def __post_init__(self): @@ -74,8 +74,8 @@ def __post_init__(self): class GTSRB(PytorchDataset): name: str = "torchvision.datasets.GTSRB" num_classes: int = 43 - transforms: list[Transform] = mutable_field( - [ + transforms: list[Transform] = field( + default_factory=lambda: [ Resize(size=(32, 32)), ToTensor(), ] diff --git a/src/cupbearer/detectors/anomaly_detector.py b/src/cupbearer/detectors/anomaly_detector.py index f1e917c1..571d19ab 100644 --- a/src/cupbearer/detectors/anomaly_detector.py +++ b/src/cupbearer/detectors/anomaly_detector.py @@ -13,9 +13,9 @@ from torch.utils.data import DataLoader, Dataset from tqdm.auto import tqdm +from cupbearer import utils from cupbearer.data import MixedData from cupbearer.models.models import HookedModel -from cupbearer.utils import utils class AnomalyDetector(ABC): diff --git a/src/cupbearer/detectors/finetuning.py b/src/cupbearer/detectors/finetuning.py index 8dfe14c0..063995d4 100644 --- a/src/cupbearer/detectors/finetuning.py +++ b/src/cupbearer/detectors/finetuning.py @@ -9,7 +9,7 @@ from cupbearer.detectors.anomaly_detector import AnomalyDetector from cupbearer.scripts._shared import Classifier -from cupbearer.utils import utils +from cupbearer.utils import inputs_from_batch class FinetuningAnomalyDetector(AnomalyDetector): @@ -63,7 +63,7 @@ def layerwise_scores(self, batch): ) def scores(self, batch): - inputs = utils.inputs_from_batch(batch) + inputs = inputs_from_batch(batch) original_output = self.model(inputs) finetuned_output = self.finetuned_model(inputs) diff --git a/src/cupbearer/scripts/eval_detector.py b/src/cupbearer/scripts/eval_detector.py index 8f94d267..ed9604f5 100644 --- a/src/cupbearer/scripts/eval_detector.py +++ b/src/cupbearer/scripts/eval_detector.py @@ -2,10 +2,8 @@ from cupbearer.detectors import AnomalyDetector from cupbearer.tasks import Task -from cupbearer.utils.scripts import script -@script def main( task: Task, detector: AnomalyDetector, diff --git a/src/cupbearer/utils/__init__.py 
b/src/cupbearer/utils/__init__.py index 1d326396..61fcceaa 100644 --- a/src/cupbearer/utils/__init__.py +++ b/src/cupbearer/utils/__init__.py @@ -1,2 +1,119 @@ -# ruff: noqa: F401 -from .utils import inputs_from_batch, load, save +import codecs +import importlib +import pickle +from pathlib import Path +from typing import Union + +import torch + +SUFFIX = ".pt" +TYPE_PREFIX = "__TYPE__:" +PICKLE_PREFIX = "__PICKLE__:" + + +def from_string(s): + # Doesn't restore Paths but all the code should be able to handle getting strings + # instead. + if not isinstance(s, str): + return s + if s.startswith(TYPE_PREFIX): + s = s[len(TYPE_PREFIX) :] + return get_object(s) + if s.startswith(PICKLE_PREFIX): + s = s[len(PICKLE_PREFIX) :] + pickled = codecs.decode(s.encode(), "base64") + return pickle.loads(pickled) + + return s + + +def validate_and_convert_leaf(leaf): + if isinstance(leaf, (str, int, float, bool, torch.Tensor)): + return leaf + if isinstance(leaf, Path): + return str(leaf) + if isinstance(leaf, type): + return TYPE_PREFIX + leaf.__module__ + "." + leaf.__name__ + + try: + pickled = pickle.dumps(leaf) + except Exception as e: + raise ValueError(f"Could not pickle object {leaf}") from e + # Make sure we're not accidentally encoding huge objects inefficiently into strings: + if len(pickled) > 1e6: + raise ValueError( + f"Object of type {type(leaf)} has {round(len(pickled) / 1e6, 1)} MB " + "when pickled. This is probably a mistake." + ) + pickle_str = codecs.encode(pickled, "base64").decode() + return PICKLE_PREFIX + pickle_str + + +def tree_map(f, tree): + """Like jax.tree_map, but simpler and for pytorch.""" + # We could use https://github.com/metaopt/optree in the future, + # which would be faster and generally add support for various tree operations. 
+ if isinstance(tree, list): + return [tree_map(f, x) for x in tree] + if isinstance(tree, tuple): + return tuple(tree_map(f, x) for x in tree) + if isinstance(tree, dict): + return {k: tree_map(f, v) for k, v in tree.items()} + try: + return f(tree) + except Exception as e: + raise ValueError( + f"Could not apply {f} to leaf {tree} of type {type(tree)}" + ) from e + + +def save(data, path: Union[str, Path], overwrite: bool = False): + data = tree_map(validate_and_convert_leaf, data) + path = Path(path) + directory = path.parent + directory.mkdir(parents=True, exist_ok=True) + if path.exists(): + if overwrite: + assert not path.is_dir(), f"{path} is a directory, won't overwrite" + path.unlink() + else: + raise RuntimeError(f"File {path} already exists.") + torch.save(data, path.with_suffix(SUFFIX)) + + +def load(path: Union[str, Path]): + path = Path(path) + if path.is_dir(): + raise ValueError( + f"Expected a file, got directory {path}. " + "Maybe this is in the legacy Jax format?" + ) + + if path.suffix != SUFFIX: + path = path.with_suffix(SUFFIX) + with open(path, "rb") as file: + data = torch.load(file) + data = tree_map(from_string, data) + return data + + +def get_object(path: str): + """Get an object from a string. + + Args: + path: A string of the form "module.submodule.object_name". + + Returns: + The object named by `path`. 
+ """ + module_name, object_name = path.rsplit(".", 1) + module = importlib.import_module(module_name) + return getattr(module, object_name) + + +def inputs_from_batch(batch): + # batch may contain labels or other info, if so we strip it out + if isinstance(batch, (tuple, list)): + return batch[0] + else: + return batch diff --git a/src/cupbearer/utils/scripts.py b/src/cupbearer/utils/scripts.py deleted file mode 100644 index 246d46e8..00000000 --- a/src/cupbearer/utils/scripts.py +++ /dev/null @@ -1,29 +0,0 @@ -from typing import Callable - - -def script( - script_fn: Callable, -) -> Callable: - # @functools.wraps(script_fn) - # def run_script(cfg: ConfigType): - # save_cfg(cfg, save_config=cfg.save_config) - # return script_fn(cfg) - - # return run_script - return script_fn - - -def save_cfg(cfg, save_config: bool = True): - # if cfg.path: - # cfg.path.mkdir(parents=True, exist_ok=True) - # if save_config: - # # TODO: replace this with cfg.save if/when that exposes save_dc_types. - # # Note that we need save_dc_types here even though `BaseConfig` already - # # enables that, since `save` calls `to_dict` directly, not `obj.to_dict`. - # simple_parsing.helpers.serialization.serializable.save( - # cfg, - # cfg.path / "config.yaml", - # save_dc_types=True, - # sort_keys=False, - # ) - pass diff --git a/src/cupbearer/utils/utils.py b/src/cupbearer/utils/utils.py deleted file mode 100644 index 7d519c14..00000000 --- a/src/cupbearer/utils/utils.py +++ /dev/null @@ -1,159 +0,0 @@ -import codecs -import copy -import dataclasses -import functools -import importlib -import pickle -from pathlib import Path -from typing import Iterable, TypeVar, Union - -import torch - -SUFFIX = ".pt" -TYPE_PREFIX = "__TYPE__:" -PICKLE_PREFIX = "__PICKLE__:" - - -def from_string(s): - # Doesn't restore Paths but all the code should be able to handle getting strings - # instead. 
- if not isinstance(s, str): - return s - if s.startswith(TYPE_PREFIX): - s = s[len(TYPE_PREFIX) :] - return get_object(s) - if s.startswith(PICKLE_PREFIX): - s = s[len(PICKLE_PREFIX) :] - pickled = codecs.decode(s.encode(), "base64") - return pickle.loads(pickled) - - return s - - -def validate_and_convert_leaf(leaf): - if isinstance(leaf, (str, int, float, bool, torch.Tensor)): - return leaf - if isinstance(leaf, Path): - return str(leaf) - if isinstance(leaf, type): - return TYPE_PREFIX + leaf.__module__ + "." + leaf.__name__ - - try: - pickled = pickle.dumps(leaf) - except Exception as e: - raise ValueError(f"Could not pickle object {leaf}") from e - # Make sure we're not accidentally encoding huge objects inefficiently into strings: - if len(pickled) > 1e6: - raise ValueError( - f"Object of type {type(leaf)} has {round(len(pickled) / 1e6, 1)} MB " - "when pickled. This is probably a mistake." - ) - pickle_str = codecs.encode(pickled, "base64").decode() - return PICKLE_PREFIX + pickle_str - - -def tree_map(f, tree): - """Like jax.tree_map, but simpler and for pytorch.""" - # We could use https://github.com/metaopt/optree in the future, - # which would be faster and generally add support for various tree operations. 
- if isinstance(tree, list): - return [tree_map(f, x) for x in tree] - if isinstance(tree, tuple): - return tuple(tree_map(f, x) for x in tree) - if isinstance(tree, dict): - return {k: tree_map(f, v) for k, v in tree.items()} - try: - return f(tree) - except Exception as e: - raise ValueError( - f"Could not apply {f} to leaf {tree} of type {type(tree)}" - ) from e - - -def save(data, path: Union[str, Path], overwrite: bool = False): - data = tree_map(validate_and_convert_leaf, data) - path = Path(path) - directory = path.parent - directory.mkdir(parents=True, exist_ok=True) - if path.exists(): - if overwrite: - assert not path.is_dir(), f"{path} is a directory, won't overwrite" - path.unlink() - else: - raise RuntimeError(f"File {path} already exists.") - torch.save(data, path.with_suffix(SUFFIX)) - - -def load(path: Union[str, Path]): - path = Path(path) - if path.is_dir(): - raise ValueError( - f"Expected a file, got directory {path}. " - "Maybe this is in the legacy Jax format?" - ) - - if path.suffix != SUFFIX: - path = path.with_suffix(SUFFIX) - with open(path, "rb") as file: - data = torch.load(file) - data = tree_map(from_string, data) - return data - - -def product(xs: Iterable): - return functools.reduce(lambda x, y: x * y, xs, 1) - - -def merge_dicts(a: dict, b: dict) -> dict: - """Merges two dictionaries recursively.""" - - merged = a.copy() - for key, value in b.items(): - if key in merged and isinstance(merged[key], dict): - # Make sure we don't overwrite a dict with a non-dict - assert isinstance(value, dict) - merged[key] = merge_dicts(merged[key], value) - else: - if isinstance(value, dict): - # Make sure we don't overwrite a non-dict with a dict - assert key not in merged - merged[key] = value - - return merged - - -T = TypeVar("T") - - -def mutable_field(default: T = None) -> T: - return dataclasses.field(default_factory=lambda: copy.deepcopy(default)) - - -def list_field(): - return dataclasses.field(default_factory=list) - - -def dict_field(): 
- return dataclasses.field(default_factory=dict) - - -def get_object(path: str): - """Get an object from a string. - - Args: - path: A string of the form "module.submodule.object_name". - - Returns: - The object named by `path`. - """ - module_name, object_name = path.rsplit(".", 1) - module = importlib.import_module(module_name) - return getattr(module, object_name) - - -def inputs_from_batch(batch): - # batch may contain labels or other info, if so we strip it out - if isinstance(batch, (tuple, list)): - return batch[0] - else: - return batch From f0dacc5123be402f3e258476f9dbffb69a1ad7d9 Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Sat, 2 Mar 2024 17:56:34 -0800 Subject: [PATCH 17/25] Minor improvements and remove TODOs --- src/cupbearer/detectors/anomaly_detector.py | 10 +++------- src/cupbearer/scripts/eval_detector.py | 3 +-- src/cupbearer/scripts/train_classifier.py | 2 +- src/cupbearer/tasks/backdoor_detection.py | 5 ----- 4 files changed, 5 insertions(+), 15 deletions(-) diff --git a/src/cupbearer/detectors/anomaly_detector.py b/src/cupbearer/detectors/anomaly_detector.py index 571d19ab..1b22a461 100644 --- a/src/cupbearer/detectors/anomaly_detector.py +++ b/src/cupbearer/detectors/anomaly_detector.py @@ -92,11 +92,7 @@ def eval( self, # Don't need train_dataset here, but e.g. adversarial abstractions need it, # and in general there's no reason to deny detectors access to it during eval. - # TODO: I think we can/should remove this and require detectors to handle - # anything involving training data during training (now that they get access - # to untrusted data then). - train_dataset: Dataset, - test_dataset: MixedData, + dataset: MixedData, batch_size: int = 1024, histogram_percentile: float = 95, save_path: Path | str | None = None, @@ -105,10 +101,10 @@ def eval( ): # Check this explicitly because otherwise things can break in weird ways # when we assume that anomaly labels are included. 
- assert isinstance(test_dataset, MixedData), type(test_dataset) + assert isinstance(dataset, MixedData), type(dataset) test_loader = DataLoader( - test_dataset, + dataset, batch_size=batch_size, # For some methods, such as adversarial abstractions, it might matter how # normal/anomalous data is distributed into batches. In that case, we want diff --git a/src/cupbearer/scripts/eval_detector.py b/src/cupbearer/scripts/eval_detector.py index ed9604f5..5bead65c 100644 --- a/src/cupbearer/scripts/eval_detector.py +++ b/src/cupbearer/scripts/eval_detector.py @@ -14,8 +14,7 @@ def main( detector.set_model(task.model) detector.eval( - train_dataset=task.trusted_data, - test_dataset=task.test_data, + dataset=task.test_data, pbar=pbar, save_path=save_path, batch_size=batch_size, diff --git a/src/cupbearer/scripts/train_classifier.py b/src/cupbearer/scripts/train_classifier.py index aaed8fc3..8689d802 100644 --- a/src/cupbearer/scripts/train_classifier.py +++ b/src/cupbearer/scripts/train_classifier.py @@ -62,7 +62,6 @@ def main( trainer_kwargs["callbacks"] = callbacks # Define metrics logger - # TODO: make adjustable and set config correctly if "logger" not in trainer_kwargs: if wandb: metrics_logger = loggers.WandbLogger(project="cupbearer") @@ -72,6 +71,7 @@ def main( "model": repr(model), "train_data": repr(train_loader.dataset), "batch_size": train_loader.batch_size, + "lr": lr, } ) elif path: diff --git a/src/cupbearer/tasks/backdoor_detection.py b/src/cupbearer/tasks/backdoor_detection.py index 51942285..b6e4c22d 100644 --- a/src/cupbearer/tasks/backdoor_detection.py +++ b/src/cupbearer/tasks/backdoor_detection.py @@ -20,11 +20,6 @@ def backdoor_detection( "this is probably unintentional." ) - # TODO: for WaNet, we currently expect the user to load the control grid. - # (Otherwise we'd have to always take in a path here, and also when working - # in a notebook it might just be easier to pass in the existing backdoor object.) 
- # But we should somehow check somewhere that it's loaded to avoid silent errors. - return Task.from_base_data( model=model, train_data=train_data, From 0267bd13809d0aa70dec4ba90615c8c2f786da4e Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Sat, 2 Mar 2024 18:09:11 -0800 Subject: [PATCH 18/25] Fix demo notebook --- notebooks/simple_demo.ipynb | 248 +++++++++++++++----------------- src/cupbearer/utils/__init__.py | 9 ++ 2 files changed, 122 insertions(+), 135 deletions(-) diff --git a/notebooks/simple_demo.ipynb b/notebooks/simple_demo.ipynb index 3f6a2bc6..da86a010 100644 --- a/notebooks/simple_demo.ipynb +++ b/notebooks/simple_demo.ipynb @@ -17,26 +17,11 @@ "outputs": [], "source": [ "import json\n", - "from datetime import datetime\n", - "from pathlib import Path\n", + "from torch.utils.data import DataLoader\n", "\n", "from cupbearer import data, detectors, models, scripts, tasks, utils" ] }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "def get_path(base=\"logs\", time=True):\n", - " if time:\n", - " timestamp = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n", - " else:\n", - " timestamp = datetime.now().strftime(\"%Y-%m-%d\")\n", - " return Path(base) / timestamp" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -47,7 +32,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -57,7 +42,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -66,7 +51,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 5, "metadata": {}, "outputs": [ { @@ -94,7 +79,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "48219c8359284728a9ec6a2144927c0a", + "model_id": "f3fbd649f84545b79393518b1875ea71", "version_major": 2, "version_minor": 0 }, @@ -118,7 +103,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - 
"model_id": "24b6f01a53f14158821bf04d8d7ee377", + "model_id": "94e198d2efdb4899a5cc3829dc6a90b7", "version_major": 2, "version_minor": 0 }, @@ -132,7 +117,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "b157b6824e0f468690e0bb219c6ca8c2", + "model_id": "6218ec07439243379c33377a00ade093", "version_major": 2, "version_minor": 0 }, @@ -146,7 +131,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "1b7021c89c6e40ba97cb5147930bda1d", + "model_id": "c1d3eeecce294a499dca8d392f36b779", "version_major": 2, "version_minor": 0 }, @@ -160,35 +145,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "c6252aadc20841a6b7e164a6fe30a204", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Validation: | | 0/? [00:00┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n", "┃ Test metric DataLoader 0 ┃\n", "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n", - "│ test/acc_epoch 0.9520999789237976 │\n", - "│ test/acc_step 0.9520999789237976 │\n", - "│ test/loss 0.15424881875514984 │\n", + "│ test/acc_epoch 0.9467999935150146 │\n", + "│ test/acc_step 0.9467999935150146 │\n", + "│ test/loss 0.16958695650100708 │\n", "└───────────────────────────┴───────────────────────────┘\n", "\n" ], @@ -310,9 +266,9 @@ "┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n", "┃\u001b[1m \u001b[0m\u001b[1m Test metric \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m DataLoader 0 \u001b[0m\u001b[1m \u001b[0m┃\n", "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n", - "│\u001b[36m \u001b[0m\u001b[36m test/acc_epoch \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.9520999789237976 \u001b[0m\u001b[35m \u001b[0m│\n", - "│\u001b[36m \u001b[0m\u001b[36m test/acc_step \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.9520999789237976 \u001b[0m\u001b[35m \u001b[0m│\n", - "│\u001b[36m \u001b[0m\u001b[36m test/loss \u001b[0m\u001b[36m 
\u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.15424881875514984 \u001b[0m\u001b[35m \u001b[0m│\n", + "│\u001b[36m \u001b[0m\u001b[36m test/acc_epoch \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.9467999935150146 \u001b[0m\u001b[35m \u001b[0m│\n", + "│\u001b[36m \u001b[0m\u001b[36m test/acc_step \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.9467999935150146 \u001b[0m\u001b[35m \u001b[0m│\n", + "│\u001b[36m \u001b[0m\u001b[36m test/loss \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.16958695650100708 \u001b[0m\u001b[35m \u001b[0m│\n", "└───────────────────────────┴───────────────────────────┘\n" ] }, @@ -322,7 +278,9 @@ ], "source": [ "scripts.eval_classifier(\n", - " scripts.EvalClassifierConfig(path=classifier_path, data=val_data, model=model)\n", + " data=val_data,\n", + " model=model,\n", + " path=classifier_path,\n", ")" ] }, @@ -330,19 +288,19 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "These results will also have been stored to `logs/demo/classifier/eval.json` if we want to process them further (e.g. to compare many runs):" + "These results will also have been stored to `/eval.json` if we want to process them further (e.g. 
to compare many runs):" ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "[{'test/loss': 0.15424881875514984, 'test/acc_step': 0.9520999789237976, 'test/acc_epoch': 0.9520999789237976}]\n" + "[{'test/loss': 0.16958695650100708, 'test/acc_step': 0.9467999935150146, 'test/acc_epoch': 0.9467999935150146}]\n" ] } ], @@ -361,31 +319,54 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ + "# Initialize a new model with the same architecture as before:\n", + "model = models.MLP(input_shape=(28, 28), hidden_dims=[128, 128], output_dim=10)\n", + "# Load the weights:\n", "models.load(model, classifier_path)" ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 9, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████| 15/15 [00:06<00:00, 2.30it/s]\n", - "\u001b[32m2024-02-29 22:14:34.794\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36msave_weights\u001b[0m:\u001b[36m228\u001b[0m - \u001b[1mSaving detector to logs/demo/detector/2024-02-29_22-14-27/detector\u001b[0m\n", - "\u001b[32m2024-02-29 22:14:35.134\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36meval\u001b[0m:\u001b[36m149\u001b[0m - \u001b[1mAUC_ROC: 1.0000\u001b[0m\n", - "\u001b[32m2024-02-29 22:14:35.135\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36meval\u001b[0m:\u001b[36m150\u001b[0m - \u001b[1mAP: 1.0000\u001b[0m\n" + "100%|██████████| 59/59 [00:06<00:00, 9.09it/s]\n", + "\u001b[32m2024-03-02 18:08:41.589\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36msave_weights\u001b[0m:\u001b[36m220\u001b[0m - \u001b[1mSaving detector to 
logs/demo/detector/2024-03-02_18-08-34/detector\u001b[0m\n" ] }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjsAAAHHCAYAAABZbpmkAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAABR8ElEQVR4nO3dd1gUV/8+/nvpdRdBYCUiEGMBxYoFeyGiorHGEg3Y4hMFu0b5JpbYMPZobMlDQCOWmGgSa0RsiWKN2MWGoo8UE4UVDf38/vDHfFxBhXVhcbxf17XX5Z45M/Oeg4Y7Z5pCCCFAREREJFNGhi6AiIiIqDQx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEFEhCoUCM2bMMHQZb4TIyEgoFArcunVLamvTpg3atGlTJvt//mc1Y8YMKBQK/P3332Wyf3d3dwwaNKhM9kWkK4YdIh2sXLkSCoUCTZo0MXQpJBNHjx7FjBkzkJaWZuhSCinPtREVh4mhCyB6E0VFRcHd3R0nTpzA9evX8d577xm6JCpH9u7dW+J1jh49ii+//BKDBg2CnZ1dsdf7999/YWJSuv8pf1lt8fHxMDLi/zdT+ca/oUQllJCQgKNHj2Lx4sVwdHREVFSUoUuiV3j8+HGZ7s/MzAxmZmaltv38/HxkZmYCACwsLEo97LyMubk5TE1NDbZ/ouJg2CEqoaioKFSoUAEBAQHo3bt3kWHn1q1bUCgUWLhwIb799ltUrVoV5ubmaNSoEU6ePFmo//79+9GyZUtYW1vDzs4O3bp1w+XLl7X6FFyLcfXqVQwcOBAqlQqOjo6YOnUqhBC4c+cOunXrBqVSCbVajUWLFmmtn52djWnTpqFhw4ZQqVSwtrZGy5YtceDAgZce74EDB6BQKLBt27ZCyzZs2ACFQoHY2NgXrp+Tk4Mvv/wS1apVg4WFBRwcHNCiRQtER0dr9bty5Qr69OkDR0dHWFpaokaNGvj888+1+pw5cwadOnWCUqmEjY0N2rdvj2PHjmn1KbiG5tChQxg5ciScnJxQuXJlafnu3bulsba1tUVAQAAuXrz40jEocPHiRbRr1w6WlpaoXLkyZs+ejfz8/EL9irpmZ/ny5ahVqxasrKxQoUIF+Pj4YMOGDQCe/mwnTZoEAPDw8IBCodC6DkihUCAkJARRUVGoVasWzM3NsWfPHmlZUddX/f333+jTpw+USiUcHBwwZswYKSAB//d3NDIystC6z27zVbUVdc3OzZs38eGHH8Le3h5WVlZo2rQpdu7cqdXn4MGDUCgU+PHHHzFnzhxUrlwZFhYWaN++Pa5fv16oJqLXwdNYRCUUFRWFnj17wszMDP3798eqVatw8uRJNGrUqFDfDRs24NGjR/jPf/4DhUKB+fPno2fPnrh586b0f8P79u1Dp06d8O6772LGjBn4999/sXz5cjRv3hx//fUX3N3dtbbZt29feHp6Yt68edi5cydmz54Ne3t7rFmzBu3atcNXX32FqKgoTJw4EY0aNUKrVq0AABqNBv/973/Rv39/fPLJJ3j06BHCw8Ph7++PEydOoF69ekUeb5s2beDq6oqoqCj06NGj0FhUrVoVvr6+LxyvGTNmICwsDMOGDUPjxo2h0Whw6tQp/PXXX3j//fcBAOfOnUPLli1hamqK4cOHw93dHTdu3MD27dsxZ84cAE+DRsuWLaFUKvHZZ5/B1NQUa9asQZs2bXDo0KFC10+NHDkSjo6OmDZtmjSz88MPPyAoKAj+/v746quv8OTJE6xatQotWrTAmTNnCo31s5KTk9G2bVvk5uZiypQpsLa2xrfffgtLS8sXrlPgu+++w+jRo9G7d28pdJw7dw7Hjx/HRx99hJ4
9e+Lq1avYuHEjlixZgooVKwIAHB0dpW3s378fP/74I0JCQlCxYsWX1goAffr0gbu7O8LCwnDs2DEsW7YMDx8+xLp1615Z77OKU9uzUlJS0KxZMzx58gSjR4+Gg4MD1q5diw8++AA//fRTob9D8+bNg5GRESZOnIj09HTMnz8fAwYMwPHjx0tUJ9FLCSIqtlOnTgkAIjo6WgghRH5+vqhcubIYM2aMVr+EhAQBQDg4OIgHDx5I7b/++qsAILZv3y611atXTzg5OYl//vlHajt79qwwMjISgYGBUtv06dMFADF8+HCpLTc3V1SuXFkoFAoxb948qf3hw4fC0tJSBAUFafXNysrSqvPhw4fC2dlZDBkyRKsdgJg+fbr0PTQ0VJibm4u0tDSpLTU1VZiYmGj1K0rdunVFQEDAS/u0atVK2Nraitu3b2u15+fnS3/u3r27MDMzEzdu3JDa7t27J2xtbUWrVq2ktoiICAFAtGjRQuTm5krtjx49EnZ2duKTTz7R2kdycrJQqVSF2p83duxYAUAcP35caktNTRUqlUoAEAkJCVJ769atRevWraXv3bp1E7Vq1Xrp9hcsWFBoOwUACCMjI3Hx4sUilz37Myj4e/LBBx9o9Rs5cqQAIM6ePSuE+L+/oxEREa/c5stqc3Nz0/p7VjBOf/zxh9T26NEj4eHhIdzd3UVeXp4QQogDBw4IAMLT01Pr7+XXX38tAIjz588X2heRrngai6gEoqKi4OzsjLZt2wJ4Ot3ft29fbNq0CXl5eYX69+3bFxUqVJC+t2zZEsDTaX4ASEpKQlxcHAYNGgR7e3upX506dfD+++9j165dhbY5bNgw6c/Gxsbw8fGBEAJDhw6V2u3s7FCjRg1pPwV9C64jyc/Px4MHD5CbmwsfHx/89ddfLz3uwMBAZGVl4aeffpLaNm/ejNzcXAwcOPCl69rZ2eHixYu4du1akcvv37+Pw4cPY8iQIahSpYrWMoVCAQDIy8vD3r170b17d7z77rvS8kqVKuGjjz7Cn3/+CY1Go7XuJ598AmNjY+l7dHQ00tLS0L9/f/z999/Sx9jYGE2aNHnl6bxdu3ahadOmaNy4sdTm6OiIAQMGvHS9gjG4e/dukacwi6t169bw8vIqdv/g4GCt76NGjQKAIv9O6dOuXbvQuHFjtGjRQmqzsbHB8OHDcevWLVy6dEmr/+DBg7Wub3r+3wiRPjDsEBVTXl4eNm3ahLZt2yIhIQHXr1/H9evX0aRJE6SkpCAmJqbQOs//8i4IPg8fPgQA3L59GwBQo0aNQut6enri77//LnRx7fPbVKlUsLCwkE4vPNtesJ8Ca9euRZ06daRrZxwdHbFz506kp6e/9Nhr1qyJRo0aaV2fFBUVhaZNm77yTrSZM2ciLS0N1atXh7e3NyZNmoRz585Jywt+qdWuXfuF27h//z6ePHnywnHKz8/HnTt3tNo9PDy0vheErXbt2sHR0VHrs3fvXqSmpr70OG7fvo1q1aoVai+qpudNnjwZNjY2aNy4MapVq4bg4GAcOXLkles96/njeZXna61atSqMjIy0ngdUGm7fvv3Cn1PB8me96t8IkT7wmh2iYtq/fz+SkpKwadMmbNq0qdDyqKgodOjQQavt2ZmFZwkhdK6jqG0WZz/r16/HoEGD0L17d0yaNAlOTk4wNjZGWFgYbty48cr9BgYGYsyYMbh79y6ysrJw7NgxfPPNN69cr1WrVrhx4wZ+/fVX7N27F//973+xZMkSrF69WmuWSt+ev5am4ELiH374AWq1ulD/0ryjydPTE/Hx8dixYwf27NmDn3/+GStXrsS0adPw5ZdfFmsbxbk26GUKZsle9L1AUTOUpak0/o0QPY9hh6iYoqKi4OTkhBUrVhRatnXrVmzbtg2rV68u0S8lNzc3AE+fVfK8K1euoGLFirC2tta96Gf89NNPePfdd7F161atX3TTp08v1vr9+vXD+PHjsXHjRvz7778wNTVF3759i7W
uvb09Bg8ejMGDByMjIwOtWrXCjBkzMGzYMOm01IULF164vqOjI6ysrF44TkZGRnB1dX1pDVWrVgUAODk5wc/Pr1h1P8vNza3IU3FF1VQUa2tr9O3bF3379kV2djZ69uyJOXPmIDQ0FBYWFi8MH7q6du2a1mzQ9evXkZ+fL13YXDCD8vyDAp+feQFeHIyK4ubm9sKfU8FyorLG01hExfDvv/9i69at6NKlC3r37l3oExISgkePHuG3334r0XYrVaqEevXqYe3atVq/dC5cuIC9e/eic+fOejuGgv+Dfvb/mI8fP/7S28afVbFiRXTq1Anr169HVFQUOnbsWOjUWVH++ecfre82NjZ47733kJWVBeBpkGnVqhW+//57JCYmavUtqNXY2BgdOnTAr7/+qnUaJiUlBRs2bECLFi2gVCpfWoe/vz+USiXmzp2LnJycQsvv37//0vU7d+6MY8eO4cSJE1rrFOc5S8+PgZmZGby8vCCEkGopCLX6ekrx86F8+fLlAIBOnToBAJRKJSpWrIjDhw9r9Vu5cmWhbZWkts6dO+PEiRNaf68eP36Mb7/9Fu7u7iW67ohIXzizQ1QMv/32Gx49eoQPPvigyOVNmzaVHjBY3NmOAgsWLECnTp3g6+uLoUOHSreeq1Qqvb6fqkuXLti6dSt69OiBgIAAJCQkYPXq1fDy8kJGRkaxthEYGIjevXsDAGbNmlWsdby8vNCmTRs0bNgQ9vb2OHXqFH766SeEhIRIfZYtW4YWLVqgQYMGGD58ODw8PHDr1i3s3LkTcXFxAIDZs2cjOjoaLVq0wMiRI2FiYoI1a9YgKysL8+fPf2UdSqUSq1atwscff4wGDRqgX79+cHR0RGJiInbu3InmzZu/9LTcZ599hh9++AEdO3bEmDFjpFvP3dzctK5BKkqHDh2gVqvRvHlzODs74/Lly/jmm28QEBAAW1tbAEDDhg0BAJ9//jn69esHU1NTdO3aVeeZvYSEBHzwwQfo2LEjYmNjsX79enz00UeoW7eu1GfYsGGYN28ehg0bBh8fHxw+fBhXr14ttK2S1DZlyhRs3LgRnTp1wujRo2Fvb4+1a9ciISEBP//8M5+2TIZhwDvBiN4YXbt2FRYWFuLx48cv7DNo0CBhamoq/v77b+m23gULFhTqh+du6xVCiH379onmzZsLS0tLoVQqRdeuXcWlS5e0+hTcUnz//n2t9qCgIGFtbV1oP61bt9a63Tk/P1/MnTtXuLm5CXNzc1G/fn2xY8cOERQUJNzc3F5ZoxBCZGVliQoVKgiVSiX+/fffF47Fs2bPni0aN24s7OzshKWlpahZs6aYM2eOyM7O1up34cIF0aNHD2FnZycsLCxEjRo1xNSpU7X6/PXXX8Lf31/Y2NgIKysr0bZtW3H06FGtPgW3np88ebLIeg4cOCD8/f2FSqUSFhYWomrVqmLQoEHi1KlTrzyWc+fOidatWwsLCwvxzjvviFmzZonw8PBX3nq+Zs0a0apVK+Hg4CDMzc1F1apVxaRJk0R6errW9mfNmiXeeecdYWRkpLVNACI4OLjImp7/WRX8Pbl06ZLo3bu3sLW1FRUqVBAhISGFfmZPnjwRQ4cOFSqVStja2oo+ffqI1NTUIn/+L6rt+VvPhRDixo0bonfv3tLPsnHjxmLHjh1afQpuPd+yZYtW+8tuiSfSlUIIXgVGRMWTm5sLFxcXdO3aFeHh4YYuh4ioWDifSETF9ssvv+D+/fsIDAw0dClERMXGmR0ieqXjx4/j3LlzmDVrFipWrPjKhxASEZUnnNkholdatWoVRowYAScnpxK/W4mIyNA4s0NERESyxpkdIiIikjWGHSIiIpI1PlQQT9+Zc+/ePdja2ur9ke1ERERUOoQQePToEVxcXF76wEqGHQD37t175Xt1iIiIqHy6c+cOKleu/MLlDDuA9Lj2O3fuvPL9OkRERFQ+aDQauLq6Sr/HX4RhB//3Rl+lUsmwQ0RE9IZ51SUovEC
ZiIiIZI1hh4iIiGSNYYeIiIhkjdfsEBHRGyM/Px/Z2dmGLoPKiKmpKYyNjV97Oww7RET0RsjOzkZCQgLy8/MNXQqVITs7O6jV6td6Dh7DDhERlXtCCCQlJcHY2Biurq4vfYAcyYMQAk+ePEFqaioAoFKlSjpvi2GHiIjKvdzcXDx58gQuLi6wsrIydDlURiwtLQEAqampcHJy0vmUFqMxERGVe3l5eQAAMzMzA1dCZa0g3Obk5Oi8DYYdIiJ6Y/D9hW8fffzMGXaIiIhI1hh2iIiISHLw4EEoFAqkpaUZuhS9MegFyu7u7rh9+3ah9pEjR2LFihXIzMzEhAkTsGnTJmRlZcHf3x8rV66Es7Oz1DcxMREjRozAgQMHYGNjg6CgIISFhcHEhNdeExHJ3ZLoq2W6v3HvVy9R/0GDBmHt2rUICwvDlClTpPZffvkFPXr0gBBC3yVSEQw6s3Py5EkkJSVJn+joaADAhx9+CAAYN24ctm/fji1btuDQoUO4d+8eevbsKa2fl5eHgIAAZGdn4+jRo1i7di0iIyMxbdo0gxwPERHR8ywsLPDVV1/h4cOHetsmH6xYMgYNO46OjlCr1dJnx44dqFq1Klq3bo309HSEh4dj8eLFaNeuHRo2bIiIiAgcPXoUx44dAwDs3bsXly5dwvr161GvXj106tQJs2bNwooVK/gXgYiIygU/Pz+o1WqEhYW9sM/PP/+MWrVqwdzcHO7u7li0aJHWcnd3d8yaNQuBgYFQKpUYPnw4IiMjYWdnhx07dqBGjRqwsrJC79698eTJE6xduxbu7u6oUKECRo8eLd3NBgA//PADfHx8YGtrC7VajY8++kh6lo1clZtrdrKzs7F+/XoMGTIECoUCp0+fRk5ODvz8/KQ+NWvWRJUqVRAbGwsAiI2Nhbe3t9ZpLX9/f2g0Gly8eLHMj4GIiOh5xsbGmDt3LpYvX467d+8WWn769Gn06dMH/fr1w/nz5zFjxgxMnToVkZGRWv0WLlyIunXr4syZM5g6dSoA4MmTJ1i2bBk2bdqEPXv24ODBg+jRowd27dqFXbt24YcffsCaNWvw008/SdvJycnBrFmzcPbsWfzyyy+4desWBg0aVJpDYHDl5sKWX375BWlpadKAJycnw8zMDHZ2dlr9nJ2dkZycLPV5NugULC9Y9iJZWVnIysqSvms0Gj0cQekq6rx0Sc8dExGRYfTo0QP16tXD9OnTER4errVs8eLFaN++vRRgqlevjkuXLmHBggVaIaRdu3aYMGGC9P2PP/5ATk4OVq1ahapVqwIAevfujR9++AEpKSmwsbGBl5cX2rZtiwMHDqBv374AgCFDhkjbePfdd7Fs2TI0atQIGRkZsLGxKa0hMKhyM7MTHh6OTp06wcXFpdT3FRYWBpVKJX1cXV1LfZ9ERPR2++qrr7B27VpcvnxZq/3y5cto3ry5Vlvz5s1x7do1rdNPPj4+hbZpZWUlBR3g6f/wu7u7a4UWZ2dnrdNUp0+fRteuXVGlShXY2tqidevWAJ7e8CNX5SLs3L59G/v27cOwYcOkNrVajezs7EK3vqWkpECtVkt9UlJSCi0vWPYioaGhSE9Plz537tzR05EQEREVrVWrVvD390doaKhO61tbWxdqMzU11fquUCiKbCt4eerjx4/h7+8PpVKJqKgonDx5Etu2bQMg74uey0XYiYiIgJOTEwICAqS2hg0bwtTUFDExMVJbfHw8EhMT4evrCwDw9fXF+fPntRJrdHQ0lEolvLy8Xrg/c3NzKJVKrQ8REVFpmzdvHrZv3y5dewoAnp6eOHLkiFa/I0eOoHr16jq/C+pFrly5gn/++Qfz5s1Dy5YtUbNmTdlfnAyUg2t28vPzERERgaCgIK1n46hUKgwdOhTjx4+Hvb09lEolRo0aBV9fXzRt2hQA0KFDB3h5eeHjjz/G/PnzkZycjC+++ALBwcEwNzc31CEREREVydvbGwMGDMCyZcuktgkTJqBRo0aYNWs
W+vbti9jYWHzzzTdYuXKl3vdfpUoVmJmZYfny5fj0009x4cIFzJo1S+/7KW8MPrOzb98+JCYmal0wVWDJkiXo0qULevXqhVatWkGtVmPr1q3ScmNjY+zYsQPGxsbw9fXFwIEDERgYiJkzZ5blIRARERXbzJkzpdNKANCgQQP8+OOP2LRpE2rXro1p06Zh5syZpXKHlKOjIyIjI7FlyxZ4eXlh3rx5WLhwod73U94oBB/fCI1GA5VKhfT09HJ7Sot3YxHR2ywzMxMJCQnw8PCAhYWFocuhMvSyn31xf38bfGaHiIiIqDQx7BAREZGsGfwCZdIdT20RERG9Gmd2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiIqEju7u5YunSpoct4bXzODhERvbkOhJXt/tqG6rRabGwsWrRogY4dO2Lnzp16LopehTM7REREpSw8PByjRo3C4cOHce/ePUOX89Zh2CmnlkRf1foQEdGbKSMjA5s3b8aIESMQEBCAyMhIadnBgwehUCgQExMDHx8fWFlZoVmzZoiPj9faxqpVq1C1alWYmZmhRo0a+OGHH7SWKxQKrFmzBl26dIGVlRU8PT0RGxuL69evo02bNrC2tkazZs1w48YNaZ0bN26gW7ducHZ2ho2NDRo1aoR9+/a99FgSExPRrVs32NjYQKlUok+fPkhJSZGWDxo0CN27d9daZ+zYsWjTpo30/aeffoK3tzcsLS3h4OAAPz8/PH78uJijqRuGHSIiolL0448/ombNmqhRowYGDhyI77//HkIIrT6ff/45Fi1ahFOnTsHExARDhgyRlm3btg1jxozBhAkTcOHCBfznP//B4MGDceDAAa1tzJo1C4GBgYiLi0PNmjXx0Ucf4T//+Q9CQ0Nx6tQpCCEQEhIi9c/IyEDnzp0RExODM2fOoGPHjujatSsSExOLPI78/Hx069YNDx48wKFDhxAdHY2bN2+ib9++xR6LpKQk9O/fH0OGDMHly5dx8OBB9OzZs9B46Buv2SEiIipF4eHhGDhwIACgY8eOSE9Px6FDh7RmO+bMmYPWrVsDAKZMmYKAgABkZmbCwsICCxcuxKBBgzBy5EgAwPjx43Hs2DEsXLgQbdu2lbYxePBg9OnTBwAwefJk+Pr6YurUqfD39wcAjBkzBoMHD5b6161bF3Xr1pW+z5o1C9u2bcNvv/2mFYoKxMTE4Pz580hISICrqysAYN26dahVqxZOnjyJRo0avXIskpKSkJubi549e8LNzQ0A4O3t/epBfE2c2SEiIiol8fHxOHHiBPr37w8AMDExQd++fREeHq7Vr06dOtKfK1WqBABITU0FAFy+fBnNmzfX6t+8eXNcvnz5hdtwdnYGoB0knJ2dkZmZCY1GA+DpzM7EiRPh6ekJOzs72NjY4PLlyy+c2bl8+TJcXV2loAMAXl5esLOzK1TLi9StWxft27eHt7c3PvzwQ3z33Xd4+PBhsdZ9HQw7REREpSQ8PBy5ublwcXGBiYkJTExMsGrVKvz8889IT0+X+pmamkp/VigUAJ6eNiqJorbxsu1OnDgR27Ztw9y5c/HHH38gLi4O3t7eyM7OLuFR/h8jI6NCp6RycnKkPxsbGyM6Ohq7d++Gl5cXli9fjho1aiAhIUHnfRarrlLdOhER0VsqNzcX69atw6JFixAXFyd9zp49CxcXF2zcuLFY2/H09MSRI0e02o4cOQIvL6/Xqu/IkSMYNGgQevToAW9vb6jVaty6deulddy5cwd37tyR2i5duoS0tDSpFkdHRyQlJWmtFxcXp/VdoVCgefPm+PLLL3HmzBmYmZlh27Ztr3Usr8JrdoiIiErBjh078PDhQwwdOhQqlUprWa9evRAeHo4FCxa8cjuTJk1Cnz59UL9+ffj5+WH79u3YunXrK++cepVq1aph69at6Nq1KxQKBaZOnfrS2SQ/Pz94e3tjwIABWLp0KXJzczFy5Ei0bt0
aPj4+AIB27dphwYIFWLduHXx9fbF+/XpcuHAB9evXBwAcP34cMTEx6NChA5ycnHD8+HHcv38fnp6er3Usr8KZHSIiolIQHh4OPz+/QkEHeBp2Tp06hXPnzr1yO927d8fXX3+NhQsXolatWlizZg0iIiK0LnDWxeLFi1GhQgU0a9YMXbt2hb+/Pxo0aPDC/gqFAr/++isqVKiAVq1awc/PD++++y42b94s9fH398fUqVPx2WefoVGjRnj06BECAwOl5UqlEocPH0bnzp1RvXp1fPHFF1i0aBE6der0WsfyKgpR2vd7vQE0Gg1UKhXS09OhVCoNXQ4A6PxsnXHvV9dzJUREhpeZmYmEhAR4eHjAwsLC0OVQGXrZz764v785s0NERESyxrBDREREssawQ0RERLLGsENERESyxrBDRERvDN5T8/bRx8+cYYeIiMo9Y2NjAHitp/vSm+nJkycAtJ8GXVJ8qCAREZV7JiYmsLKywv3792FqagojI/6/utwJIfDkyROkpqbCzs5OCry6YNghIqJyT6FQoFKlSkhISMDt27cNXQ6VITs7O6jV6tfaBsMOERG9EczMzFCtWjWeynqLmJqavtaMTgGGHSIiemMYGRnxCcpUYjzpSURERLLGsENERESyxrBDREREssawQ0RERLLGsENERESyxrBDREREssawQ0RERLLGsENERESyxrBDREREsmbwsPO///0PAwcOhIODAywtLeHt7Y1Tp05Jy4UQmDZtGipVqgRLS0v4+fnh2rVrWtt48OABBgwYAKVSCTs7OwwdOhQZGRllfShERERUDhk07Dx8+BDNmzeHqakpdu/ejUuXLmHRokWoUKGC1Gf+/PlYtmwZVq9ejePHj8Pa2hr+/v7IzMyU+gwYMAAXL15EdHQ0duzYgcOHD2P48OGGOCQiIiIqZxRCCGGonU+ZMgVHjhzBH3/8UeRyIQRcXFwwYcIETJw4EQCQnp4OZ2dnREZGol+/frh8+TK8vLxw8uRJ+Pj4AAD27NmDzp074+7du3BxcXllHRqNBiqVCunp6VAqlfo7wNewJPqqTuuNe7+6nishIiIqn4r7+9ugMzu//fYbfHx88OGHH8LJyQn169fHd999Jy1PSEhAcnIy/Pz8pDaVSoUmTZogNjYWABAbGws7Ozsp6ACAn58fjIyMcPz48SL3m5WVBY1Go/UhIiIieTJo2Ll58yZWrVqFatWq4ffff8eIESMwevRorF27FgCQnJwMAHB2dtZaz9nZWVqWnJwMJycnreUmJiawt7eX+jwvLCwMKpVK+ri6uur70IiIiKicMGjYyc/PR4MGDTB37lzUr18fw4cPxyeffILVq1eX6n5DQ0ORnp4ufe7cuVOq+yMiIiLDMWjYqVSpEry8vLTaPD09kZiYCABQq9UAgJSUFK0+KSkp0jK1Wo3U1FSt5bm5uXjw4IHU53nm5uZQKpVaHyIiIpIng4ad5s2bIz4+Xqvt6tWrcHNzAwB4eHhArVYjJiZGWq7RaHD8+HH4+voCAHx9fZGWlobTp09Lffbv34/8/Hw0adKkDI6CiIiIyjMTQ+583LhxaNasGebOnYs+ffrgxIkT+Pbbb/Htt98CABQKBcaOHYvZs2ejWrVq8PDwwNSpU+Hi4oLu3bsDeDoT1LFjR+n0V05ODkJCQtCvX79i3YlFRERE8mbQsNOoUSNs27YNoaGhmDlzJjw8PLB06VIMGDBA6vPZZ5/h8ePHGD58ONLS0tCiRQvs2bMHFhYWUp+oqCiEhISgffv2MDIyQq9evbBs2TJDHBIRERGVMwZ9zk55wefsEBERvXneiOfsEBEREZU2hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI
1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjUTQxdA+rUk+qrW93HvVzdQJUREROUDZ3aIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYMGnZmzJgBhUKh9alZs6a0PDMzE8HBwXBwcICNjQ169eqFlJQUrW0kJiYiICAAVlZWcHJywqRJk5Cbm1vWh0JERETllImhC6hVqxb27dsnfTcx+b+Sxo0bh507d2LLli1QqVQICQlBz549ceTIEQBAXl4eAgICoFarcfToUSQlJSEwMBCmpqaYO3dumR8LERERlT8GDzsmJiZQq9WF2tPT0xEeHo4NGzagXbt2AICIiAh4enri2LFjaNq0Kfbu3YtLly5h3759cHZ2Rr169TBr1ixMnjwZM2bMgJmZWVkfDhEREZUzBr9m59q1a3BxccG7776LAQMGIDExEQBw+vRp5OTkwM/PT+pbs2ZNVKlSBbGxsQCA2NhYeHt7w9nZWerj7+8PjUaDixcvvnCfWVlZ0Gg0Wh8iIiKSJ4OGnSZNmiAyMhJ79uzBqlWrkJCQgJYtW+LRo0dITk6GmZkZ7OzstNZxdnZGcnIyACA5OVkr6BQsL1j2ImFhYVCpVNLH1dVVvwdGRERE5YZBT2N16tRJ+nOdOnXQpEkTuLm54ccff4SlpWWp7Tc0NBTjx4+Xvms0GgYeIiIimTL4aaxn2dnZoXr16rh+/TrUajWys7ORlpam1SclJUW6xketVhe6O6vge1HXARUwNzeHUqnU+hAREZE8lauwk5GRgRs3bqBSpUpo2LAhTE1NERMTIy2Pj49HYmIifH19AQC+vr44f/48UlNTpT7R0dFQKpXw8vIq8/qJiIio/DHoaayJEyeia9eucHNzw7179zB9+nQYGxujf//+UKlUGDp0KMaPHw97e3solUqMGjUKvr6+aNq0KQCgQ4cO8PLywscff4z58+cjOTkZX3zxBYKDg2Fubm7IQyMiIqJywqBh5+7du+jfvz/++ecfODo6okWLFjh27BgcHR0BAEuWLIGRkRF69eqFrKws+Pv7Y+XKldL6xsbG2LFjB0aMGAFfX19YW1sjKCgIM2fONNQhERERUTmjEEIIQxdhaBqNBiqVCunp6eXm+p0l0Vf1sp1x71fXy3aIiIjKm+L+/i5X1+wQERER6RvDDhEREckaww4RERHJGsMOERERyRrDDhEREckaww4RERHJGsMOERERyRrDDhEREcmaQZ+gTE/p6wGCREREVBhndoiIiEjWGHaIiIhI1ngai4iISG4OhGl/bxtqmDrKCc7sEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGs6RR2bt68qe86iIiIiEqFTmHnvffeQ9u2bbF+/XpkZmbquyYiIiIivdEp7Pz111+oU6cOxo8fD7Vajf/85z84ceKEvmsjIiKi5x0I0/7QK+kUdurVq4evv/4a9+7dw/fff4+kpCS0aNECtWvXxuLFi3H//n1910lERES6ej4gvWUh6bUuUDYxMUHPnj2xZcsWfPXVV7h+/TomTpwIV1dXBAYGIikpSV91EhEREenktcLOqVOnMHLkSFSqVAmLFy/GxIkTcePGDURHR+PevXvo1q2bvuokIiIi0omJListXrwYERERiI+PR+fOnbFu3Tp07twZRkZPs5OHhwciIyPh7u6uz1qJiIjoeW/ZKSld6BR2Vq1ahSFDhmDQoEGoVKlSkX2cnJwQHh7+WsURERERvS6dws61a9de2cfMzAxBQUG6bJ6IiIhIb3S6ZiciIgJbtmwp1L5lyxasXbv2tYsiIiI
i0hedwk5YWBgqVqxYqN3JyQlz58597aKIiIiI9EWnsJOYmAgPD49C7W5ubkhMTHztooiIiIj0Raew4+TkhHPnzhVqP3v2LBwcHF67KCIiIiJ90Sns9O/fH6NHj8aBAweQl5eHvLw87N+/H2PGjEG/fv10KmTevHlQKBQYO3as1JaZmYng4GA4ODjAxsYGvXr1QkpKitZ6iYmJCAgIgJWVFZycnDBp0iTk5ubqVAMRERHJj053Y82aNQu3bt1C+/btYWLydBP5+fkIDAzU6ZqdkydPYs2aNahTp45W+7hx47Bz505s2bIFKpUKISEh6NmzJ44cOQIAyMvLQ0BAANRqNY4ePYqkpCQEBgbC1NSU1w4RERERAB1ndszMzLB582ZcuXIFUVFR2Lp1K27cuIHvv/8eZmZmJdpWRkYGBgwYgO+++w4VKlSQ2tPT0xEeHo7FixejXbt2aNiwISIiInD06FEcO3YMALB3715cunQJ69evR7169dCpUyfMmjULK1asQHZ2ti6HRkRERDLzWq+LqF69Oj788EN06dIFbm5uOm0jODgYAQEB8PPz02o/ffo0cnJytNpr1qyJKlWqIDY2FgAQGxsLb29vODs7S338/f2h0Whw8eLFF+4zKysLGo1G60NERETypNNprLy8PERGRiImJgapqanIz8/XWr5///5ibWfTpk3466+/cPLkyULLkpOTYWZmBjs7O612Z2dnJCcnS32eDToFywuWvUhYWBi+/PLLYtVIREREbzadws6YMWMQGRmJgIAA1K5dGwqFosTbuHPnDsaMGYPo6GhYWFjoUobOQkNDMX78eOm7RqOBq6trmdZAREREZUOnsLNp0yb8+OOP6Ny5s847Pn36NFJTU9GgQQOpLS8vD4cPH8Y333yD33//HdnZ2UhLS9Oa3UlJSYFarQYAqNVqnDhxQmu7BXdrFfQpirm5OczNzXWunYiIiN4cOl+g/N57773Wjtu3b4/z588jLi5O+vj4+GDAgAHSn01NTRETEyOtEx8fj8TERPj6+gIAfH19cf78eaSmpkp9oqOjoVQq4eXl9Vr1ERERkTzoNLMzYcIEfP311/jmm290OoUFALa2tqhdu7ZWm7W1NRwcHKT2oUOHYvz48bC3t4dSqcSoUaPg6+uLpk2bAgA6dOgALy8vfPzxx5g/fz6Sk5PxxRdfIDg4mDM3REREBEDHsPPnn3/iwIED2L17N2rVqgVTU1Ot5Vu3btVLcUuWLIGRkRF69eqFrKws+Pv7Y+XKldJyY2Nj7NixAyNGjICvry+sra0RFBSEmTNn6mX/RERE9ObTKezY2dmhR48e+q4FBw8e1PpuYWGBFStWYMWKFS9cx83NDbt27dJ7LURERCQPOoWdiIgIfddBREREVCp0fqhgbm4u9u3bhzVr1uDRo0cAgHv37iEjI0NvxRERERG9Lp1mdm7fvo2OHTsiMTERWVlZeP/992Fra4uvvvoKWVlZWL16tb7rJCIievscCDN0BbKg08zOmDFj4OPjg4cPH8LS0lJq79Gjh9at4kRERESGptPMzh9//IGjR48Weumnu7s7/ve//+mlMCIiIipFz88atQ01TB1lQKewk5+fj7y8vELtd+/eha2t7WsXRfqzJPpqobZx71c3QCVERESGodNprA4dOmDp0qXSd4VCgYyMDEyfPv21XiFBREREpG86zewsWrQI/v7+8PLyQmZmJj766CNcu3YNFStWxMaNG/VdIxEREZHOdAo7lStXxtmzZ7Fp0yacO3cOGRkZGDp0KAYMGKB1wTIRERGRoekUdgDAxMQEAwcO1GctRERERHqnU9hZt27dS5cHBgbqVAwRERGRvukUdsaMGaP1PScnB0+ePIGZmRmsrKwYdoiIiKjc0OlurIcPH2p9MjIyEB8fjxYtWvACZSIiIipXdH431vOqVauGefPmFZr1ISIiIjIkvYUd4OlFy/fu3dPnJomIiIhei07X7Pz2229a34UQSEpKwjf
ffIPmzZvrpTAiIiIifdAp7HTv3l3ru0KhgKOjI9q1a4dFixbpoy4iIiIivdD53VhEREREbwK9XrNDREREVN7oNLMzfvz4YvddvHixLrsgIiIi0gudws6ZM2dw5swZ5OTkoEaNGgCAq1evwtjYGA0aNJD6KRQK/VRJREREpCOdwk7Xrl1ha2uLtWvXokKFCgCePmhw8ODBaNmyJSZMmKDXIomIiIh0pdM1O4sWLUJYWJgUdACgQoUKmD17Nu/GIiIionJFp7Cj0Whw//79Qu3379/Ho0ePXrsoIiIiIn3RKez06NEDgwcPxtatW3H37l3cvXsXP//8M4YOHYqePXvqu0YiIiIinel0zc7q1asxceJEfPTRR8jJyXm6IRMTDB06FAsWLNBrgURERESvQ6ewY2VlhZUrV2LBggW4ceMGAKBq1aqwtrbWa3FEREREr+u1HiqYlJSEpKQkVKtWDdbW1hBC6KsuIiIiIr3QKez8888/aN++PapXr47OnTsjKSkJADB06FDedk5ERETlik5hZ9y4cTA1NUViYiKsrKyk9r59+2LPnj16K46IiIjodel0zc7evXvx+++/o3Llylrt1apVw+3bt/VSGBEREZE+6DSz8/jxY60ZnQIPHjyAubn5axdFREREpC86hZ2WLVti3bp10neFQoH8/HzMnz8fbdu21VtxRERERK9Lp9NY8+fPR/v27XHq1ClkZ2fjs88+w8WLF/HgwQMcOXJE3zUSERER6UynmZ3atWvj6tWraNGiBbp164bHjx+jZ8+eOHPmDKpWrarvGomIiIh0VuKZnZycHHTs2BGrV6/G559/Xho1EREREelNiWd2TE1Nce7cudKohYiIiEjvdDqNNXDgQISHh+u7FiIiIiK90+kC5dzcXHz//ffYt28fGjZsWOidWIsXL9ZLcURERESvq0QzOzdv3kR+fj4uXLiABg0awNbWFlevXsWZM2ekT1xcXLG3t2rVKtSpUwdKpRJKpRK+vr7YvXu3tDwzMxPBwcFwcHCAjY0NevXqhZSUFK1tJCYmIiAgAFZWVnBycsKkSZOQm5tbksN66yyJvqr1ISIikrMSzexUq1YNSUlJOHDgAICnr4dYtmwZnJ2dddp55cqVMW/ePFSrVg1CCKxduxbdunXDmTNnUKtWLYwbNw47d+7Eli1boFKpEBISgp49e0q3t+fl5SEgIABqtRpHjx5FUlISAgMDYWpqirlz5+pUExEREcmLQpTgVeVGRkZITk6Gk5MTAECpVCIuLg7vvvuu3gqyt7fHggUL0Lt3bzg6OmLDhg3o3bs3AODKlSvw9PREbGwsmjZtit27d6NLly64d++eFLhWr16NyZMn4/79+zAzMyvWPjUaDVQqFdLT06FUKvV2LMVl6NmVce9XN+j+iYjoBQ6Eld2+2oaW3b70pLi/v3W6QLlACXLSK+Xl5WHTpk14/PgxfH19cfr0aeTk5MDPz0/qU7NmTVSpUgWxsbEAgNjYWHh7e2vNLPn7+0Oj0eDixYsv3FdWVhY0Go3Wh4iIiOSpRGFHoVBAoVAUansd58+fh42NDczNzfHpp59i27Zt8PLyQnJyMszMzGBnZ6fV39nZGcnJyQCA5OTkQqfQCr4X9ClKWFgYVCqV9HF1dX2tYyAiIqLyq0TX7AghMGjQIOlln5mZmfj0008L3Y21devWYm+zRo0aiIuLQ3p6On766ScEBQXh0KFDJSmrxEJDQzF+/Hjpu0ajKdPAY+jTVkRERG+TEoWdoKAgre8DBw587QLMzMzw3nvvAQAaNmyIkydP4uuvv0bfvn2RnZ2NtLQ0rdmdlJQUqNVqAIBarcaJEye0tldwt1ZBn6KYm5vz7exERERviRKFnYiIiNKqQ5Kfn4+srCw0bNgQpqamiImJQa9evQAA8fHxSExMhK+vLwDA19cXc+bMQWpqqnTRdHR0NJRKJby8vEq9ViIiIir/dHqooL6EhoaiU6dOqFKlCh49eoQNGzbg4MG
D+P3336FSqTB06FCMHz8e9vb2UCqVGDVqFHx9fdG0aVMAQIcOHeDl5YWPP/4Y8+fPR3JyMr744gsEBwdz5oaIiIgAGDjspKamIjAwEElJSVCpVKhTpw5+//13vP/++wCAJUuWwMjICL169UJWVhb8/f2xcuVKaX1jY2Ps2LEDI0aMgK+vL6ytrREUFISZM2ca6pCIiIionCnRc3bkqqyfs1PeLlDmc3aIiMopPmfnpcrkOTtERERE5R3DDhEREckaww4RERHJGsMOERERyRrDDhEREckaww4RERHJGsMOERERyRrDDhEREckaww4RERHJmkFfF0FERET/v7J8WvJbhjM7REREJGsMO0RERCRrPI1FRERERZ9GewNfDloUzuwQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGs8UWgREREhlDUizepVHBmh4iIiGSNYYeIiIhkjWGHiIiIZI1hh4iIiGSNYYeIiIhkjWGHiIiIZI1hh4iIiGSNYYeIiIhkjWGHiIiIZI1PUCYsib5aqG3c+9UNUAkREZH+cWaHiIiIZI1hh4iIiGSNp7GIiIioaM+/rLRtqGHqeE0GndkJCwtDo0aNYGtrCycnJ3Tv3h3x8fFafTIzMxEcHAwHBwfY2NigV69eSElJ0eqTmJiIgIAAWFlZwcnJCZMmTUJubm5ZHgoRERGVUwYNO4cOHUJwcDCOHTuG6Oho5OTkoEOHDnj8+LHUZ9y4cdi+fTu2bNmCQ4cO4d69e+jZs6e0PC8vDwEBAcjOzsbRo0exdu1aREZGYtq0aYY4JCIiIipnFEIIYegiCty/fx9OTk44dOgQWrVqhfT0dDg6OmLDhg3o3bs3AODKlSvw9PREbGwsmjZtit27d6NLly64d+8enJ2dAQCrV6/G5MmTcf/+fZiZmb1yvxqNBiqVCunp6VAqlaV6jEDRdz+VN7wbi4iolD1/iuhNUM5OYxX393e5ukA5PT0dAGBvbw8AOH36NHJycuDn5yf1qVmzJqpUqYLY2FgAQGxsLLy9vaWgAwD+/v7QaDS4ePFikfvJysqCRqPR+hAREZE8lZuwk5+fj7Fjx6J58+aoXbs2ACA5ORlmZmaws7PT6uvs7Izk5GSpz7NBp2B5wbKihIWFQaVSSR9XV1c9Hw0RERGVF+Um7AQHB+PChQvYtGlTqe8rNDQU6enp0ufOnTulvk8iIiIyjHJx63lISAh27NiBw4cPo3LlylK7Wq1GdnY20tLStGZ3UlJSoFarpT4nTpzQ2l7B3VoFfZ5nbm4Oc3NzPR8FERERlUcGndkRQiAkJATbtm3D/v374eHhobW8YcOGMDU1RUxMjNQWHx+PxMRE+Pr6AgB8fX1x/vx5pKamSn2io6OhVCrh5eVVNgdCRERE5ZZBZ3aCg4OxYcMG/Prrr7C1tZWusVGpVLC0tIRKpcLQoUMxfvx42NvbQ6lUYtSoUfD19UXTpk0BAB06dICXlxc+/vhjzJ8/H8nJyfjiiy8QHBzM2RsiIiIybNhZtWoVAKBNmzZa7RERERg0aBAAYMmSJTAyMkKvXr2QlZUFf39/rFy5UuprbGyMHTt2YMSIEfD19YW1tTWCgoIwc+bMsjoMIiIiKscMGnaK84gfCwsLrFixAitWrHhhHzc3N+zatUufpREREZFMlJu7sYiIiIhKA8MOERERyRrDDhEREckaww4RERHJGsMOERERyVq5eIIyERERvQGKelN7OXsTelEYdoiIiEpbUSGBygxPYxEREZGsMewQERGRrPE0FhVpSfRVre/j3q9uoEqIiIheD2d2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1njrORERkb7xicnlCmd2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWeDdWKXv
+hZpERERUthh2iIiISHdF3WbfNrTs63gJnsYiIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIlkzMXQB9GZYEn21UNu496sboBIiIqKSMejMzuHDh9G1a1e4uLhAoVDgl19+0VouhMC0adNQqVIlWFpaws/PD9euXdPq8+DBAwwYMABKpRJ2dnYYOnQoMjIyyvAoiIiISMuBMO2PgRk07Dx+/Bh169bFihUrilw+f/58LFu2DKtXr8bx48dhbW0Nf39/ZGZmSn0GDBiAixcvIjo6Gjt27MDhw4cxfPjwsjoEIiIiKucMehqrU6dO6NSpU5HLhBBYunQpvvjiC3Tr1g0AsG7dOjg7O+OXX35Bv379cPnyZezZswcnT56Ej48PAGD58uXo3LkzFi5cCBcXlzI7FiIiIiqfyu0FygkJCUhOToafn5/UplKp0KRJE8TGxgIAYmNjYWdnJwUdAPDz84ORkRGOHz9e5jUTERFR+VNuL1BOTk4GADg7O2u1Ozs7S8uSk5Ph5OSktdzExAT29vZSn6JkZWUhKytL+q7RaPRVNhEREZUz5XZmpzSFhYVBpVJJH1dXV0OXRERERKWk3IYdtVoNAEhJSdFqT0lJkZap1WqkpqZqLc/NzcWDBw+kPkUJDQ1Fenq69Llz546eqyciIqLyotyGHQ8PD6jVasTExEhtGo0Gx48fh6+vLwDA19cXaWlpOH36tNRn//79yM/PR5MmTV64bXNzcyiVSq0PERERyZNBr9nJyMjA9evXpe8JCQmIi4uDvb09qlSpgrFjx2L27NmoVq0aPDw8MHXqVLi4uKB79+4AAE9PT3Ts2BGffPIJVq9ejZycHISEhKBfv368E4uIiIgAGDjsnDp1Cm3btpW+jx8/HgAQFBSEyMhIfPbZZ3j8+DGGDx+OtLQ0tGjRAnv27IGFhYW0TlRUFEJCQtC+fXsYGRmhV69eWLZsWZkfCxEREZVPCiGEMHQRhqbRaKBSqZCenq73U1pFvWZBLvi6CCJ6K5WDJwK/cdqGlspmi/v7u9xes0NERESkD+X2OTtU/j0/a8WZHiIiKo84s0NERESyxrBDREREssawQ0RERLLGsENERESyxrBDREREssawQ0RERLLGsENERESyxrBDREREssawQ0RERLLGsENERESyxtdFkN4U9dJTvkKCiIgMjTM7REREJGsMO0RERCRrDDtEREQkaww7REREJGsMO0RERCRrDDtEREQkaww7REREJGsMO0RERCRrfKgglSo+aJCI3mgHwgxdAekBZ3aIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1vicHTI4PouHiIhKE8MOlbmiwg0REVFp4WksIiIikjWGHSIiIpI1hh0iIiKSNV6zQ0REVIAv/pQlhh0ql56/iJl3ZxERka54GouIiIhkjTM79Mbi7A8RERUHZ3aIiIhI1mQzs7NixQosWLAAycnJqFu3LpYvX47GjRsbuizSEz6IkIiIdCWLsLN582aMHz8eq1evRpMmTbB06VL4+/sjPj4eTk5Ohi6PiIjKI9559daQRdhZvHgxPvnkEwwePBgAsHr1auzcuRPff/89pkyZYuDqyJCKc10P381FRCRvb3zYyc7OxunTpxEaGiq1GRkZwc/PD7GxsQasjMpacU516Xo6jIGI6A3HWZy32hsfdv7++2/k5eXB2dlZq93Z2RlXrlwpcp2srCxkZWVJ39PT0wEAGo1G7/VlPs7Q+zap9IX98pde+hRHcLv39LIdorfW4UWF21pN0P7+OLNsaqGilcLv16ebfbpdIcRL+73xYUcXYWFh+PLLLwu1u7q6GqAaetv9P0MXQCRLMw1dAGkp3Z/Ho0ePoFKpXrj8jQ87FStWhLGxMVJ
SUrTaU1JSoFari1wnNDQU48ePl77n5+fjwYMHcHBwgEKheOU+NRoNXF1dcefOHSiVytc7gLcEx6zkOGYlxzErGY5XyXHMSq40x0wIgUePHsHFxeWl/d74sGNmZoaGDRsiJiYG3bt3B/A0vMTExCAkJKTIdczNzWFubq7VZmdnV+J9K5VK/mUvIY5ZyXHMSo5jVjIcr5LjmJVcaY3Zy2Z0CrzxYQcAxo8fj6CgIPj4+KBx48ZYunQpHj9+LN2dRURERG8vWYSdvn374v79+5g2bRqSk5NRr1497Nmzp9BFy0RERPT2kUXYAYCQkJAXnrbSN3Nzc0yfPr3QqTB6MY5ZyXHMSo5jVjIcr5LjmJVceRgzhXjV/VpEREREbzC+CJSIiIhkjWGHiIiIZI1hh4iIiGSNYYeIiIhkjWGnhFasWAF3d3dYWFigSZMmOHHihKFLMpiwsDA0atQItra2cHJyQvfu3REfH6/VJzMzE8HBwXBwcICNjQ169epV6GnXiYmJCAgIgJWVFZycnDBp0iTk5uaW5aEYxLx586BQKDB27FipjeNV2P/+9z8MHDgQDg4OsLS0hLe3N06dOiUtF0Jg2rRpqFSpEiwtLeHn54dr165pbePBgwcYMGAAlEol7OzsMHToUGRkyPO9dXl5eZg6dSo8PDxgaWmJqlWrYtasWVrvDnrbx+zw4cPo2rUrXFxcoFAo8Msvv2gt19f4nDt3Di1btoSFhQVcXV0xf/780j60UvOyMcvJycHkyZPh7e0Na2truLi4IDAwEPfu3dPahkHHTFCxbdq0SZiZmYnvv/9eXLx4UXzyySfCzs5OpKSkGLo0g/D39xcRERHiwoULIi4uTnTu3FlUqVJFZGRkSH0+/fRT4erqKmJiYsSpU6dE06ZNRbNmzaTlubm5onbt2sLPz0+cOXNG7Nq1S1SsWFGEhoYa4pDKzIkTJ4S7u7uoU6eOGDNmjNTO8dL24MED4ebmJgYNGiSOHz8ubt68KX7//Xdx/fp1qc+8efOESqUSv/zyizh79qz44IMPhIeHh/j333+lPh07dhR169YVx44dE3/88Yd47733RP/+/Q1xSKVuzpw5wsHBQezYsUMkJCSILVu2CBsbG/H1119Lfd72Mdu1a5f4/PPPxdatWwUAsW3bNq3l+hif9PR04ezsLAYMGCAuXLggNm7cKCwtLcWaNWvK6jD16mVjlpaWJvz8/MTmzZvFlStXRGxsrGjcuLFo2LCh1jYMOWYMOyXQuHFjERwcLH3Py8sTLi4uIiwszIBVlR+pqakCgDh06JAQ4uk/AFNTU7Flyxapz+XLlwUAERsbK4R4+g/IyMhIJCcnS31WrVollEqlyMrKKtsDKCOPHj0S1apVE9HR0aJ169ZS2OF4FTZ58mTRokWLFy7Pz88XarVaLFiwQGpLS0sT5ubmYuPGjUIIIS5duiQAiJMnT0p9du/eLRQKhfjf//5XesUbSEBAgBgyZIhWW8+ePcWAAQOEEByz5z3/i1tf47Ny5UpRoUIFrX+XkydPFjVq1CjlIyp9RQXE5504cUIAELdv3xZCGH7MeBqrmLKzs3H69Gn4+flJbUZGRvDz80NsbKwBKys/0tPTAQD29vYAgNOnTyMnJ0drzGrWrIkqVapIYxYbGwtvb2+tp137+/tDo9Hg4sWLZVh92QkODkZAQIDWuAAcr6L89ttv8PHxwYcffggnJyfUr18f3333nbQ8ISEBycnJWmOmUqnQpEkTrTGzs7ODj4+P1MfPzw9GRkY4fvx42R1MGWnWrBliYmJw9epVAMDZs2fx559/olOnTgA4Zq+ir/GJjY1Fq1atYGZmJvXx9/dHfHw8Hj58WEZHYzjp6elQKBTSeycNPWayeYJyafv777+Rl5dX6BUUzs7OuHLlioGqKj/y8/MxduxYNG/eHLVr1wYAJCcnw8zMrNBLVp2dnZGcnCz1KWpMC5bJzaZNm/DXX3/h5MmThZZxvAq7efMmVq1ahfH
jx+P//b//h5MnT2L06NEwMzNDUFCQdMxFjcmzY+bk5KS13MTEBPb29rIcsylTpkCj0aBmzZowNjZGXl4e5syZgwEDBgAAx+wV9DU+ycnJ8PDwKLSNgmUVKlQolfrLg8zMTEyePBn9+/eXXvxp6DFj2CG9CA4OxoULF/Dnn38aupRy686dOxgzZgyio6NhYWFh6HLeCPn5+fDx8cHcuXMBAPXr18eFCxewevVqBAUFGbi68unHH39EVFQUNmzYgFq1aiEuLg5jx46Fi4sLx4xKXU5ODvr06QMhBFatWmXociQ8jVVMFStWhLGxcaE7Y1JSUqBWqw1UVfkQEhKCHTt24MCBA6hcubLUrlarkZ2djbS0NK3+z46ZWq0uckwLlsnJ6dOnkZqaigYNGsDExAQmJiY4dOgQli1bBhMTEzg7O3O8nlOpUiV4eXlptXl6eiIxMRHA/x3zy/5dqtVqpKamai3Pzc3FgwcPZDlmkyZNwpQpU9CvXz94e3vj448/xrhx4xAWFgaAY/Yq+hqft+3fKvB/Qef27duIjo6WZnUAw48Zw04xmZmZoWHDhoiJiZHa8vPzERMTA19fXwNWZjhCCISEhGDbtm3Yv39/oenHhg0bwtTUVGvM4uPjkZiYKI2Zr68vzp8/r/WPoOAfyfO/5N507du3x/nz5xEXFyd9fHx8MGDAAOnPHC9tzZs3L/Q4g6tXr8LNzQ0A4OHhAbVarTVmGo0Gx48f1xqztLQ0nD59Wuqzf/9+5Ofno0mTJmVwFGXryZMnMDLS/k+7sbEx8vPzAXDMXkVf4+Pr64vDhw8jJydH6hMdHY0aNWrI8hRWQdC5du0a9u3bBwcHB63lBh+z177E+S2yadMmYW5uLiIjI8WlS5fE8OHDhZ2dndadMW+TESNGCJVKJQ4ePCiSkpKkz5MnT6Q+n376qahSpYrYv3+/OHXqlPD19RW+vr7S8oJbqTt06CDi4uLEnj17hKOjo2xvpX7es3djCcHxet6JEyeEiYmJmDNnjrh27ZqIiooSVlZWYv369VKfefPmCTs7O/Hrr7+Kc+fOiW7duhV5m3D9+vXF8ePHxZ9//imqVasmm9uonxcUFCTeeecd6dbzrVu3iooVK4rPPvtM6vO2j9mjR4/EmTNnxJkzZwQAsXjxYnHmzBnpziF9jE9aWppwdnYWH3/8sbhw4YLYtGmTsLKyemNvPX/ZmGVnZ4sPPvhAVK5cWcTFxWn9Pnj2zipDjhnDTgktX75cVKlSRZiZmYnGjRuLY8eOGbokgwFQ5CciIkLq8++//4qRI0eKChUqCCsrK9GjRw+RlJSktZ1bt26JTp06CUtLS1GxYkUxYcIEkZOTU8ZHYxjPhx2OV2Hbt28XtWvXFubm5qJmzZri22+/1Vqen58vpk6dKpydnYW5ublo3769iI+P1+rzzz//iP79+wsbGxuhVCrF4MGDxaNHj8ryMMqMRqMRY8aMEVWqVBEWFhbi3XffFZ9//rnWL523fcwOHDhQ5H+7goKChBD6G5+zZ8+KFi1aCHNzc/HOO++IefPmldUh6t3LxiwhIeGFvw8OHDggbcOQY6YQ4pnHahIRERHJDK/ZISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEi2XJ3d8fSpUsNXQYRGRjDDhG9VGxsLIyNjREQEGDoUoiIdMKwQ0QvFR4ejlGjRuHw4cO4d++eocuRtWdfgEhE+sOwQ0QvlJGRgc2bN2PEiBEICAhAZGSk1vKDBw9CoVAgJiYGPj4+sLKyQrNmzQq9qXzVqlWoWrUqzMzMUKNGDfzwww9ayxUKBdasWYMuXbrAysoKnp6eiI2NxfXr19GmTRtYW1ujWbNmuHHjhrTOjRs30K1bNzg7O8PGxgaNGjXCvn37XngsQ4YMQZcuXbTacnJy4OTkhPDw8CLXuX37Nrp27YoKFSrA2toatWrVwq5du6TlFy9eRJcuXaB
UKmFra4uWLVtKNebn52PmzJmoXLkyzM3NUa9ePezZs0da99atW1AoFNi8eTNat24NCwsLREVFAQD++9//wtPTExYWFqhZsyZWrlz5wuMiomLQyxu2iEiWwsPDhY+PjxDi6Qs5q1atKvLz86XlBS8HbNKkiTh48KC4ePGiaNmypWjWrJnUZ+vWrcLU1FSsWLFCxMfHi0WLFgljY2Oxf/9+qQ8A8c4774jNmzeL+Ph40b17d+Hu7i7atWsn9uzZIy5duiSaNm0qOnbsKK0TFxcnVq9eLc6fPy+uXr0qvvjiC2FhYSG9uVoIIdzc3MSSJUuEEEIcOXJEGBsbi3v37mnVZm1t/cIXXAYEBIj3339fnDt3Tty4cUNs375dHDp0SAghxN27d4W9vb3o2bOnOHnypIiPjxfff/+9uHLlihBCiMWLFwulUik2btworly5Ij777DNhamoqrl69KoQQ0ssT3d3dxc8//yxu3rwp7t27J9avXy8qVaoktf3888/C3t5eREZG6vQzJCK+9ZyIXqJZs2Zi6dKlQgghcnJyRMWKFbXeYlwQdvbt2ye17dy5UwAQ//77r7SNTz75RGu7H374oejcubP0HYD44osvpO+xsbECgAgPD5faNm7cKCwsLF5ab61atcTy5cul78+GHSGE8PLyEl999ZX0vWvXrmLQoEEv3J63t7eYMWNGkctCQ0OFh4eHyM7OLnK5i4uLmDNnjlZbo0aNxMiRI4UQ/xd2Csa3QNWqVcWGDRu02mbNmiV8fX1fWCcRvRxPYxFRkeLj43HixAn0798fAGBiYoK+ffsWecqnTp060p8rVaoEAEhNTQUAXL58Gc2bN9fq37x5c1y+fPmF23B2dgYAeHt7a7VlZmZCo9EAeHqKbeLEifD09ISdnR1sbGxw+fJlJCYmvvCYhg0bhoiICABASkoKdu/ejSFDhryw/+jRozF79mw0b94c06dPx7lz56RlcXFxaNmyJUxNTQutp9FocO/evWIdt4+Pj/Tnx48f48aNGxg6dChsbGykz+zZs7VO4RFRyZgYugAiKp/Cw8ORm5sLFxcXqU0IAXNzc3zzzTdQqVRS+7O/8BUKBYCn16yURFHbeNl2J06ciOjoaCxcuBDvvfceLC0t0bt3b2RnZ79wH4GBgZgyZQpiY2Nx9OhReHh4oGXLli/sP2zYMPj7+2Pnzp3Yu3cvwsLCsGjRIowaNQqWlpYlOr4Xsba2lv6ckZEBAPjuu+/QpEkTrX7GxsZ62R/R24gzO0RUSG5uLtatW4dFixYhLi5O+pw9exYuLi7YuHFjsbfl6emJI0eOaLUdOXIEXl5er1XjkSNHMGjQIPTo0QPe3t5Qq9W4devWS9dxcHBA9+7dERERgcjISAwePPiV+3F1dcWnn36KrVu3YsKECfjuu+8APJ2J+uOPP4q8g0qpVMLFxaXEx+3s7AwXFxfcvHkT7733ntbHw8PjlbUSUdE4s0NEhezYsQMPHz7E0KFDtWZwAKBXr14IDw/Hp59+WqxtTZo0CX369EH9+vXh5+eH7du3Y+vWrS+9c6o4qlWrhq1bt6Jr165QKBSYOnVqsWaThg0bhi5duiAvLw9BQUEv7Tt27Fh06tQJ1atXx8OHD3HgwAF4enoCAEJCQrB8+XL069cPoaGhUKlUOHbsGBo3bowaNWpg0qRJmD59OqpWrYp69eohIiICcXFx0h1XL/Lll19i9OjRUKlU6NixI7KysnDq1Ck8fPgQ48ePL/4AEZGEYYeICgkPD4efn1+hoAM8DTvz58/Xun7lZbp3746vv/4aCxcuxJgxY+Dh4YGIiAi0adPmtWpcvHgxhgwZgmbNmqFixYqYPHmydD3Py/j5+aFSpUqoVauW1im6ouTl5SE4OBh3796FUqlEx44dsWTJEgBPZ4n279+PSZMmoXXr1jA2Nka9evWk63RGjx6N9PR0TJgwAampqfDy8sJvv/2GatWqvXSfw4YNg5WVFRYsWIBJkybB2to
a3t7eGDt2bPEGhogKUQghhKGLICIqKxkZGXjnnXcQERGBnj17GrocIioDnNkhordCfn4+/v77byxatAh2dnb44IMPDF0SEZURhh0ieiskJibCw8MDlStXRmRkJExM+J8/orcFT2MRERGRrPHWcyIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikrX/DwmDTMCRKI7YAAAAAElFTkSuQmCC", + "application/vnd.jupyter.widget-view+json": { + "model_id": "ec6c3dcf6ee742048eeb88a37650d650", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Evaluating: 0%| | 0/10 [00:00" ] @@ -396,15 +377,12 @@ ], "source": [ "scripts.train_detector(\n", - " scripts.TrainDetectorConfig(\n", - " path=(detector_path := get_path(\"logs/demo/detector\")),\n", - " task=tasks.backdoor_detection(\n", - " model, train_data, val_data, data.CornerPixelBackdoor()\n", - " ),\n", - " detector=detectors.MahalanobisDetector(save_path=detector_path),\n", - " train=detectors.MahalanobisTrainConfig(),\n", - " num_classes=10,\n", - " )\n", + " save_path=(detector_path := utils.log_path(\"logs/demo/detector\")),\n", + " task=tasks.backdoor_detection(\n", + " model, train_data, val_data, data.CornerPixelBackdoor()\n", + " ),\n", + " detector=detectors.MahalanobisDetector(),\n", + " num_classes=10,\n", ")" ] }, @@ -412,24 +390,24 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "As we can see, this was a trivial detection task. As an ablation, we can test whether the detector specifically flags backdoored inputs as anomalous, or just anything out of distribution:" + "As we can see, this was a trivial detection task. As an ablation, we can test whether the detector specifically flags backdoored inputs as anomalous, or just anything out of distribution. 
Let's again reload the detector just to show how that works:" ] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 10, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "\u001b[32m2024-02-29 22:14:35.637\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36mload_weights\u001b[0m:\u001b[36m232\u001b[0m - \u001b[1mLoading detector from logs/demo/detector/2024-02-29_22-14-27/detector\u001b[0m\n" + "\u001b[32m2024-03-02 18:08:42.519\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36mload_weights\u001b[0m:\u001b[36m224\u001b[0m - \u001b[1mLoading detector from logs/demo/detector/2024-03-02_18-08-34/detector\u001b[0m\n" ] } ], "source": [ - "detector = detectors.MahalanobisDetector(save_path=detector_path / \"ood_eval\")\n", + "detector = detectors.MahalanobisDetector()\n", "# TODO: The fact that weights are saved in \"detector\" is just a convention used by\n", "# the train_detector script, this is kind of weird.\n", "detector.load_weights(detector_path / \"detector\")" @@ -437,20 +415,20 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 11, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "\u001b[32m2024-02-29 22:14:36.725\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36meval\u001b[0m:\u001b[36m149\u001b[0m - \u001b[1mAUC_ROC: 0.9934\u001b[0m\n", - "\u001b[32m2024-02-29 22:14:36.726\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36meval\u001b[0m:\u001b[36m150\u001b[0m - \u001b[1mAP: 0.9779\u001b[0m\n" + "\u001b[32m2024-03-02 18:08:43.409\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36meval\u001b[0m:\u001b[36m139\u001b[0m - \u001b[1mAUC_ROC: 0.9940\u001b[0m\n", + "\u001b[32m2024-03-02 
18:08:43.409\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mcupbearer.detectors.anomaly_detector\u001b[0m:\u001b[36meval\u001b[0m:\u001b[36m140\u001b[0m - \u001b[1mAP: 0.9784\u001b[0m\n" ] }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjsAAAHHCAYAAABZbpmkAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAABVb0lEQVR4nO3deVhUVeMH8O+wb84gKIMkCpkLKC6Jy7gvJCqaay5R4lKWQm5Jyi81d8w9LcV6CbRwydIWt0Q0LUVcEndxQ7GXzVIY0djP7w8f7usIKo4DM16/n+eZ53HOOXPvuQd0vp577r0KIYQAERERkUyZGbsDRERERBWJYYeIiIhkjWGHiIiIZI1hh4iIiGSNYYeIiIhkjWGHiIiIZI1hh4iIiGSNYYeIiIhkjWGHiIiIZI1hh4hKUSgUmDlzprG78VyIjo6GQqHAtWvXpLJOnTqhU6dOlbL/h39WM2fOhEKhwN9//10p+/fw8MDw4cMrZV9E+mLYIdLDqlWroFAo0KpVK2N3hWTi0KFDmDlzJrKysozdlVJMuW9E5WFh7A4QPY9iYmLg4eGBI0eO4PLly3jllVeM3SUyIbt3737qzxw6dAizZs3C8OHD4ejoWO7P/fvvv7CwqNh/yh/Xt6SkJJiZ8f/NZNr4G0r0lJKTk3Ho0CEsXboU1atXR0xMjLG7RE9w9+7dSt2flZUVrKysKmz7xcXFyM3NBQDY2NhUeNh5HGtra1haWhpt/0TlwbBD9JRiYmJQtWpVBAQEYODAgWWGnWvXrkGhUGDx4sX48ssvUadOHVhbW6NFixY4evRoqfZ79+5F+/btYW9vD0dHR/Tp0wfnz5/XaVOyFuPixYt46623oFKpUL16dUyfPh1CCNy4cQN9+vSBUqmEq6srlixZovP5/Px8zJgxA82bN4dKpYK9vT3at2+Pffv2PfZ49+3bB4VCga1bt5aqW79+PRQKBeLj4x/5+YKCAsyaNQt169aFjY0NnJ2d0a5dO8TGxuq0u3DhAgYNGoTq1avD1tYW9evXx8cff6zT5sSJE+jRoweUSiUcHBzQtWtXHD58WKdNyRqa/fv3Y+zYsXBxcUHNmjWl+p07d0pjXaVKFQQEBODs2bOPHYMSZ8+eRZcuXWBra4uaNWti7ty5KC4uLtWurDU7K1euRMOGDWFnZ4eqVavC19cX69evB3D/ZxsaGgoA8PT0hEKh0FkHpFAoEBISgpiYGDRs2BDW1tbYtWuXVFfW+qq///4bgwYNglKphLOzM8aPHy8FJOB/v6PR0dGlPvvgNp/Ut7LW7Fy9ehVvvPEGnJycYGdnh9atW2P79u06bX777TcoFAp89913mDdvHmrWrAkbGxt07doVly9fLtUnomfB01hETykmJgb9+/eHlZUVhg4ditWrV+Po0aNo0aJFqbbr16/HnTt38N5770GhUGDhwoXo378/rl69Kv1veM+ePejRowdefvllzJw5E//++y9WrlyJtm3b4s8//4SHh4fONgcPHgwvLy8sWLAA27dvx9y5c+Hk5IQ1a9agS5cu+PTTTxETE4PJkyejRYsW6NChAwBAq9XiP//5D4YOHYp3330Xd+7cQWRkJPz9/XHkyBE0bdq0zOPt1KkT3N3dERMTg379+pUaizp16kCj0TxyvGbOnInw8HC88847aNmyJbRaLY4dO4Y///wTr732GgDg1KlTaN++PSwtLTF69Gh4eHjgypUr+OWXXzBv3jwA94NG+/btoVQq8dFHH8HS0hJr1qxBp06dsH///lLrp8aO
HYvq1atjxowZ0szON998g6CgIPj7++PTTz/FvXv3sHr1arRr1w4nTpwoNdYPSk9PR+fOnVFYWIipU6fC3t4eX375JWxtbR/5mRJfffUVxo0bh4EDB0qh49SpU0hISMCbb76J/v374+LFi9iwYQOWLVuGatWqAQCqV68ubWPv3r347rvvEBISgmrVqj22rwAwaNAgeHh4IDw8HIcPH8aKFStw+/ZtrFu37on9fVB5+vagjIwMtGnTBvfu3cO4cePg7OyMtWvX4vXXX8f3339f6ndowYIFMDMzw+TJk5GdnY2FCxciMDAQCQkJT9VPoscSRFRux44dEwBEbGysEEKI4uJiUbNmTTF+/HiddsnJyQKAcHZ2Frdu3ZLKf/rpJwFA/PLLL1JZ06ZNhYuLi/jnn3+kspMnTwozMzMxbNgwqeyTTz4RAMTo0aOlssLCQlGzZk2hUCjEggULpPLbt28LW1tbERQUpNM2Ly9Pp5+3b98WarVajBw5UqccgPjkk0+k92FhYcLa2lpkZWVJZZmZmcLCwkKnXVmaNGkiAgICHtumQ4cOokqVKuL69es65cXFxdKf+/btK6ysrMSVK1ekstTUVFGlShXRoUMHqSwqKkoAEO3atROFhYVS+Z07d4Sjo6N49913dfaRnp4uVCpVqfKHTZgwQQAQCQkJUllmZqZQqVQCgEhOTpbKO3bsKDp27Ci979Onj2jYsOFjt79o0aJS2ykBQJiZmYmzZ8+WWffgz6Dk9+T111/XaTd27FgBQJw8eVII8b/f0aioqCdu83F9q127ts7vWck4/f7771LZnTt3hKenp/Dw8BBFRUVCCCH27dsnAAgvLy+d38vPPvtMABCnT58utS8iffE0FtFTiImJgVqtRufOnQHcn+4fPHgwNm7ciKKiolLtBw8ejKpVq0rv27dvD+D+ND8ApKWlITExEcOHD4eTk5PUrnHjxnjttdewY8eOUtt85513pD+bm5vD19cXQgiMGjVKKnd0dET9+vWl/ZS0LVlHUlxcjFu3bqGwsBC+vr74888/H3vcw4YNQ15eHr7//nupbNOmTSgsLMRbb7312M86Ojri7NmzuHTpUpn1N2/exIEDBzBy5EjUqlVLp06hUAAAioqKsHv3bvTt2xcvv/yyVF+jRg28+eab+OOPP6DVanU+++6778Lc3Fx6Hxsbi6ysLAwdOhR///239DI3N0erVq2eeDpvx44daN26NVq2bCmVVa9eHYGBgY/9XMkY/PXXX2Wewiyvjh07wtvbu9ztg4ODdd5/8MEHAFDm75Qh7dixAy1btkS7du2kMgcHB4wePRrXrl3DuXPndNqPGDFCZ33Tw39HiAyBYYeonIqKirBx40Z07twZycnJuHz5Mi5fvoxWrVohIyMDcXFxpT7z8Jd3SfC5ffs2AOD69esAgPr165f6rJeXF/7+++9Si2sf3qZKpYKNjY10euHB8pL9lFi7di0aN24srZ2pXr06tm/fjuzs7Mcee4MGDdCiRQud9UkxMTFo3br1E69Emz17NrKyslCvXj34+PggNDQUp06dkupLvtQaNWr0yG3cvHkT9+7de+Q4FRcX48aNGzrlnp6eOu9LwlaXLl1QvXp1ndfu3buRmZn52OO4fv066tatW6q8rD49bMqUKXBwcEDLli1Rt25dBAcH4+DBg0/83IMePp4nebivderUgZmZmc79gCrC9evXH/lzKql/0JP+jhAZAtfsEJXT3r17kZaWho0bN2Ljxo2l6mNiYtCtWzedsgdnFh4khNC7H2Vtszz7+fbbbzF8+HD07dsXoaGhcHFxgbm5OcLDw3HlypUn7nfYsGEYP348/vrrL+Tl5eHw4cP4/PPPn/i5Dh064MqVK/jpp5+we/du/Oc//8GyZcsQERGhM0tlaA+vpSlZSPzNN9/A1dW1VPuKvKLJy8sLSUlJ2LZtG3bt2oUffvgBq1atwowZMzBr1qxybaM8a4Mep2SW7FHvS5Q1Q1mRKuLv
CNHDGHaIyikmJgYuLi744osvStVt2bIFW7duRURExFN9KdWuXRvA/XuVPOzChQuoVq0a7O3t9e/0A77//nu8/PLL2LJli84X3SeffFKuzw8ZMgSTJk3Chg0b8O+//8LS0hKDBw8u12ednJwwYsQIjBgxAjk5OejQoQNmzpyJd955RzotdebMmUd+vnr16rCzs3vkOJmZmcHd3f2xfahTpw4AwMXFBX5+fuXq94Nq165d5qm4svpUFnt7ewwePBiDBw9Gfn4++vfvj3nz5iEsLAw2NjaPDB/6unTpks5s0OXLl1FcXCwtbC6ZQXn4RoEPz7wAjw5GZaldu/Yjf04l9USVjaexiMrh33//xZYtW9CrVy8MHDiw1CskJAR37tzBzz///FTbrVGjBpo2bYq1a9fqfOmcOXMGu3fvRs+ePQ12DCX/g37wf8wJCQmPvWz8QdWqVUOPHj3w7bffIiYmBt27dy916qws//zzj857BwcHvPLKK8jLywNwP8h06NABX3/9NVJSUnTalvTV3Nwc3bp1w08//aRzGiYjIwPr169Hu3btoFQqH9sPf39/KJVKzJ8/HwUFBaXqb968+djP9+zZE4cPH8aRI0d0PlOe+yw9PAZWVlbw9vaGEELqS0moNdRdih8O5StXrgQA9OjRAwCgVCpRrVo1HDhwQKfdqlWrSm3rafrWs2dPHDlyROf36u7du/jyyy/h4eHxVOuOiAyFMztE5fDzzz/jzp07eP3118usb926tXSDwfLOdpRYtGgRevToAY1Gg1GjRkmXnqtUKoM+n6pXr17YsmUL+vXrh4CAACQnJyMiIgLe3t7Iyckp1zaGDRuGgQMHAgDmzJlTrs94e3ujU6dOaN68OZycnHDs2DF8//33CAkJkdqsWLEC7dq1w6uvvorRo0fD09MT165dw/bt25GYmAgAmDt3LmJjY9GuXTuMHTsWFhYWWLNmDfLy8rBw4cIn9kOpVGL16tV4++238eqrr2LIkCGoXr06UlJSsH37drRt2/axp+U++ugjfPPNN+jevTvGjx8vXXpeu3ZtnTVIZenWrRtcXV3Rtm1bqNVqnD9/Hp9//jkCAgJQpUoVAEDz5s0BAB9//DGGDBkCS0tL9O7dW++ZveTkZLz++uvo3r074uPj8e233+LNN99EkyZNpDbvvPMOFixYgHfeeQe+vr44cOAALl68WGpbT9O3qVOnYsOGDejRowfGjRsHJycnrF27FsnJyfjhhx94t2UyDiNeCUb03Ojdu7ewsbERd+/efWSb4cOHC0tLS/H3339Ll/UuWrSoVDs8dFmvEELs2bNHtG3bVtja2gqlUil69+4tzp07p9Om5JLimzdv6pQHBQUJe3v7Uvvp2LGjzuXOxcXFYv78+aJ27drC2tpaNGvWTGzbtk0EBQWJ2rVrP7GPQgiRl5cnqlatKlQqlfj3338fORYPmjt3rmjZsqVwdHQUtra2okGDBmLevHkiPz9fp92ZM2dEv379hKOjo7CxsRH169cX06dP12nz559/Cn9/f+Hg4CDs7OxE586dxaFDh3TalFx6fvTo0TL7s2/fPuHv7y9UKpWwsbERderUEcOHDxfHjh174rGcOnVKdOzYUdjY2IiXXnpJzJkzR0RGRj7x0vM1a9aIDh06CGdnZ2FtbS3q1KkjQkNDRXZ2ts7258yZI1566SVhZmams00AIjg4uMw+PfyzKvk9OXfunBg4cKCoUqWKqFq1qggJCSn1M7t3754YNWqUUKlUokqVKmLQoEEiMzOzzJ//o/r28KXnQghx5coVMXDgQOln2bJlS7Ft2zadNiWXnm/evFmn/HGXxBPpSyEEV4ERUfkUFhbCzc0NvXv3RmRkpLG7Q0RULpxPJKJy+/HHH3Hz5k0MGzbM2F0hIio3zuwQ0RMlJCTg1KlTmDNnDqpVq/bEmxASEZkSzuwQ0ROtXr0aY8aMgYuLy1M/W4mIyNg4s0NERESyxpkdIiIikjWGHSIiIpI1
3lQQ95+Zk5qaiipVqhj8lu1ERERUMYQQuHPnDtzc3B57w0qGHQCpqalPfK4OERERmaYbN26gZs2aj6xn2AGk27XfuHHjic/XISIiItOg1Wrh7u4ufY8/CsMO/vdEX6VSybBDRET0nHnSEhQuUCYiIiJZY9ghIiIiWWPYISIiIlnjmp1yKi4uRn5+vrG7QZXE0tIS5ubmxu4GEREZAMNOOeTn5yM5ORnFxcXG7gpVIkdHR7i6uvLeS0REzzmGnScQQiAtLQ3m5uZwd3d/7E2LSB6EELh37x4yMzMBADVq1DByj4iI6Fkw7DxBYWEh7t27Bzc3N9jZ2Rm7O1RJbG1tAQCZmZlwcXHhKS0ioucYpymeoKioCABgZWVl5J5QZSsJtwUFBUbuCRERPQuGnXLiuo0XD3/mRETywLBDREREssawQybjt99+g0KhQFZWlrG7QkREMsIFynpaFnuxUvc38bV6T9V++PDhWLt2LcLDwzF16lSp/Mcff0S/fv0ghDB0F4mIiEwSZ3ZkzMbGBp9++ilu375tsG3yxopERPS8YdiRMT8/P7i6uiI8PPyRbX744Qc0bNgQ1tbW8PDwwJIlS3TqPTw8MGfOHAwbNgxKpRKjR49GdHQ0HB0dsW3bNtSvXx92dnYYOHAg7t27h7Vr18LDwwNVq1bFuHHjpKvZAOCbb76Br68vqlSpAldXV7z55pvSvWyIiIgqCsOOjJmbm2P+/PlYuXIl/vrrr1L1x48fx6BBgzBkyBCcPn0aM2fOxPTp0xEdHa3TbvHixWjSpAlOnDiB6dOnAwDu3buHFStWYOPGjdi1axd+++039OvXDzt27MCOHTvwzTffYM2aNfj++++l7RQUFGDOnDk4efIkfvzxR1y7dg3Dhw+vyCEgIiLimh2569evH5o2bYpPPvkEkZGROnVLly5F165dpQBTr149nDt3DosWLdIJIV26dMGHH34ovf/9999RUFCA1atXo06dOgCAgQMH4ptvvkFGRgYcHBzg7e2Nzp07Y9++fRg8eDAAYOTIkdI2Xn75ZaxYsQItWrRATk4OHBwcKmoIiIjkZd9Ds/Wdw4zTj+eIUWd2ioqKMH36dHh6esLW1hZ16tTBnDlzdBbPCiEwY8YM1KhRA7a2tvDz88OlS5d0tnPr1i0EBgZCqVTC0dERo0aNQk5OTmUfjsn69NNPsXbtWpw/f16n/Pz582jbtq1OWdu2bXHp0iWd00++vr6ltmlnZycFHQBQq9Xw8PDQCS1qtVrnNNXx48fRu3dv1KpVC1WqVEHHjh0BACkpKc92gERERI9h1LDz6aefYvXq1fj8889x/vx5fPrpp1i4cCFWrlwptVm4cCFWrFiBiIgIJCQkwN7eHv7+/sjNzZXaBAYG4uzZs4iNjcW2bdtw4MABjB492hiHZJI6dOgAf39/hIXpl/7t7e1LlVlaWuq8VygUZZaVPDz17t278Pf3h1KpRExMDI4ePYqtW7cC4KJnIiKqWEY9jXXo0CH06dMHAQEBAO4vht2wYQOOHDkC4P6szvLlyzFt2jT06dMHALBu3Tqo1Wr8+OOPGDJkCM6fP49du3bh6NGj0gzEypUr0bNnTyxevBhubm7GOTgTs2DBAjRt2hT169eXyry8vHDw4EGddgcPHkS9evUM/iyoCxcu4J9//sGCBQvg7u4OADh27JhB90FE9EJ6+LQWwFNbDzHqzE6bNm0QFxeHixfv37Pm5MmT+OOPP9CjRw8AQHJyMtLT0+Hn5yd9RqVSoVWrVoiPjwcAxMfHw9HRUedUi5+fH8zMzJCQkFDmfvPy8qDVanVecufj44PAwECsWLFCKvvwww8RFxeHOXPm4OLFi1i7di0+//xzTJ482eD7r1WrFqysrLBy5UpcvXoVP//8M+bMmWPw/RARET3MqGFn6tSpGDJkCBo0aABLS0s0a9YMEyZMQGBgIAAgPT0dwP21Hw9Sq9VSXXp6OlxcXHTqLSws4OTkJLV5WHh4OFQq
lfQqmWmQu9mzZ0unlQDg1VdfxXfffYeNGzeiUaNGmDFjBmbPnl0hV0hVr14d0dHR2Lx5M7y9vbFgwQIsXrzY4PshIiJ6mEIY8Va6GzduRGhoKBYtWoSGDRsiMTEREyZMwNKlSxEUFIRDhw6hbdu2SE1NRY0aNaTPDRo0CAqFAps2bcL8+fOxdu1aJCUl6WzbxcUFs2bNwpgxY0rtNy8vD3l5edJ7rVYLd3d3ZGdnQ6lU6rTNzc1FcnIyPD09YWNjY+ARIFPGnz0RmaSyTls97AU5jaXVaqFSqcr8/n6QUdfshIaGSrM7wP1TLdevX0d4eDiCgoLg6uoKAMjIyNAJOxkZGWjatCkAwNXVtdSN6QoLC3Hr1i3p8w+ztraGtbV1BRwRERERmRqjnsa6d+8ezMx0u2Bubi6davH09ISrqyvi4uKkeq1Wi4SEBGg0GgCARqNBVlYWjh8/LrXZu3cviouL0apVq0o4CiIiIjJlRp3Z6d27N+bNm4datWqhYcOGOHHiBJYuXSrdfE6hUGDChAmYO3cu6tatC09PT0yfPh1ubm7o27cvgPtXFHXv3h3vvvsuIiIiUFBQgJCQEAwZMoRXYhEREZFxw87KlSsxffp0jB07FpmZmXBzc8N7772HGTNmSG0++ugj3L17F6NHj0ZWVhbatWuHXbt26ayhiImJQUhICLp27QozMzMMGDBA56ojIiIienEZdYGyqXjcAicuUn1x8WdPRCaJC5Ql5V2gzAeBEhERkawx7BAREZGsMewQERGRrDHsEBERkawx7JBJ8vDwwPLly43dDSIikgGjXnr+XCvPanhD0nNlfXx8PNq1a4fu3btj+/btBu4UERGR6ePMjsxFRkbigw8+wIEDB5Cammrs7hAREVU6hh0Zy8nJwaZNmzBmzBgEBAQgOjpaqvvtt9+gUCgQFxcHX19f2NnZoU2bNqUeqLp69WrUqVMHVlZWqF+/Pr755hudeoVCgTVr1qBXr16ws7ODl5cX4uPjcfnyZXTq1An29vZo06YNrly5In3mypUr6NOnD9RqNRwcHNCiRQvs2bPnsceSkpKCPn36wMHBAUqlEoMGDUJGRoZUP3z4cOmu2iUmTJiATp06Se+///57+Pj4wNbWFs7OzvDz88Pdu3fLOZpERPS8YtiRse+++w4NGjRA/fr18dZbb+Hrr7/Gw/eQ/Pjjj7FkyRIcO3YMFhYW0qM6AGDr1q0YP348PvzwQ5w5cwbvvfceRowYgX379ulsY86cORg2bBgSExPRoEEDvPnmm3jvvfcQFhaGY8eOQQiBkJAQqX1OTg569uyJuLg4nDhxAt27d0fv3r2RkpJS5nEUFxejT58+uHXrFvbv34/Y2FhcvXoVgwcPLvdYpKWlYejQoRg5ciTOnz+P3377Df379y81HkREJmVfeOkXPTWu2ZGxyMhIvPXWWwCA7t27Izs7G/v379eZ7Zg3bx46duwIAJg6dSoCAgKQm5sLGxsbLF68GMOHD8fYsWMBAJMmTcLhw4exePFidO7cWdrGiBEjMGjQIADAlClToNFoMH36dPj7+wMAxo8fjxEjRkjtmzRpgiZNmkjv58yZg61bt+Lnn3/WCUUl4uLicPr0aSQnJ8Pd3R0AsG7dOjRs2BBHjx5FixYtnjgWaWlpKCwsRP/+/VG7dm0AgI+Pz5MHkYiInnuc2ZGppKQkHDlyBEOHDgUAWFhYYPDgwYiMjNRp17hxY+nPNWrUAABkZmYCAM6fP4+2bdvqtG/bti3Onz//yG2o1WoAukFCrVYjNzcXWq0WwP2ZncmTJ8PLywuOjo5wcHDA+fPnHzmzc/78ebi7u0tBBwC8vb3h6OhYqi+P0qRJE3Tt2hU+Pj5444038NVXX+H27dvl+iwRET3fGHZkKjIyEoWFhXBzc4OFhQUsLCywevVq/PDDD8jOzpbaWVpaSn9WKBQA7p82ehplbeNx2508eTK2bt2K+fPn4/fff0diYiJ8fHyQn5//lEf5
P2ZmZqVOSRUUFEh/Njc3R2xsLHbu3Alvb2+sXLkS9evXR3Jyst77JCKi5wPDjgwVFhZi3bp1WLJkCRITE6XXyZMn4ebmhg0bNpRrO15eXjh48KBO2cGDB+Ht7f1M/Tt48CCGDx+Ofv36wcfHB66urrh27dpj+3Hjxg3cuHFDKjt37hyysrKkvlSvXh1paWk6n0tMTNR5r1Ao0LZtW8yaNQsnTpyAlZUVtm7d+kzHQkREpo9rdmRo27ZtuH37NkaNGgWVSqVTN2DAAERGRmLRokVP3E5oaCgGDRqEZs2awc/PD7/88gu2bNnyxCunnqRu3brYsmULevfuDYVCgenTpz92NsnPzw8+Pj4IDAzE8uXLUVhYiLFjx6Jjx47w9fUFAHTp0gWLFi3CunXroNFo8O233+LMmTNo1qwZACAhIQFxcXHo1q0bXFxckJCQgJs3b8LLy+uZjoWIiEwfZ3ZkKDIyEn5+fqWCDnA/7Bw7dgynTp164nb69u2Lzz77DIsXL0bDhg2xZs0aREVF6Sxw1sfSpUtRtWpVtGnTBr1794a/vz9effXVR7ZXKBT46aefULVqVXTo0AF+fn54+eWXsWnTJqmNv78/pk+fjo8++ggtWrTAnTt3MGzYMKleqVTiwIED6NmzJ+rVq4dp06ZhyZIl6NGjxzMdCxERmT6F4LW30Gq1UKlUyM7OhlKp1KnLzc1FcnIyPD09YWNjY6QekjHwZ09ERqfvpeZ63nX/efO47+8HcWaHiIiIZI1hh4iIiGSNYYeIiIhkjWGHiIiIZI1hp5y4jvvFw585EZE8MOw8gbm5OQA809196fl07949ALp3gyYioucPbyr4BBYWFrCzs8PNmzdhaWkJMzPmQ7kTQuDevXvIzMyEo6OjFHiJiOj5xLDzBAqFAjVq1EBycjKuX79u7O5QJXJ0dISrq6uxu0FERM+IYaccrKysULduXZ7KeoFYWlpyRoeISCYYdsrJzMyMd9ElIiJ6DnEBChEREckaZ3aIiIhMgb7PwaIn4swOERERyRrDDhEREckaww4RERHJGsMOERERyZpRw46HhwcUCkWpV3BwMAAgNzcXwcHBcHZ2hoODAwYMGICMjAydbaSkpCAgIAB2dnZwcXFBaGgoCgsLjXE4REREZIKMGnaOHj2KtLQ06RUbGwsAeOONNwAAEydOxC+//ILNmzdj//79SE1NRf/+/aXPFxUVISAgAPn5+Th06BDWrl2L6OhozJgxwyjHQ0RERKZHIUzo0c4TJkzAtm3bcOnSJWi1WlSvXh3r16/HwIEDAQAXLlyAl5cX4uPj0bp1a+zcuRO9evVCamoq1Go1ACAiIgJTpkzBzZs3YWVlVa79arVaqFQqZGdnQ6lUVtjxERERPZIhLz3vHGa4bZmw8n5/m8yanfz8fHz77bcYOXIkFAoFjh8/joKCAvj5+UltGjRogFq1aiE+Ph4AEB8fDx8fHynoAIC/vz+0Wi3Onj37yH3l5eVBq9XqvIiIiEieTOamgj/++COysrIwfPhwAEB6ejqsrKzg6Oio006tViM9PV1q82DQKakvqXuU8PBwzJo1y3CdJyIiMiUPzxK9IDM9j2IyMzuRkZHo0aMH3NzcKnxfYWFhyM7Oll43btyo8H0SERGRcZjEzM7169exZ88ebNmyRSpzdXVFfn4+srKydGZ3MjIy4OrqKrU5cuSIzrZKrtYqaVMWa2trWFtbG/AIiIiIyFSZxMxOVFQUXFxcEBAQIJU1b94clpaWiIuLk8qSkpKQkpICjUYDANBoNDh9+jQyMzOlNrGxsVAqlfD29q68AyAiIiKTZfSZneLiYkRFRSEoKAgWFv/rjkqlwqhRozBp0iQ4OTlBqVTigw8+gEajQevWrQEA3bp1g7e3N95++20sXLgQ6enpmDZtGoKDgzlzQ0RERABMIOzs2bMHKSkpGDlyZKm6ZcuWwczMDAMGDEBeXh78/f2xatUqqd7c3Bzbtm3DmDFjoNFoYG9v
j6CgIMyePbsyD4GIiIhMmEndZ8dYeJ8dIiIyOkPeZ+dhMr0a67m7zw4RERFRRWDYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZszB2B4iIiF5I+8KN3YMXBmd2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1vggUCIiIrkr66GjncMqvx9GYvSZnf/+979466234OzsDFtbW/j4+ODYsWNSvRACM2bMQI0aNWBraws/Pz9cunRJZxu3bt1CYGAglEolHB0dMWrUKOTk5FT2oRAREZEJMmrYuX37Ntq2bQtLS0vs3LkT586dw5IlS1C1alWpzcKFC7FixQpEREQgISEB9vb28Pf3R25urtQmMDAQZ8+eRWxsLLZt24YDBw5g9OjRxjgkIiIiMjEKIYQw1s6nTp2KgwcP4vfffy+zXggBNzc3fPjhh5g8eTIAIDs7G2q1GtHR0RgyZAjOnz8Pb29vHD16FL6+vgCAXbt2oWfPnvjrr7/g5ub2xH5otVqoVCpkZ2dDqVQa7gCJiIgepaxTS5VJBqexyvv9bdSZnZ9//hm+vr5444034OLigmbNmuGrr76S6pOTk5Geng4/Pz+pTKVSoVWrVoiPjwcAxMfHw9HRUQo6AODn5wczMzMkJCRU3sEQERGRSTJq2Ll69SpWr16NunXr4tdff8WYMWMwbtw4rF27FgCQnp4OAFCr1TqfU6vVUl16ejpcXFx06i0sLODk5CS1eVheXh60Wq3Oi4iIiOTJqFdjFRcXw9fXF/PnzwcANGvWDGfOnEFERASCgoIqbL/h4eGYNWtWhW2fiIiITIdRZ3Zq1KgBb29vnTIvLy+kpKQAAFxdXQEAGRkZOm0yMjKkOldXV2RmZurUFxYW4tatW1Kbh4WFhSE7O1t63bhxwyDHQ0RERKbHqGGnbdu2SEpK0im7ePEiateuDQDw9PSEq6sr4uLipHqtVouEhARoNBoAgEajQVZWFo4fPy612bt3L4qLi9GqVasy92ttbQ2lUqnzIiIiInky6mmsiRMnok2bNpg/fz4GDRqEI0eO4Msvv8SXX34JAFAoFJgwYQLmzp2LunXrwtPTE9OnT4ebmxv69u0L4P5MUPfu3fHuu+8iIiICBQUFCAkJwZAhQ8p1JRYRERHJm1HDTosWLbB161aEhYVh9uzZ8PT0xPLlyxEYGCi1+eijj3D37l2MHj0aWVlZaNeuHXbt2gUbGxupTUxMDEJCQtC1a1eYmZlhwIABWLFihTEOiYiIiEyMUe+zYyp4nx0iIqp0vM/OM3su7rNDREREVNEYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1owadmbOnAmFQqHzatCggVSfm5uL4OBgODs7w8HBAQMGDEBGRobONlJSUhAQEAA7Ozu4uLggNDQUhYWFlX0oREREz5d94bovGbMwdgcaNmyIPXv2SO8tLP7XpYkTJ2L79u3YvHkzVCoVQkJC0L9/fxw8eBAAUFRUhICAALi6uuLQoUNIS0vDsGHDYGlpifnz51f6sRAREZHpMXrY
sbCwgKura6ny7OxsREZGYv369ejSpQsAICoqCl5eXjh8+DBat26N3bt349y5c9izZw/UajWaNm2KOXPmYMqUKZg5cyasrKwq+3CIiIjIxOh1Guvq1asG68ClS5fg5uaGl19+GYGBgUhJSQEAHD9+HAUFBfDz85PaNmjQALVq1UJ8fDwAID4+Hj4+PlCr1VIbf39/aLVanD179pH7zMvLg1ar1XkRERGRPOkVdl555RV07twZ3377LXJzc/XeeatWrRAdHY1du3Zh9erVSE5ORvv27XHnzh2kp6fDysoKjo6OOp9Rq9VIT08HAKSnp+sEnZL6krpHCQ8Ph0qlkl7u7u56HwMRERGZNr3Czp9//onGjRtj0qRJcHV1xXvvvYcjR4489XZ69OiBN954A40bN4a/vz927NiBrKwsfPfdd/p0q9zCwsKQnZ0tvW7cuFGh+yMiIiLj0SvsNG3aFJ999hlSU1Px9ddfIy0tDe3atUOjRo2wdOlS3Lx5U6/OODo6ol69erh8+TJcXV2Rn5+PrKwsnTYZGRnSGh9XV9dSV2eVvC9rHVAJa2trKJVKnRcRERHJ0zNdem5hYYH+/ftj8+bN+PTTT3H58mVMnjwZ7u7uGDZsGNLS0p5qezk5Obhy5Qpq1KiB5s2bw9LSEnFxcVJ9UlISUlJSoNFoAAAajQanT59GZmam1CY2NhZKpRLe3t7PcmhERESG8/Bl3jK/1NvUPFPYOXbsGMaOHYsaNWpg6dKlmDx5Mq5cuYLY2FikpqaiT58+j/385MmTsX//fly7dg2HDh1Cv379YG5ujqFDh0KlUmHUqFGYNGkS9u3bh+PHj2PEiBHQaDRo3bo1AKBbt27w9vbG22+/jZMnT+LXX3/FtGnTEBwcDGtr62c5NCIiIpIJvS49X7p0KaKiopCUlISePXti3bp16NmzJ8zM7mcnT09PREdHw8PD47Hb+euvvzB06FD8888/qF69Otq1a4fDhw+jevXqAIBly5bBzMwMAwYMQF5eHvz9/bFq1Srp8+bm5ti2bRvGjBkDjUYDe3t7BAUFYfbs2focFhEREcmQQgghnvZDdevWxciRIzF8+HDUqFGjzDb5+fnYsGEDgoKCnrmTFU2r1UKlUiE7O5vrd4iIyPCeh9NWncOM3YOnVt7vb71mdi5duvTENlZWVs9F0CEiIiJ502vNTlRUFDZv3lyqfPPmzVi7du0zd4qIiIjIUPQKO+Hh4ahWrVqpchcXFz6TioiIiEyKXmEnJSUFnp6epcpr164tPe6BiIiIyBToFXZcXFxw6tSpUuUnT56Es7PzM3eKiIiIyFD0CjtDhw7FuHHjsG/fPhQVFaGoqAh79+7F+PHjMWTIEEP3kYiIiEhvel2NNWfOHFy7dg1du3aFhcX9TRQXF2PYsGFcs0NEREQmRa+wY2VlhU2bNmHOnDk4efIkbG1t4ePjg9q1axu6f0RERETPRK+wU6JevXqoV6+eofpCREREZHB6hZ2ioiJER0cjLi4OmZmZKC4u1qnfu3evQTpHRERE9Kz0Cjvjx49HdHQ0AgIC0KhRIygUCkP3i4iIiMgg9Ao7GzduxHfffYeePXsauj9EREREBqXXpedWVlZ45ZVXDN0XIiIiIoPTK+x8+OGH+Oyzz6DHA9OJiIiIKpVep7H++OMP7Nu3Dzt37kTDhg1haWmpU79lyxaDdI7I0JbFXixVNvE1XlFIRCRneoUdR0dH9OvXz9B9IXokhhQiItKXXmEnKirK0P0gemoPByCGHyIiKotea3YAoLCwEHv27MGaNWtw584dAEBqaipycnIM1jkiIiKiZ6XXzM7169fRvXt3pKSkIC8vD6+99hqqVKmCTz/9FHl5eYiIiDB0P4mIiIj0otfMzvjx4+Hr64vbt2/D1tZWKu/Xrx/i4uIM1jkiIiKiZ6XXzM7vv/+OQ4cOwcrKSqfcw8MD//3vfw3SMXpxcPExERFVJL1mdoqLi1FUVFSq/K+//kKVKlWe
uVNEREREhqLXzE63bt2wfPlyfPnllwAAhUKBnJwcfPLJJ3yEBJmUsmaNiIjoxaJX2FmyZAn8/f3h7e2N3NxcvPnmm7h06RKqVauGDRs2GLqPRERERHrTK+zUrFkTJ0+exMaNG3Hq1Cnk5ORg1KhRCAwM1FmwTERERGRseoUdALCwsMBbb71lyL4QSXj6iYiIDEWvsLNu3brH1g8bNkyvzhAREREZml5hZ/z48TrvCwoKcO/ePVhZWcHOzo5hh4yCs0FERFQWvS49v337ts4rJycHSUlJaNeuHRcoExERkUnR+9lYD6tbty4WLFhQataHiIiIyJgMFnaA+4uWU1NTDblJIiIiomei15qdn3/+Wee9EAJpaWn4/PPP0bZtW4N0jIiIiMgQ9JrZ6du3r86rf//+mDlzJho3boyvv/5ar44sWLAACoUCEyZMkMpyc3MRHBwMZ2dnODg4YMCAAcjIyND5XEpKCgICAmBnZwcXFxeEhoaisLBQrz4QERGR/Og1s1NcXGzQThw9ehRr1qxB48aNdconTpyI7du3Y/PmzVCpVAgJCUH//v1x8OBBAEBRURECAgLg6uqKQ4cOIS0tDcOGDYOlpSXmz59v0D4SERHJ2r7w0mWdwyq/HxXAoGt29JGTk4PAwEB89dVXqFq1qlSenZ2NyMhILF26FF26dEHz5s0RFRWFQ4cO4fDhwwCA3bt349y5c/j222/RtGlT9OjRA3PmzMEXX3yB/Px8Yx0SERERmRC9ZnYmTZpU7rZLly59bH1wcDACAgLg5+eHuXPnSuXHjx9HQUEB/Pz8pLIGDRqgVq1aiI+PR+vWrREfHw8fHx+o1Wqpjb+/P8aMGYOzZ8+iWbNmZe4zLy8PeXl50nutVlvu4yEiIqLni15h58SJEzhx4gQKCgpQv359AMDFixdhbm6OV199VWqnUCgeu52NGzfizz//xNGjR0vVpaenw8rKCo6OjjrlarUa6enpUpsHg05JfUndo4SHh2PWrFmP7RsRERHJg15hp3fv3qhSpQrWrl0rnXq6ffs2RowYgfbt2+PDDz984jZu3LiB8ePHIzY2FjY2Nvp0Q29hYWE6s1NarRbu7u6V2gciIiKqHHqFnSVLlmD37t06a2yqVq2KuXPnolu3buUKO8ePH0dmZqbOTFBRUREOHDiAzz//HL/++ivy8/ORlZWlM7uTkZEBV1dXAICrqyuOHDmis92Sq7VK2pTF2toa1tbW5TpWMjw+1oGIiCqTXmFHq9Xi5s2bpcpv3ryJO3fulGsbXbt2xenTp3XKRowYgQYNGmDKlClwd3eHpaUl4uLiMGDAAABAUlISUlJSoNFoAAAajQbz5s1DZmYmXFxcAACxsbFQKpXw9vbW59CIiIieXVlXNpHR6BV2+vXrhxEjRmDJkiVo2bIlACAhIQGhoaHo379/ubZRpUoVNGrUSKfM3t4ezs7OUvmoUaMwadIkODk5QalU4oMPPoBGo0Hr1q0BAN26dYO3tzfefvttLFy4EOnp6Zg2bRqCg4M5c0NEREQA9Aw7ERERmDx5Mt58800UFBTc35CFBUaNGoVFixYZrHPLli2DmZkZBgwYgLy8PPj7+2PVqlVSvbm5ObZt24YxY8ZAo9HA3t4eQUFBmD17tsH6QERERM83hRBC6Pvhu3fv4sqVKwCAOnXqwN7e3mAdq0xarRYqlQrZ2dlQKpXG7o7smdqanYmv1TN2F4hIbuRyGsvEbypY3u9vvWZ2SqSlpSEtLQ0dOnSAra0thBBPvNycyNSUFb4YgIiI5EOvOyj/888/6Nq1K+rVq4eePXsiLS0NwP01NuW5EouIiIiosugVdiZOnAhLS0ukpKTAzs5OKh88eDB27dplsM4RERERPSu9TmPt3r0bv/76K2rWrKlTXrduXVy/ft0gHSMiIiIyBL1mdu7evaszo1Pi1q1bvOSbiIiITIpeYad9+/ZYt26d9F6hUKC4uBgLFy5E586dDdY5IiIiomel12ms
hQsXomvXrjh27Bjy8/Px0Ucf4ezZs7h16xYOHjxo6D4SERER6U2vmZ1GjRrh4sWLaNeuHfr06YO7d++if//+OHHiBOrUqWPoPhIRERHp7alndgoKCtC9e3dERETg448/rog+ERERERnMU8/sWFpa4tSpUxXRFyIiIiKD0+s01ltvvYXIyEhD94WIiIjI4PRaoFxYWIivv/4ae/bsQfPmzUs9E2vp0qUG6RwRERHRs3qqsHP16lV4eHjgzJkzePXVVwEAFy/qPleIz8YiIiIiU/JUYadu3bpIS0vDvn37ANx/PMSKFSugVqsrpHNEREREz+qpwo4QQuf9zp07cffuXYN2iIiIiEzEvnDd953DjNOPZ6TXmp0SD4cfIrlaFnuxVNnE1+oZoSdERPS0nupqLIVCUWpNDtfoEBERkSl76tNYw4cPlx72mZubi/fff7/U1VhbtmwxXA+JiIiInsFThZ2goCCd92+99ZZBO0NkKso6bUVERM+npwo7UVFRFdUPIiIiogrxTAuUiZ6EMyRERGRsej0ugoiIiOh5wZkdIiKiZ/HwvWjI5HBmh4iIiGSNYYeIiIhkjaexiPT08OJr3lGZiMg0cWaHiIiIZI1hh4iIiGSNYYeIiIhkjWGHiIiIZI1hh4iIiGSNYYeIiIhkzahhZ/Xq1WjcuDGUSiWUSiU0Gg127twp1efm5iI4OBjOzs5wcHDAgAEDkJGRobONlJQUBAQEwM7ODi4uLggNDUVhYWFlHwoRERGZKKOGnZo1a2LBggU4fvw4jh07hi5duqBPnz44e/YsAGDixIn45ZdfsHnzZuzfvx+pqano37+/9PmioiIEBAQgPz8fhw4dwtq1axEdHY0ZM2YY65CIiIjIxCiEEMLYnXiQk5MTFi1ahIEDB6J69epYv349Bg4cCAC4cOECvLy8EB8fj9atW2Pnzp3o1asXUlNToVarAQARERGYMmUKbt68CSsrq3LtU6vVQqVSITs7G0qlssKO7UX0Ij31nDcVJHpBvUjPxuocZuwe6Cjv97fJrNkpKirCxo0bcffuXWg0Ghw/fhwFBQXw8/OT2jRo0AC1atVCfHw8ACA+Ph4+Pj5S0AEAf39/aLVaaXaIiIiIXmxGf1zE6dOnodFokJubCwcHB2zduhXe3t5ITEyElZUVHB0dddqr1Wqkp6cDANLT03WCTkl9Sd2j5OXlIS8vT3qv1WoNdDQvthdpFoeIiJ4fRg879evXR2JiIrKzs/H9998jKCgI+/fvr9B9hoeHY9asWRW6D3rxlBX2eGqLiMj4jH4ay8rKCq+88gqaN2+O8PBwNGnSBJ999hlcXV2Rn5+PrKwsnfYZGRlwdXUFALi6upa6OqvkfUmbsoSFhSE7O1t63bhxw7AHRURERCbD6GHnYcXFxcjLy0Pz5s1haWmJuLg4qS4pKQkpKSnQaDQAAI1Gg9OnTyMzM1NqExsbC6VSCW9v70fuw9raWrrcveRFRERE8mTU01hhYWHo0aMHatWqhTt37mD9+vX47bff8Ouvv0KlUmHUqFGYNGkSnJycoFQq8cEHH0Cj0aB169YAgG7dusHb2xtvv/02Fi5ciPT0dEybNg3BwcGwtrY25qERERGRiTBq2MnMzMSwYcOQlpYGlUqFxo0b49dff8Vrr70GAFi2bBnMzMwwYMAA5OXlwd/fH6tWrZI+b25ujm3btmHMmDHQaDSwt7dHUFAQZs+ebaxDIiIiIhNjcvfZMQbeZ8cweDVWaVygTPQC4H12jOa5u88OERERUUVg2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIlkz+oNAiYiInisv0n11ZIIzO0RERCRrDDtEREQkaww7REREJGsMO0RERCRrDDtEREQkaww7REREJGsMO0RERCRrDDtEREQkaww7REREJGsMO0RERCRrfFwEUSVaFnuxVNnE1+oZoSdERHoo61EZncMqvx9PiTM7REREJGsMO0RE
RCRrPI1FVIHKOm31pDY8rUVEZFic2SEiIiJZY9ghIiIiWeNpLNJbeU7REBERGRvDDhEREenvObgcnaexiIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNaMGnbCw8PRokULVKlSBS4uLujbty+SkpJ02uTm5iI4OBjOzs5wcHDAgAEDkJGRodMmJSUFAQEBsLOzg4uLC0JDQ1FYWFiZh0JEREQmyqhhZ//+/QgODsbhw4cRGxuLgoICdOvWDXfv3pXaTJw4Eb/88gs2b96M/fv3IzU1Ff3795fqi4qKEBAQgPz8fBw6dAhr165FdHQ0ZsyYYYxDIiIiIhOjEEIIY3eixM2bN+Hi4oL9+/ejQ4cOyM7ORvXq1bF+/XoMHDgQAHDhwgV4eXkhPj4erVu3xs6dO9GrVy+kpqZCrVYDACIiIjBlyhTcvHkTVlZWT9yvVquFSqVCdnY2lEplhR6jnPBxERWDTz0nMnFl3TGYdFXSHZTL+/1tUmt2srOzAQBOTk4AgOPHj6OgoAB+fn5SmwYNGqBWrVqIj48HAMTHx8PHx0cKOgDg7+8PrVaLs2fPlrmfvLw8aLVanRcRERHJk8mEneLiYkyYMAFt27ZFo0aNAADp6emwsrKCo6OjTlu1Wo309HSpzYNBp6S+pK4s4eHhUKlU0svd3d3AR0NERESmwmTCTnBwMM6cOYONGzdW+L7CwsKQnZ0tvW7cuFHh+yQiIiLjMImnnoeEhGDbtm04cOAAatasKZW7uroiPz8fWVlZOrM7GRkZcHV1ldocOXJEZ3slV2uVtHmYtbU1rK2tDXwUREREZIqMOrMjhEBISAi2bt2KvXv3wtPTU6e+efPmsLS0RFxcnFSWlJSElJQUaDQaAIBGo8Hp06eRmZkptYmNjYVSqYS3t3flHAgRERGZLKPO7AQHB2P9+vX46aefUKVKFWmNjUqlgq2tLVQqFUaNGoVJkybByckJSqUSH3zwATQaDVq3bg0A6NatG7y9vfH2229j4cKFSE9Px7Rp0xAcHMzZG3oulXWVG6/QIjISXnklC0YNO6tXrwYAdOrUSac8KioKw4cPBwAsW7YMZmZmGDBgAPLy8uDv749Vq1ZJbc3NzbFt2zaMGTMGGo0G9vb2CAoKwuzZsyvrMIiIiMiEGTXslOcWPzY2Nvjiiy/wxRdfPLJN7dq1sWPHDkN2jYiIiGTCZK7GIiIiIqoIDDtEREQkaww7REREJGsmcZ8dIiIikpGHr2KrpGdlPQpndoiIiEjWGHaIiIhI1ngai8qlrBvdUeUpz/jzxoNERGXjzA4RERHJGsMOERERyRrDDhEREckaww4RERHJGsMOERERyRrDDhEREckaww4RERHJGsMOERERyRpvKkhl4k0Enz9l/cx4o0EiIoYdIiKi/3n4AZYkCzyNRURERLLGsENERESyxtNYRDL28DoeruEhohcRZ3aIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1rhAmegFx0XMRCR3DDtELxDeGZuIXkQMO0Skg4+dICK54ZodIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1o4adAwcOoHfv3nBzc4NCocCPP/6oUy+EwIwZM1CjRg3Y2trCz88Ply5d0mlz69YtBAYGQqlUwtHREaNGjUJOTk4lHgURERGZMqOGnbt376JJkyb44osvyqxfuHAhVqxYgYiICCQkJMDe3h7+/v7Izc2V2gQGBuLs2bOIjY3Ftm3bcODAAYwePbqyDoGIiIhMnFHvs9OjRw/06NGjzDohBJYvX45p06ahT58+AIB169ZBrVbjxx9/xJAhQ3D+/Hns2rULR48eha+vLwBg5cqV6NmzJxYvXgw3N7dKOxYiInrO7As3dg+okpjsTQWTk5ORnp4OPz8/qUylUqFVq1aIj4/HkCFDEB8fD0dHRynoAICfnx/MzMyQkJCAfv36lbntvLw85OXl
Se+1Wm3FHchzgHfVJSIiOTPZBcrp6ekAALVarVOuVquluvT0dLi4uOjUW1hYwMnJSWpTlvDwcKhUKunl7u5u4N4TERGRqTDZsFORwsLCkJ2dLb1u3Lhh7C4RERFRBTHZsOPq6goAyMjI0CnPyMiQ6lxdXZGZmalTX1hYiFu3bkltymJtbQ2lUqnzIiIiInky2bDj6ekJV1dXxMXFSWVarRYJCQnQaDQAAI1Gg6ysLBw/flxqs3fvXhQXF6NVq1aV3meiF9my2Is6LyIiU2HUBco5OTm4fPmy9D45ORmJiYlwcnJCrVq1MGHCBMydOxd169aFp6cnpk+fDjc3N/Tt2xcA4OXlhe7du+Pdd99FREQECgoKEBISgiFDhvBKLCIiIgJg5LBz7NgxdO7cWXo/adIkAEBQUBCio6Px0Ucf4e7duxg9ejSysrLQrl077Nq1CzY2NtJnYmJiEBISgq5du8LMzAwDBgzAihUrKv1YiOTs4Zmaia/VM1JPiIienlHDTqdOnSCEeGS9QqHA7NmzMXv27Ee2cXJywvr16yuie0RERCQDJnufHSIyXVyTQ0TPE4adFxC/qIiI6EXCsENEFaKsUM21PkRkDCZ76TkRERGRIXBmh4hMCmeEiMjQOLNDREREssaZHSIikr994cbuARkRww4RVRqeoiIiY+BpLCIiIpI1hh0iIiKSNYYdIiIikjWu2SEio+IdvYmoojHsEJHJ41PXiehZ8DQWERERyRpndojohcVL4YleDAw7RCRLDDJEVIKnsYiIiEjWGHaIiIhI1ngai4ieOzxFRURPg2FH5ngPEyIietEx7BARPaA8s0acWSJ6vjDsENELgzOdRC8mhh0ikgUGGdKxL9zYPSATwrAjM/wHn8jwyvP3io+0IDJdDDtERJWkPKGJIYnI8Bh2iIgqAGdZiUwHbypIREREssaZHSIiE2bIy9y5roheVAw7REQmRN/TX4YKMlxXRHLEsENE9JzR5+owohcZww4RERlcRd5lmqfj6Gkx7BARvaCMPfvDx25QZZFN2Pniiy+waNEipKeno0mTJli5ciVatmxp7G5VKGP/Q0VELyaT+7eHd0umJ5BF2Nm0aRMmTZqEiIgItGrVCsuXL4e/vz+SkpLg4uJi7O4REVE5VVSQir/6T6kyzcvOFbIvMj0KIYQwdieeVatWrdCiRQt8/vnnAIDi4mK4u7vjgw8+wNSpU5/4ea1WC5VKhezsbCiVyorurl5M7n9SREQmonXKl6XKHg4y5Q07D7fTNxAZajuVue0KDYSdwwyznYeU9/v7uZ/Zyc/Px/HjxxEW9r+BNDMzg5+fH+Lj443Ys/LjeWsiIsMq64u7otqUJxCUZzv6Kk9I0Xf/+h5/qUDWWa/dG8xzH3b+/vtvFBUVQa1W65Sr1WpcuHChzM/k5eUhLy9Pep+dnQ3gfkI0tC/2Xi5VFtzlFZ33uXdzSrV5uC9ltSEikrsWf0U9sc3dSujH4+w5m2rkHpRWmX0qz74q4vv1we0+6STVcx929BEeHo5Zs2aVKnd3d6+U/f+fgdoQERE9Fz74vEI3f+fOHahUqkfWP/dhp1q1ajA3N0dGRoZOeUZGBlxdXcv8TFhYGCZNmiS9Ly4uxq1bt+Ds7AyFQlGh/X0WWq0W7u7uuHHjhsmuLXoecVwrBse1YnBcKwbHtWJU9LgKIXDnzh24ubk9tt1zH3asrKzQvHlzxMXFoW/fvgDuh5e4uDiEhISU+Rlra2tYW1vrlDk6OlZwTw1HqVTyL2MF4LhWDI5rxeC4VgyOa8WoyHF93IxOiec+7ADApEmTEBQUBF9fX7Rs2RLLly/H3bt3MWLECGN3jYiIiIxMFmFn8ODBuHnzJmbMmIH09HQ0bdoUu3btKrVomYiIiF48sgg7ABASEvLI01ZyYW1tjU8++aTUKTh6NhzXisFxrRgc14rBca0YpjKusripIBEREdGjmBm7A0REREQViWGHiIiIZI1hh4iIiGSNYYeI
iIhkjWHHyMLDw9GiRQtUqVIFLi4u6Nu3L5KSknTa5ObmIjg4GM7OznBwcMCAAQNK3TE6JSUFAQEBsLOzg4uLC0JDQ1FYWFiZh2KyFixYAIVCgQkTJkhlHFP9/fe//8Vbb70FZ2dn2NrawsfHB8eOHZPqhRCYMWMGatSoAVtbW/j5+eHSpUs627h16xYCAwOhVCrh6OiIUaNGISfnxX3+W1FREaZPnw5PT0/Y2tqiTp06mDNnjs7zfjiuT3bgwAH07t0bbm5uUCgU+PHHH3XqDTWGp06dQvv27WFjYwN3d3csXLiwog/NqB43rgUFBZgyZQp8fHxgb28PNzc3DBs2DKmpus/LMvq4CjIqf39/ERUVJc6cOSMSExNFz549Ra1atUROTo7U5v333xfu7u4iLi5OHDt2TLRu3Vq0adNGqi8sLBSNGjUSfn5+4sSJE2LHjh2iWrVqIiwszBiHZFKOHDkiPDw8ROPGjcX48eOlco6pfm7duiVq164thg8fLhISEsTVq1fFr7/+Ki5fviy1WbBggVCpVOLHH38UJ0+eFK+//rrw9PQU//77r9Sme/fuokmTJuLw4cPi999/F6+88ooYOnSoMQ7JJMybN084OzuLbdu2ieTkZLF582bh4OAgPvvsM6kNx/XJduzYIT7++GOxZcsWAUBs3bpVp94QY5idnS3UarUIDAwUZ86cERs2bBC2trZizZo1lXWYle5x45qVlSX8/PzEpk2bxIULF0R8fLxo2bKlaN68uc42jD2uDDsmJjMzUwAQ+/fvF0Lc/0WytLQUmzdvltqcP39eABDx8fFCiPu/iGZmZiI9PV1qs3r1aqFUKkVeXl7lHoAJuXPnjqhbt66IjY0VHTt2lMIOx1R/U6ZMEe3atXtkfXFxsXB1dRWLFi2SyrKysoS1tbXYsGGDEEKIc+fOCQDi6NGjUpudO3cKhUIh/vvf/1Zc501YQECAGDlypE5Z//79RWBgoBCC46qPh7+UDTWGq1atElWrVtX5d2DKlCmifv36FXxEpqGsEPmwI0eOCADi+vXrQgjTGFeexjIx2dnZAAAnJycAwPHjx1FQUAA/Pz+pTYMGDVCrVi3Ex8cDAOLj4+Hj46Nzx2h/f39otVqcPXu2EntvWoKDgxEQEKAzdgDH9Fn8/PPP8PX1xRtvvAEXFxc0a9YMX331lVSfnJyM9PR0nbFVqVRo1aqVztg6OjrC19dXauPn5wczMzMkJCRU3sGYkDZt2iAuLg4XL14EAJw8eRJ//PEHevToAYDjagiGGsP4+Hh06NABVlZWUht/f38kJSXh9u3blXQ0pi07OxsKhUJ65qQpjKts7qAsB8XFxZgwYQLatm2LRo0aAQDS09NhZWVV6kGlarUa6enpUpuHH41R8r6kzYtm48aN+PPPP3H06NFSdRxT/V29ehWrV6/GpEmT8H//9384evQoxo0bBysrKwQFBUljU9bYPTi2Li4uOvUWFhZwcnJ6Ycd26tSp0Gq1aNCgAczNzVFUVIR58+YhMDAQADiuBmCoMUxPT4enp2epbZTUVa1atUL6/7zIzc3FlClTMHToUOnBn6Ywrgw7JiQ4OBhnzpzBH3/8YeyuPNdu3LiB8ePHIzY2FjY2NsbujqwUFxfD19cX8+fPBwA0a9YMZ86cQUREBIKCgozcu+fXd999h5iYGKxfvx4NGzZEYmIiJkyYADc3N44rPTcKCgowaNAgCCGwevVqY3dHB09jmYiQkBBs27YN+/btQ82aNaVyV1dX5OfnIysrS6d9RkYGXF1dpTYPX0lU8r6kzYvk+PHjyMzMxKuvvgoLCwtYWFhg//79WLFiBSwsLKBWqzmmeqpRowa8vb11yry8vJCSkgLgf2NT1tg9OLaZmZk69YWFhbh169YLO7ahoaGYOnUqhgwZAh8fH7z99tuYOHEiwsPDAXBcDcFQY8h/G8pWEnSuX7+O2NhYaVYHMI1xZdgxMiEEQkJCsHXrVuzdu7fUNF7z5s1h
aWmJuLg4qSwpKQkpKSnQaDQAAI1Gg9OnT+v8MpX8sj38xfQi6Nq1K06fPo3ExETp5evri8DAQOnPHFP9tG3bttStES5evIjatWsDADw9PeHq6qoztlqtFgkJCTpjm5WVhePHj0tt9u7di+LiYrRq1aoSjsL03Lt3D2Zmuv8cm5ubo7i4GADH1RAMNYYajQYHDhxAQUGB1CY2Nhb169d/YU9hlQSdS5cuYc+ePXB2dtapN4lxNcgyZ9LbmDFjhEqlEr/99ptIS0uTXvfu3ZPavP/++6JWrVpi79694tixY0Kj0QiNRiPVl1wm3a1bN5GYmCh27dolqlev/sJfJv2gB6/GEoJjqq8jR44ICwsLMW/ePHHp0iURExMj7OzsxLfffiu1WbBggXB0dBQ//fSTOHXqlOjTp0+Zl/c2a9ZMJCQkiD/++EPUrVv3hbpE+mFBQUHipZdeki4937Jli6hWrZr46KOPpDYc1ye7c+eOOHHihDhx4oQAIJYuXSpOnDghXRVkiDHMysoSarVavP322+LMmTNi48aNws7OTtaXnj9uXPPz88Xrr78uatasKRITE3W+xx68ssrY48qwY2QAynxFRUVJbf79918xduxYUbVqVWFnZyf69esn0tLSdLZz7do10aNHD2FrayuqVasmPvzwQ1FQUFDJR2O6Hg47HFP9/fLLL6JRo0bC2tpaNGjQQHz55Zc69cXFxWL69OlCrVYLa2tr0bVrV5GUlKTT5p9//hFDhw4VDg4OQqlUihEjRog7d+5U5mGYFK1WK8aPHy9q1aolbGxsxMsvvyw+/vhjnS8LjuuT7du3r8x/T4OCgoQQhhvDkydPinbt2glra2vx0ksviQULFlTWIRrF48Y1OTn5kd9j+/btk7Zh7HFVCPHALTqJiIiIZIZrdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaISLY8PDywfPlyY3eDiIyMYYeIHis+Ph7m5uYICAgwdleIiPTCsENEjxUZGYkPPvgABw4cQGpqqrG7I2sPPgSRiAyHYYeIHiknJwebNm3CmDFjEBAQgOjoaJ363377DQqFAnFxcfD19YWdnR3atGlT6snoq1evRp06dWBlZYX69evjm2++0alXKBRYs2YNevXqBTs7O3h5eSE+Ph6XL19Gp06dYG9vjzZt2uDKlSvSZ65cuYI+ffpArVbDwcEBLVq0wJ49ex55LCNHjkSvXr10ygoKCuDi4oLIyMgyP3P9+nX07t0bVatWhb29PRo2bIgdO3ZI9WfPnkWvXr2gVCpRpUoVtG/fXupjcXExZs+ejZo1a8La2hpNmzbFrl27pM9eu3YNCoUCmzZtQseOHWFjY4OYmBgAwH/+8x94eXnBxsYGDRo0wKpVqx55XERUDgZ7yhYRyU5kZKTw9fUVQtx/AGidOnVEcXGxVF/ygMBWrVqJ3377TZw9e1a0b99etGnTRmqzZcsWYWlpKb744guRlJQklixZIszNzcXevXulNgDESy+9JDZt2iSSkpJE3759hYeHh+jSpYvYtWuXOHfunGjdurXo3r279JnExEQREREhTp8+LS5evCimTZsmbGxspCdcCyFE7dq1xbJly4QQQhw8eFCYm5uL1NRUnb7Z29s/8mGZAQEB4rXXXhOnTp0SV65cEb/88ovYv3+/EEKIv/76Szg5OYn+/fuLo0ePiqSkJPH111+LCxcuCCGEWLp0qVAqlWLDhg3iwoUL4qOPPhKWlpbi4sWLQgghPUDRw8ND/PDDD+Lq1asiNTVVfPvtt6JGjRpS2Q8//CCcnJxEdHS0Xj9DIuJTz4noMdq0aSOWL18uhBCioKBAVKtWTedJxiVhZ8+ePVLZ9u3bBQDx77//Stt49913dbb7xhtviJ49e0rvAYhp06ZJ7+Pj4wUAERkZKZVt2LBB2NjYPLa/DRs2FCtXrpTePxh2hBDC29tbfPrpp9L73r17i+HD
hz9yez4+PmLmzJll1oWFhQlPT0+Rn59fZr2bm5uYN2+eTlmLFi3E2LFjhRD/Czsl41uiTp06Yv369Tplc+bMERqN5pH9JKLH42ksIipTUlISjhw5gqFDhwIALCwsMHjw4DJP+TRu3Fj6c40aNQAAmZmZAIDz58+jbdu2Ou3btm2L8+fPP3IbarUaAODj46NTlpubC61WC+D+KbbJkyfDy8sLjo6OcHBwwPnz55GSkvLIY3rnnXcQFRUFAMjIyMDOnTsxcuTIR7YfN24c5s6di7Zt2+KTTz7BqVOnpLrExES0b98elpaWpT6n1WqRmpparuP29fWV/nz37l1cuXIFo0aNgoODg/SaO3euzik8Ino6FsbuABGZpsjISBQWFsLNzU0qE0LA2toan3/+OVQqlVT+4Be+QqEAcH/NytMoaxuP2+7kyZMRGxuLxYsX45VXXoGtrS0GDhyI/Pz8R+5j2LBhmDp1KuLj43Ho0CF4enqiffv2j2z/zjvvwN/fH9u3b8fu3bsRHh6OJUuW4IMPPoCtre1THd+j2NvbS3/OyckBAHz11Vdo1aqVTjtzc3OD7I/oRcSZHSIqpbCwEOvWrcOSJUuQmJgovU6ePAk3Nzds2LCh3Nvy8vLCwYMHdcoOHjwIb2/vZ+rjwYMHMXz4cPTr1w8+Pj5wdXXFtWvXHvsZZ2dn9O3bF1FRUYiOjsaIESOeuB93d3e8//772LJlCz788EN89dVXAO7PRP3+++9lXkGlVCrh5ub21MetVqvh5uaGq1ev4pVXXtF5eXp6PrGvRFQ2zuwQUSnbtm3D7du3MWrUKJ0ZHAAYMGAAIiMj8f7775drW6GhoRg0aBCaNWsGPz8//PLLL9iyZctjr5wqj7p162LLli3o3bs3FAoFpk+fXq7ZpHfeeQe9evVCUVERgoKCHtt2woQJ6NGjB+rVq4fbt29j37598PLyAgCEhIRg5cqVGDJkCMLCwqBSqXD48GG0bNkS9evXR2hoKD755BPUqVMHTZs2RVRUFBITE6Urrh5l1qxZGDduHFQqFbp37468vDwcO3YMt2/fxqRJk8o/QEQkYdgholIiIyPh5+dXKugA98POwoULddavPE7fvn3x2WefYfHixRg/fjw8PT0RFRWFTp06PVMfly5dipEjR6JNmzaoVq0apkyZIq3neRw/Pz/UqFEDDRs21DlFV5aioiIEBwfjr7/+glKpRPfu3bFs2TIA92eJ9u7di9DQUHTs2BHm5uZo2rSptE5n3LhxyM7OxocffojMzEx4e3vj559/Rt26dR+7z3feeQd2dnZYtGgRQkNDYW9vDx8fH0yYMKF8A0NEpSiEEMLYnSAiqiw5OTl46aWXEBUVhf79+xu7O0RUCTizQ0QvhOLiYvz9999YsmQJHB0d8frrrxu7S0RUSRh2iOiFkJKSAk9PT9SsWRPR0dGwsOA/f0QvCp7GIiIiIlnjpedEREQkaww7REREJGsMO0RERCRrDDtEREQkaww7REREJGsMO0RERCRrDDtEREQkaww7REREJGsMO0RERCRr/w+QjF6nEbzVpAAAAABJRU5ErkJggg==", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAjsAAAHHCAYAAABZbpmkAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAABUZklEQVR4nO3deVwU9f8H8NdyX+4iCLuSB2QeoJgmHivekqhoHphHlKCWpeCtJb+8LzxSS8ujvgSWqGVp5ZmIVynikbeGqSj6lcNSWNHk/Pz+8MF8XQFFXNhlfD0fj308mJnPzLxnQHj5mc/MKIQQAkREREQyZWbsAoiIiIjKE8MOERERyRrDDhEREckaww4RERHJGsMOERERyRrDDhEREckaww4RERHJGsMOERERyRrDDhEREckaww4RFaFQKDBjxgxjl1EpREdHQ6FQ4OrVq9K8Dh06oEOHDhWy/8e/VzNmzIBCocDff/9dIft3d3dHSEhIheyLqKwYdojKYMWKFVAoFGjZsqWxSyGZOHToEGbMmIGMjAxjl1KEKddGVBoWxi6AqDKKiYmBu7s7jhw5gkuXLuGVV14xdklkQnbt2vXM6xw6dAgzZ85ESEgIHB0dS73ev//+CwuL8v1V/qTaEhMTYWbG/zeTaeNPKNEzSkpKwqFDh7BkyRK4uLggJibG2CXRU9y7d69C92dlZQUrK6ty235BQQEePHgAALCxsSn3sPMk1tbWsLS0NNr+iUqDYYfoGcXExKBq1aoICAhAv379ig07V69ehUKhwCeffIIvv/wSderUgbW1NZo3b46jR48Wab9nzx60bdsW9vb2cHR0RK9evXDhwgW9NoVjMS5evIi3334bKpUKLi4umDp1KoQQuH79Onr16gWlUgmNRoPFixfrrZ+Tk4Np06ahWbNmUKlUsLe3R9u2bbF3794nHu/evXuhUCiwefPmIsvWrVsHhUKB+Pj4EtfPzc3FzJkzUbduXdjY2MDZ2Rlt2rRBbGysXrs///wT/fv3h4uLC2xtbVG/fn18/PHHem1OnDiBbt26QalUwsHBAZ07d8bhw4f12hSOodm/fz9GjhwJV1dX1KhRQ1q+Y8cO6VxXqVIFAQEBOHfu3BPPQaFz586hU6dOsLW1RY0aNTBnzhwUFBQUaVfcmJ3ly5ejYcOGsLOzQ9WqVeHj44N169YBePi9nTRpEgDAw8MDCoVCbxyQQqFAWFgYYmJi0LBhQ1hbW2Pnzp3SsuLGV/3999/o378/lEolnJ2dMWbMGCkgAf/7GY2Oji6y7qPbfFptxY3ZuXLlCt588004OTnBzs4OrVq1wrZt2/Ta7Nu3DwqFAt9//z3mzp2LGjVqwMbGBp07d8alS5eK1ET0PHgZi+gZxcTEoG/fvrCyssKgQYOwcuVKHD16FM2bNy/Sdt26dbh79y7ef/99KBQKLFy4EH379sWVK1ek/w3v3r0b3bp1w8svv4wZM2bg33//xfLly+Hr64s//vgD7u7uetscMGAAPD09MX/+fGzbtg1z5syBk5MTVq9ejU6dOmHBggWIiYnBxIkT0bx5c7Rr1w4AoNPp8J///AeDBg3Ce++9h7t37yIyMhL+/v44cuQImjRpUuzxdujQATVr1kRMTAz69OlT5FzUqVMHWq22xPM1Y8YMRERE4N1330WLFi2g0+lw7Ngx/PHHH3j99dcBAKdPn0bbtm1haWmJ4cOHw93dHZcvX8aWLVswd+5cAA+DRtu2baFUKvHhhx/C0tISq1evRocOHbB///4i46dGjhwJFxcXTJs2TerZ+fbbbxEcHAx/f38sWLAA9+/fx8qVK9GmTRucOHGiyLl+VGpqKjp27Ii8vDxMnjwZ9vb2+PLLL2Fra1viOoW++uorjB49Gv369ZNCx+nTp5GQkIC33noLffv2xcWLF7F+/XosXboU1apVAwC4uLhI29izZw++//57hIWFoVq1ak+sFQD69+8Pd3d3RERE4PDhw1i2bBnu3LmDb77
55qn1Pqo0tT0qLS0NrVu3xv379zF69Gg4OztjzZo1eOONN/DDDz8U+RmaP38+zMzMMHHiRGRmZmLhwoUICgpCQkLCM9VJ9ESCiErt2LFjAoCIjY0VQghRUFAgatSoIcaMGaPXLikpSQAQzs7O4vbt29L8n3/+WQAQW7ZskeY1adJEuLq6in/++Uead+rUKWFmZiYGDx4szZs+fboAIIYPHy7Ny8vLEzVq1BAKhULMnz9fmn/nzh1ha2srgoOD9dpmZ2fr1Xnnzh2hVqvF0KFD9eYDENOnT5emw8PDhbW1tcjIyJDmpaenCwsLC712xXn11VdFQEDAE9u0a9dOVKlSRVy7dk1vfkFBgfR17969hZWVlbh8+bI07+bNm6JKlSqiXbt20ryoqCgBQLRp00bk5eVJ8+/evSscHR3Fe++9p7eP1NRUoVKpisx/3NixYwUAkZCQIM1LT08XKpVKABBJSUnS/Pbt24v27dtL07169RINGzZ84vYXLVpUZDuFAAgzMzNx7ty5Ypc9+j0o/Dl544039NqNHDlSABCnTp0SQvzvZzQqKuqp23xSbbVr19b7OSs8T7/99ps07+7du8LDw0O4u7uL/Px8IYQQe/fuFQCEp6en3s/lZ599JgCIM2fOFNkXUVnxMhbRM4iJiYFarUbHjh0BPOzuHzBgADZs2ID8/Pwi7QcMGICqVatK023btgXwsJsfAFJSUnDy5EmEhITAyclJate4cWO8/vrr2L59e5Ftvvvuu9LX5ubm8PHxgRACw4YNk+Y7Ojqifv360n4K2xaOIykoKMDt27eRl5cHHx8f/PHHH0887sGDByM7Oxs//PCDNO+7775DXl4e3n777Seu6+joiHPnzuGvv/4qdvmtW7dw4MABDB06FLVq1dJbplAoAAD5+fnYtWsXevfujZdffllaXr16dbz11lv4/fffodPp9NZ97733YG5uLk3HxsYiIyMDgwYNwt9//y19zM3N0bJly6deztu+fTtatWqFFi1aSPNcXFwQFBT0xPUKz8GNGzeKvYRZWu3bt4eXl1ep24eGhupNjxo1CgCK/ZkypO3bt6NFixZo06aNNM/BwQHDhw/H1atXcf78eb32Q4YM0Rvf9Pi/ESJDYNghKqX8/Hxs2LABHTt2RFJSEi5duoRLly6hZcuWSEtLQ1xcXJF1Hv/jXRh87ty5AwC4du0aAKB+/fpF1vX09MTff/9dZHDt49tUqVSwsbGRLi88Or9wP4XWrFmDxo0bS2NnXFxcsG3bNmRmZj7x2Bs0aIDmzZvrjU+KiYlBq1atnnon2qxZs5CRkYF69erB29sbkyZNwunTp6XlhX/UGjVqVOI2bt26hfv375d4ngoKCnD9+nW9+R4eHnrThWGrU6dOcHFx0fvs2rUL6enpTzyOa9euoW7dukXmF1fT4z766CM4ODigRYsWqFu3LkJDQ3Hw4MGnrveox4/naR6vtU6dOjAzM9N7HlB5uHbtWonfp8Llj3ravxEiQ+CYHaJS2rNnD1JSUrBhwwZs2LChyPKYmBh06dJFb96jPQuPEkKUuY7itlma/axduxYhISHo3bs3Jk2aBFdXV5ibmyMiIgKXL19+6n4HDx6MMWPG4MaNG8jOzsbhw4fx+eefP3W9du3a4fLly/j555+xa9cu/Oc//8HSpUuxatUqvV4qQ3t8LE3hQOJvv/0WGo2mSPvyvKPJ09MTiYmJ2Lp1K3bu3Ikff/wRK1aswLRp0zBz5sxSbaM0Y4OepLCXrKTpQsX1UJan8vg3QvQ4hh2iUoqJiYGrqyu++OKLIss2bdqEzZs3Y9WqVc/0R6l27doAHj6r5HF//vknqlWrBnt7+7IX/YgffvgBL7/8MjZt2qT3h2769OmlWn/gwIEYP3481q9fj3///ReWlpYYMGBAqdZ1cnLCkCFDMGTIEGRlZaFdu3aYMWMG3n33Xemy1NmzZ0tc38XFBXZ2diWeJzMzM9SsWfOJNdSpUwcA4OrqCj8
/v1LV/ajatWsXeymuuJqKY29vjwEDBmDAgAHIyclB3759MXfuXISHh8PGxqbE8FFWf/31l15v0KVLl1BQUCANbC7sQXn8QYGP97wAJQej4tSuXbvE71PhcqKKxstYRKXw77//YtOmTejRowf69etX5BMWFoa7d+/il19+eabtVq9eHU2aNMGaNWv0/uicPXsWu3btQvfu3Q12DIX/g370f8wJCQlPvG38UdWqVUO3bt2wdu1axMTEoGvXrkUunRXnn3/+0Zt2cHDAK6+8guzsbAAPg0y7du3w9ddfIzk5Wa9tYa3m5ubo0qULfv75Z73LMGlpaVi3bh3atGkDpVL5xDr8/f2hVCoxb9485ObmFll+69atJ67fvXt3HD58GEeOHNFbpzTPWXr8HFhZWcHLywtCCKmWwlBrqKcUPx7Kly9fDgDo1q0bAECpVKJatWo4cOCAXrsVK1YU2daz1Na9e3ccOXJE7+fq3r17+PLLL+Hu7v5M446IDIU9O0Sl8Msvv+Du3bt44403il3eqlUr6QGDpe3tKLRo0SJ069YNWq0Ww4YNk249V6lUBn0/VY8ePbBp0yb06dMHAQEBSEpKwqpVq+Dl5YWsrKxSbWPw4MHo168fAGD27NmlWsfLywsdOnRAs2bN4OTkhGPHjuGHH35AWFiY1GbZsmVo06YNXnvtNQwfPhweHh64evUqtm3bhpMnTwIA5syZg9jYWLRp0wYjR46EhYUFVq9ejezsbCxcuPCpdSiVSqxcuRLvvPMOXnvtNQwcOBAuLi5ITk7Gtm3b4Ovr+8TLch9++CG+/fZbdO3aFWPGjJFuPa9du7beGKTidOnSBRqNBr6+vlCr1bhw4QI+//xzBAQEoEqVKgCAZs2aAQA+/vhjDBw4EJaWlujZs2eZe/aSkpLwxhtvoGvXroiPj8fatWvx1ltv4dVXX5XavPvuu5g/fz7effdd+Pj44MCBA7h48WKRbT1LbZMnT8b69evRrVs3jB49Gk5OTlizZg2SkpLw448/8mnLZBxGvBOMqNLo2bOnsLGxEffu3SuxTUhIiLC0tBR///23dFvvokWLirTDY7f1CiHE7t27ha+vr7C1tRVKpVL07NlTnD9/Xq9N4S3Ft27d0psfHBws7O3ti+ynffv2erc7FxQUiHnz5onatWsLa2tr0bRpU7F161YRHBwsateu/dQahRAiOztbVK1aVahUKvHvv/+WeC4eNWfOHNGiRQvh6OgobG1tRYMGDcTcuXNFTk6OXruzZ8+KPn36CEdHR2FjYyPq168vpk6dqtfmjz/+EP7+/sLBwUHY2dmJjh07ikOHDum1Kbz1/OjRo8XWs3fvXuHv7y9UKpWwsbERderUESEhIeLYsWNPPZbTp0+L9u3bCxsbG/HSSy+J2bNni8jIyKfeer569WrRrl074ezsLKytrUWdOnXEpEmTRGZmpt72Z8+eLV566SVhZmamt00AIjQ0tNiaHv9eFf6cnD9/XvTr109UqVJFVK1aVYSFhRX5nt2/f18MGzZMqFQqUaVKFdG/f3+Rnp5e7Pe/pNoev/VcCCEuX74s+vXrJ30vW7RoIbZu3arXpvDW840bN+rNf9It8URlpRCCo8CIqHTy8vLg5uaGnj17IjIy0tjlEBGVCvsTiajUfvrpJ9y6dQuDBw82dilERKXGnh0ieqqEhAScPn0as2fPRrVq1Z76EEIiIlPCnh0ieqqVK1dixIgRcHV1feZ3KxERGRt7doiIiEjW2LNDREREssawQ0RERLLGhwri4Ttzbt68iSpVqhj8ke1ERERUPoQQuHv3Ltzc3J74wEqGHQA3b9586nt1iIiIyDRdv34dNWrUKHG5UcNOfn4+ZsyYgbVr1yI1NRVubm4ICQnBlClTpB4WIQSmT5+Or776ChkZGfD19cXKlStRt25daTu3b9/GqFGjsGXLFpiZmSEwMBCfffYZHBwcSlVH4ePar1+//tT36xAREZFp0Ol0qFmzpvR3vCR
GDTsLFizAypUrsWbNGjRs2BDHjh3DkCFDoFKpMHr0aADAwoULsWzZMqxZswYeHh6YOnUq/P39cf78edjY2AAAgoKCkJKSgtjYWOTm5mLIkCEYPnw41q1bV6o6CoOVUqlk2CEiIqpknjYExai3nvfo0QNqtVrvsfOBgYGwtbXF2rVrIYSAm5sbJkyYgIkTJwIAMjMzoVarER0djYEDB+LChQvw8vLC0aNH4ePjAwDYuXMnunfvjhs3bsDNze2pdeh0OqhUKmRmZjLsEBERVRKl/ftt1LuxWrdujbi4OOktu6dOncLvv/+Obt26AXj41t7U1FT4+flJ66hUKrRs2RLx8fEAgPj4eDg6OkpBBwD8/PxgZmaGhISEYvebnZ0NnU6n9yEiIiJ5MuplrMmTJ0On06FBgwYwNzdHfn4+5s6di6CgIABAamoqAECtVuutp1arpWWpqalwdXXVW25hYQEnJyepzeMiIiIwc+ZMQx8OERERmSCjhp3vv/8eMTExWLduHRo2bIiTJ09i7NixcHNzQ3BwcLntNzw8HOPHj5emCwc4PUlBQQFycnLKrSYyLZaWljA3Nzd2GUREZABGDTuTJk3C5MmTMXDgQACAt7c3rl27hoiICAQHB0Oj0QAA0tLSUL16dWm9tLQ0NGnSBACg0WiQnp6ut928vDzcvn1bWv9x1tbWsLa2LnWdOTk5SEpKQkFBwbMcHlVyjo6O0Gg0fPYSEVElZ9Swc//+/SIPATI3N5dChYeHBzQaDeLi4qRwo9PpkJCQgBEjRgAAtFotMjIycPz4cTRr1gwAsGfPHhQUFKBly5bPXaMQAikpKTA3N0fNmjWf+NAikgchBO7fvy+F6EeDNhERVT5GDTs9e/bE3LlzUatWLTRs2BAnTpzAkiVLMHToUAAPbyUbO3Ys5syZg7p160q3nru5uaF3794AAE9PT3Tt2hXvvfceVq1ahdzcXISFhWHgwIGluhPrafLy8nD//n24ubnBzs7uubdHlYOtrS0AID09Ha6urrykRURUiRk17CxfvhxTp07FyJEjkZ6eDjc3N7z//vuYNm2a1ObDDz/EvXv3MHz4cGRkZKBNmzbYuXOn9IwdAIiJiUFYWBg6d+4sPVRw2bJlBqkxPz8fAGBlZWWQ7VHlURhuc3NzGXaIiCoxoz5nx1Q86T79Bw8eICkpCR4eHnoBi+SP33siItNWKZ6zQ0RERFTeGHbIZOzbtw8KhQIZGRnGLoWIiGSEbz0vo6WxFyt0f+Ner/dM7UNCQrBmzRpERERg8uTJ0vyffvoJffr0Aa9eEhHRi4I9OzJmY2ODBQsW4M6dOwbbJh+sSERElQ3Djoz5+flBo9EgIiKixDY//vgjGjZsCGtra7i7u2Px4sV6y93d3TF79mwMHjwYSqUSw4cPR3R0NBwdHbF161bUr18fdnZ26NevH+7fv481a9bA3d0dVatWxejRo6W72QDg22+/hY+PD6pUqQKNRoO33nqryAMhiYiIDI1hR8bMzc0xb948LF++HDdu3Ciy/Pjx4+jfvz8GDhyIM2fOYMaMGZg6dSqio6P12n3yySd49dVXceLECUydOhXAwwdCLlu2DBs2bMDOnTuxb98+9OnTB9u3b8f27dvx7bffYvXq1fjhhx+k7eTm5mL27Nk4deoUfvrpJ1y9ehUhISHleQqIiIg4Zkfu+vTpgyZNmmD69OmIjIzUW7ZkyRJ07txZCjD16tXD+fPnsWjRIr0Q0qlTJ0yYMEGa/u2335Cbm4uVK1eiTp06AIB+/frh22+/RVpaGhwcHODl5YWOHTti7969GDBgAABID4sEgJdffhnLli1D8+bNkZWVBQcHh/I6BURE8rL3sd76juHGqaMSYc/OC2DBggVYs2YNLly4oDf/woUL8PX11Zvn6+uLv/76S+/yk4+PT5Ft2tnZSUEHePgmend3d73Qolar9S5THT9+HD179kStWrVQpUoVtG/fHgCQnJz8fAdIRET0BAw7L4B
27drB398f4eFlS//29vZF5llaWupNKxSKYucVvufs3r178Pf3h1KpRExMDI4ePYrNmzcD4KBnIiIqX7yM9YKYP38+mjRpgvr160vzPD09cfDgQb12Bw8eRL169Qz+eoQ///wT//zzD+bPn4+aNWsCAI4dO2bQfRARERWHPTsvCG9vbwQFBem9M2zChAmIi4vD7NmzcfHiRaxZswaff/45Jk6caPD916pVC1ZWVli+fDmuXLmCX375BbNnzzb4foiIiB7HsPMCmTVrlnRZCQBee+01fP/999iwYQMaNWqEadOmYdasWeVyh5SLiwuio6OxceNGeHl5Yf78+fjkk08Mvh8iIqLH8UWg4ItAqXj83hORSeLdWJLSvgiUY3aIiIhMweMhBnihg4whMewQERGZquICED0zjtkhIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWeMAZSIiosqMd3E9FXt2iIiISNYYdoiIiEjWGHbIJLm7u+PTTz81dhlERCQDHLNTVhX9oKcyXn+Nj49HmzZt0LVrV2zbts3ARREREZk+9uzIXGRkJEaNGoUDBw7g5s2bxi6HiIiowjHsyFhWVha+++47jBgxAgEBAYiOjpaW7du3DwqFAnFxcfDx8YGdnR1at26NxMREvW2sXLkSderUgZWVFerXr49vv/1Wb7lCocDq1avRo0cP2NnZwdPTE/Hx8bh06RI6dOgAe3t7tG7dGpcvX5bWuXz5Mnr16gW1Wg0HBwc0b94cu3fvfuKxJCcno1evXnBwcIBSqUT//v2RlpYmLQ8JCUHv3r311hk7diw6dOggTf/www/w9vaGra0tnJ2d4efnh3v37pXybBIRUWXFsCNj33//PRo0aID69evj7bffxtdff43HX3L/8ccfY/HixTh27BgsLCwwdOhQadnmzZsxZswYTJgwAWfPnsX777+PIUOGYO/evXrbmD17NgYPHoyTJ0+iQYMGeOutt/D+++8jPDwcx44dgxACYWFhUvusrCx0794dcXFxOHHiBLp27YqePXsiOTm52OMoKChAr169cPv2bezfvx+xsbG4cuUKBgwYUOpzkZKSgkGDBmHo0KG4cOEC9u3bh759+xY5H0REJD8csyNjkZGRePvttwEAXbt2RWZmJvbv36/X2zF37ly0b98eADB58mQEBATgwYMHsLGxwSeffIKQkBCMHDkSADB+/HgcPnwYn3zyCTp27ChtY8iQIejfvz8A4KOPPoJWq8XUqVPh7+8PABgzZgyGDBkitX/11Vfx6quvStOzZ8/G5s2b8csvv+iFokJxcXE4c+YMkpKSULNmTQDAN998g4YNG+Lo0aNo3rz5U89FSkoK8vLy0LdvX9SuXRsA4O3t/fSTSERElR57dmQqMTERR44cwaBBgwAAFhYWGDBgACIjI/XaNW7cWPq6evXqAID09HQAwIULF+Dr66vX3tfXFxcuXChxG2q1GoB+kFCr1Xjw4AF0Oh2Ahz07EydOhKenJxwdHeHg4IALFy6U2LNz4cIF1KxZUwo6AODl5QVHR8citZTk1VdfRefOneHt7Y0333wTX331Fe7cuVOqdYmIqHJj2JGpyMhI5OXlwc3NDRYWFrCwsMDKlSvx448/IjMzU2pnaWkpfa1QKAA8vGz0LIrbxpO2O3HiRGzevBnz5s3Db7/9hpMnT8Lb2xs5OTnPeJT/Y2ZmVuSSVG5urvS1ubk5YmNjsWPHDnh5eWH58uWoX78+kpKSyrxPIiKqHBh2ZCgvLw/ffPMNFi9ejJMnT0qfU6dOwc3NDevXry/Vdjw9PXHw4EG9eQcPHoSXl9dz1Xfw4EGEhISgT58+8Pb2hkajwdWrV59Yx/Xr13H9+nVp3vnz55GRkSHV4uLigpSUFL31Tp48qTetUCjg6+uLmTNn4sSJE7CyssLmzZuf61iIiMj0ccyODG3duhV37tzBsGHDoFKp9JYFBgYiMjISixYteup2Jk2ahP79+6Np06bw8/PDli1
bsGnTpqfeOfU0devWxaZNm9CzZ08oFApMnTr1ib1Jfn5+8Pb2RlBQED799FPk5eVh5MiRaN++PXx8fAAAnTp1wqJFi/DNN99Aq9Vi7dq1OHv2LJo2bQoASEhIQFxcHLp06QJXV1ckJCTg1q1b8PT0fK5jISIi08eeHRmKjIyEn59fkaADPAw7x44dw+nTp5+6nd69e+Ozzz7DJ598goYNG2L16tWIiorSG+BcFkuWLEHVqlXRunVr9OzZE/7+/njttddKbK9QKPDzzz+jatWqaNeuHfz8/PDyyy/ju+++k9r4+/tj6tSp+PDDD9G8eXPcvXsXgwcPlpYrlUocOHAA3bt3R7169TBlyhQsXrwY3bp1e65jISIi06cQvPcWOp0OKpUKmZmZUCqVessePHiApKQkeHh4wMbGxkgVkjHwe09EFcqQT+Z/Qd56/qS/348yas+Ou7s7FApFkU9oaCiAh39sQkND4ezsDAcHBwQGBuo9SA54+LC5gIAA2NnZwdXVFZMmTUJeXp4xDoeIiIhMkFHDztGjR5GSkiJ9YmNjAQBvvvkmAGDcuHHYsmULNm7ciP379+PmzZvo27evtH5+fj4CAgKQk5ODQ4cOYc2aNYiOjsa0adOMcjxERERkeowadlxcXKDRaKTP1q1bUadOHbRv3x6ZmZmIjIzEkiVL0KlTJzRr1gxRUVE4dOgQDh8+DADYtWsXzp8/j7Vr16JJkybo1q0bZs+ejS+++OK5bmMmIiIi+TCZAco5OTlYu3Ythg4dCoVCgePHjyM3Nxd+fn5SmwYNGqBWrVqIj48H8PCN3t7e3tKD7ICHA1V1Oh3OnTtX4cdAREREpsdkbj3/6aefkJGRgZCQEABAamoqrKys4OjoqNdOrVYjNTVVavNo0ClcXrisJNnZ2cjOzpamC5/s+yQcx/3i4feciEgeTKZnJzIyEt26dYObm1u57ysiIgIqlUr6PPoagseZm5sDAC+LvYDu378PQP9p0EREVPmYRM/OtWvXsHv3bmzatEmap9FokJOTg4yMDL3enbS0NGg0GqnNkSNH9LZVeLdWYZvihIeHY/z48dK0TqcrMfBYWFjAzs4Ot27dgqWlJczMTCYfUjkRQuD+/ftIT0+Ho6OjFHiJiKhyMomwExUVBVdXVwQEBEjzmjVrBktLS8TFxSEwMBDAw5dbJicnQ6vVAgC0Wi3mzp2L9PR0uLq6AgBiY2OhVCqf+EoDa2trWFtbl6o2hUKB6tWrIykpCdeuXSvrIVIl5Ojo+MTQTERElYPRw05BQQGioqIQHBwMC4v/laNSqTBs2DCMHz8eTk5OUCqVGDVqFLRaLVq1agUA6NKlC7y8vPDOO+9g4cKFSE1NxZQpUxAaGlrqMFMaVlZWqFu3Li9lvUAsLS3Zo0NEJBNGDzu7d+9GcnIyhg4dWmTZ0qVLYWZmhsDAQGRnZ8Pf3x8rVqyQlpubm2Pr1q0YMWIEtFot7O3tERwcjFmzZhm8TjMzMz5Fl4iIqBLi6yJQ+sdNExERlRu+LuKZVYrXRRARERGVN4YdIiIikjWjj9khIiIiA3v8ktgLclmrJOzZISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZszB2AURERC+kvRHGruCFwZ4dIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI13o1FREQkd8Xd+dUxvOLrMBL27BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrBk97Pz3v//F22+/DWdnZ9ja2sLb2xvHjh2TlgshMG3aNFSvXh22trbw8/PDX3/9pbeN27dvIygoCEqlEo6Ojhg2bBiysrIq+lCIiIj
IBBk17Ny5cwe+vr6wtLTEjh07cP78eSxevBhVq1aV2ixcuBDLli3DqlWrkJCQAHt7e/j7++PBgwdSm6CgIJw7dw6xsbHYunUrDhw4gOHDhxvjkIiIiMjEKIQQwlg7nzx5Mg4ePIjffvut2OVCCLi5uWHChAmYOHEiACAzMxNqtRrR0dEYOHAgLly4AC8vLxw9ehQ+Pj4AgJ07d6J79+64ceMG3NzcnlqHTqeDSqVCZmYmlEql4Q6QiIioJMW9wqEiyeB1EaX9+23Unp1ffvkFPj4+ePPNN+Hq6oqmTZviq6++kpYnJSUhNTUVfn5+0jyVSoWWLVsiPj4eABAfHw9HR0cp6ACAn58fzMzMkJCQUOx+s7OzodPp9D5EREQkT0YNO1euXMHKlStRt25d/PrrrxgxYgRGjx6NNWvWAABSU1MBAGq1Wm89tVotLUtNTYWrq6vecgsLCzg5OUltHhcREQGVSiV9atasaehDIyIiIhNh1LBTUFCA1157DfPmzUPTpk0xfPhwvPfee1i1alW57jc8PByZmZnS5/r16+W6PyIiIjIeo4ad6tWrw8vLS2+ep6cnkpOTAQAajQYAkJaWptcmLS1NWqbRaJCenq63PC8vD7dv35baPM7a2hpKpVLvQ0RERPJk1LDj6+uLxMREvXkXL15E7dq1AQAeHh7QaDSIi4uTlut0OiQkJECr1QIAtFotMjIycPz4canNnj17UFBQgJYtW1bAURAREZEpszDmzseNG4fWrVtj3rx56N+/P44cOYIvv/wSX375JQBAoVBg7NixmDNnDurWrQsPDw9MnToVbm5u6N27N4CHPUFdu3aVLn/l5uYiLCwMAwcOLNWdWERERCRvRg07zZs3x+bNmxEeHo5Zs2bBw8MDn376KYKCgqQ2H374Ie7du4fhw4cjIyMDbdq0wc6dO2FjYyO1iYmJQVhYGDp37gwzMzMEBgZi2bJlxjgkIiIiMjFGfc6OqeBzdoiIqMLxOTvPrVI8Z4eIiIiovDHsEBERkawZdcwOERHRC8HYl6xecOzZISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZszB2AURERGQEeyP0pzuGG6eOCsCeHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1o4adGTNmQKFQ6H0aNGggLX/w4AFCQ0Ph7OwMBwcHBAYGIi0tTW8bycnJCAgIgJ2dHVxdXTFp0iTk5eVV9KEQERGRibIwdgENGzbE7t27pWkLi/+VNG7cOGzbtg0bN26ESqVCWFgY+vbti4MHDwIA8vPzERAQAI1Gg0OHDiElJQWDBw+GpaUl5s2bV+HHQkRERKbH6GHHwsICGo2myPzMzExERkZi3bp16NSpEwAgKioKnp6eOHz4MFq1aoVdu3bh/Pnz2L17N9RqNZo0aYLZs2fjo48+wowZM2BlZVXRh0NEREQmxuhjdv766y+4ubnh5ZdfRlBQEJKTkwEAx48fR25uLvz8/KS2DRo0QK1atRAfHw8AiI+Ph7e3N9RqtdTG398fOp0O586dq9gDISIiIpNk1J6dli1bIjo6GvXr10dKSgpmzpyJtm3b4uzZs0hNTYWVlRUcHR311lGr1UhNTQUApKam6gWdwuWFy0qSnZ2N7OxsaVqn0xnoiIiIiMjUGDXsdOvWTfq6cePGaNmyJWrXro3vv/8etra25bbfiIgIzJw5s9y2T0RERKbD6JexHuXo6Ih69erh0qVL0Gg0yMnJQUZGhl6btLQ0aYyPRqMpcndW4XRx44AKhYeHIzMzU/pcv37dsAd
CREREJsOkwk5WVhYuX76M6tWro1mzZrC0tERcXJy0PDExEcnJydBqtQAArVaLM2fOID09XWoTGxsLpVIJLy+vEvdjbW0NpVKp9yEiIiJ5MuplrIkTJ6Jnz56oXbs2bt68ienTp8Pc3ByDBg2CSqXCsGHDMH78eDg5OUGpVGLUqFHQarVo1aoVAKBLly7w8vLCO++8g4ULFyI1NRVTpkxBaGgorK2tjXloREREZCKMGnZu3LiBQYMG4Z9//oGLiwvatGmDw4cPw8XFBQCwdOlSmJmZITAwENnZ2fD398eKFSuk9c3NzbF161aMGDECWq0W9vb2CA4OxqxZs4x1SERERGRiFEIIYewijE2n00GlUiEzM5OXtIiIyPD2Rhi7gqfrGG7sCp5Zaf9+m9SYHSIiIiJDY9ghIiIiWWPYISIiIllj2CEiIiJZK1PYuXLliqHrICIiIioXZQo7r7zyCjp27Ii1a9fiwYMHhq6JiIiIyGDKFHb++OMPNG7cGOPHj4dGo8H777+PI0eOGLo2IiIioudWprDTpEkTfPbZZ7h58ya+/vprpKSkoE2bNmjUqBGWLFmCW7duGbpOIiIiojJ5rgHKFhYW6Nu3LzZu3IgFCxbg0qVLmDhxImrWrInBgwcjJSXFUHUSERERlclzhZ1jx45h5MiRqF69OpYsWYKJEyfi8uXLiI2Nxc2bN9GrVy9D1UlERERUJmV6N9aSJUsQFRWFxMREdO/eHd988w26d+8OM7OH2cnDwwPR0dFwd3c3ZK1EREREz6xMYWflypUYOnQoQkJCUL169WLbuLq6IjIy8rmKIyIiInpefBEo+CJQIiIqZ5XhRaDFMfGXg5bri0CjoqKwcePGIvM3btyINWvWlGWTREREROWiTGEnIiIC1apVKzLf1dUV8+bNe+6iiIiIiAylTGEnOTkZHh4eRebXrl0bycnJz10UERERkaGUKey4urri9OnTReafOnUKzs7Oz10UERERkaGUKewMGjQIo0ePxt69e5Gfn4/8/Hzs2bMHY8aMwcCBAw1dIxEREVGZlenW89mzZ+Pq1avo3LkzLCwebqKgoACDBw/mmB0iIiIyKWUKO1ZWVvjuu+8we/ZsnDp1Cra2tvD29kbt2rUNXR8RERHRcylT2ClUr1491KtXz1C1EBERERlcmcJOfn4+oqOjERcXh/T0dBQUFOgt37Nnj0GKIyIiInpeZQo7Y8aMQXR0NAICAtCoUSMoFApD10VERERkEGUKOxs2bMD333+P7t27G7oeIiIiIoMq8wDlV155xdC10AtiaexFvelxr3PcFxERlZ8yPWdnwoQJ+Oyzz8B3iBIREZGpK1PPzu+//469e/dix44daNiwISwtLfWWb9q0ySDFERERET2vMoUdR0dH9OnTx9C1EJXo8UtfAC9/EZEJ2xth7AroEWUKO1FRUYaug15gDDJERFSeyjRmBwDy8vKwe/durF69Gnfv3gUA3Lx5E1lZWQYrjoiIiOh5laln59q1a+jatSuSk5ORnZ2N119/HVWqVMGCBQuQnZ2NVatWGbpOIiIiojIpU8/OmDFj4OPjgzt37sDW1laa36dPH8TFxRmsOCIiIqLnVaaend9++w2HDh2ClZWV3nx3d3f897//NUhhRERERIZQpp6dgoIC5OfnF5l/48YNVKlS5bmLIiIiIjKUMoWdLl264NNPP5WmFQoFsrKyMH36dL5CgoiIiExKmcLO4sWLcfDgQXh5eeHBgwd46623pEtYCxYsKFMh8+fPh0KhwNixY6V5Dx48QGhoKJydneHg4IDAwECkpaXprZecnIyAgADY2dnB1dUVkyZNQl5eXplqICIiIvkp05idGjVq4NSpU9iwYQNOnz6NrKwsDBs2DEFBQXoDlkvr6NGjWL16NRo3bqw3f9y4cdi2bRs2btwIlUqFsLAw9O3bFwcPHgQA5OfnIyAgABqNBocOHUJKSgoGDx4MS0tLzJs3ryyHRkRERDJTprADABY
WFnj77befu4CsrCwEBQXhq6++wpw5c6T5mZmZiIyMxLp169CpUycADx9m6OnpicOHD6NVq1bYtWsXzp8/j927d0OtVqNJkyaYPXs2PvroI8yYMaPIAGoiIiJ68ZQp7HzzzTdPXD548OBSbys0NBQBAQHw8/PTCzvHjx9Hbm4u/Pz8pHkNGjRArVq1EB8fj1atWiE+Ph7e3t5Qq9VSG39/f4wYMQLnzp1D06ZNi91ndnY2srOzpWmdTlfqeomIiKhyKVPYGTNmjN50bm4u7t+/DysrK9jZ2ZU67GzYsAF//PEHjh49WmRZamoqrKys4OjoqDdfrVYjNTVVavNo0ClcXrisJBEREZg5c2apaiQiIqLKrUwDlO/cuaP3ycrKQmJiItq0aYP169eXahvXr1/HmDFjEBMTAxsbm7KUUWbh4eHIzMyUPtevX6/Q/RMREVHFKfO7sR5Xt25dzJ8/v0ivT0mOHz+O9PR0vPbaa7CwsICFhQX279+PZcuWwcLCAmq1Gjk5OcjIyNBbLy0tDRqNBgCg0WiK3J1VOF3YpjjW1tZQKpV6HyIiIpIng4Ud4OGg5Zs3b5aqbefOnXHmzBmcPHlS+vj4+CAoKEj62tLSUu/1E4mJiUhOToZWqwUAaLVanDlzBunp6VKb2NhYKJVKeHl5GfLQiIiIqJIq05idX375RW9aCIGUlBR8/vnn8PX1LdU2qlSpgkaNGunNs7e3h7OzszR/2LBhGD9+PJycnKBUKjFq1ChotVq0atUKwMOHG3p5eeGdd97BwoULkZqaiilTpiA0NBTW1tZlOTQyEUtjLxq7BCIikokyhZ3evXvrTSsUCri4uKBTp05YvHixIeoCACxduhRmZmYIDAxEdnY2/P39sWLFCmm5ubk5tm7dihEjRkCr1cLe3h7BwcGYNWuWwWqgyqO4gDTu9XpGqISIiExJmcJOQUGBoesAAOzbt09v2sbGBl988QW++OKLEtepXbs2tm/fXi71EBERUeVn0DE7RERERKamTD0748ePL3XbJUuWlGUXRERERAZRprBz4sQJnDhxArm5uahfvz4A4OLFizA3N8drr70mtVMoFIapkqgYHMRMRESlUaaw07NnT1SpUgVr1qxB1apVATx80OCQIUPQtm1bTJgwwaBFEhEREZVVmcLO4sWLsWvXLinoAEDVqlUxZ84cdOnShWGHiIhIDvZG6E93DDdOHc+pTGFHp9Ph1q1bRebfunULd+/efe6iqHLird9ERGSKynQ3Vp8+fTBkyBBs2rQJN27cwI0bN/Djjz9i2LBh6Nu3r6FrJCIiIiqzMvXsrFq1ChMnTsRbb72F3NzchxuysMCwYcOwaNEigxZIRERE9DzKFHbs7OywYsUKLFq0CJcvXwYA1KlTB/b29gYtjoiIiOh5PddDBVNSUpCSkoK6devC3t4eQghD1UVERERkEGXq2fnnn3/Qv39/7N27FwqFAn/99RdefvllDBs2DFWrVjXo+7GInsfjg6Y5YJqI6MVTpp6dcePGwdLSEsnJybCzs5PmDxgwADt37jRYcURERETPq0w9O7t27cKvv/6KGjVq6M2vW7curl27ZpDCSB5M7SnHvD2eiOjFU6aenXv37un16BS6ffs2rK2tn7soIiIiIkMpU9hp27YtvvnmG2laoVCgoKAACxcuRMeOHQ1WHBEREdHzKtNlrIULF6Jz5844duwYcnJy8OGHH+LcuXO4ffs2Dh48aOgaiYiIiMqsTGGnUaNGuHjxIj7//HNUqVIFWVlZ6Nu3L0JDQ1G9enVD10hUrkozrojjeoiIKq9nDju5ubno2rUrVq1ahY8//rg8aiIiIiIymGces2NpaYnTp0+XRy1EREREBlemAcpvv/02IiMjDV0LERERkcGVacxOXl4evv76a+zevRvNmjUr8k6sJUuWGKQ4IiIiouf1TGHnypUrcHd3x9mzZ/Haa68BAC5e1B/cqVAoDFcdERER0XN6prBTt25dpKSkYO/
evQAevh5i2bJlUKvV5VIcERER0fN6pjE7j7/VfMeOHbh3755BCyIiIiIypDINUC70ePghIiIiMjXPFHYUCkWRMTkco0NERESm7JnG7AghEBISIr3s88GDB/jggw+K3I21adMmw1VIRERE9ByeKewEBwfrTb/99tsGLYaIiIjI0J4p7ERFRZVXHURERETl4rkGKBMRERGZOoYdIiIikrUyvS6CCACWxl58eiMiIrnbG2HsCugp2LNDREREssawQ0RERLLGsENERESyZtSws3LlSjRu3BhKpRJKpRJarRY7duyQlj948AChoaFwdnaGg4MDAgMDkZaWpreN5ORkBAQEwM7ODq6urpg0aRLy8vIq+lCIiIjIRBk17NSoUQPz58/H8ePHcezYMXTq1Am9evXCuXPnAADjxo3Dli1bsHHjRuzfvx83b95E3759pfXz8/MREBCAnJwcHDp0CGvWrEF0dDSmTZtmrEMiIiIiE6MQJvY2TycnJyxatAj9+vWDi4sL1q1bh379+gEA/vzzT3h6eiI+Ph6tWrXCjh070KNHD9y8eRNqtRoAsGrVKnz00Ue4desWrKysSrVPnU4HlUqFzMxMKJXKcjs2uXmR7sYa93o9Y5dARKbqRbobq2O4sSvQU9q/3yYzZic/Px8bNmzAvXv3oNVqcfz4ceTm5sLPz09q06BBA9SqVQvx8fEAgPj4eHh7e0tBBwD8/f2h0+mk3qHiZGdnQ6fT6X2IiIhInoweds6cOQMHBwdYW1vjgw8+wObNm+Hl5YXU1FRYWVnB0dFRr71arUZqaioAIDU1VS/oFC4vXFaSiIgIqFQq6VOzZk3DHhQRERGZDKM/VLB+/fo4efIkMjMz8cMPPyA4OBj79+8v132Gh4dj/Pjx0rROp2PgISIiepriLtmZ2KWt4hg97FhZWeGVV14BADRr1gxHjx7FZ599hgEDBiAnJwcZGRl6vTtpaWnQaDQAAI1GgyNHjuhtr/BurcI2xbG2toa1tbWBj4SIiIhMkdHDzuMKCgqQnZ2NZs2awdLSEnFxcQgMDAQAJCYmIjk5GVqtFgCg1Woxd+5cpKenw9XVFQAQGxsLpVIJLy8vox0DyU9xg7E5aJmIqHIwatgJDw9Ht27dUKtWLdy9exfr1q3Dvn378Ouvv0KlUmHYsGEYP348nJycoFQqMWrUKGi1WrRq1QoA0KVLF3h5eeGdd97BwoULkZqaiilTpiA0NJQ9Nwb2It15RURE8mLUsJOeno7BgwcjJSUFKpUKjRs3xq+//orXX38dALB06VKYmZkhMDAQ2dnZ8Pf3x4oVK6T1zc3NsXXrVowYMQJarRb29vYIDg7GrFmzjHVIREREZGJM7jk7xsDn7Dwde3aK4mUsIgLwYj1npzhGHKBc6Z6zQ0RERFQeGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1iyMXQCZpqWxF41dAhERVQZ7I4rO6xhe8XU8AXt2iIiISNYYdoiIiEjWGHaIiIhI1jhmh6iMHh/XNO71ekaqhIiInoQ9O0RERCRrDDtEREQkaww7REREJGsMO0RERCRrDDtEREQka7wbi4iI6FkU98RgMmns2SEiIiJZY9ghIiIiWeNlLCIDKe7lqXzQIBGR8bFnh4iIiGSNYYeIiIhkjWGHiIiIZI1jdqjYsSZERERywbBDVI74ZnQiIuPjZSwiIiKSNaOGnYiICDRv3hxVqlSBq6srevfujcTERL02Dx48QGhoKJydneHg4IDAwECkpaXptUlOTkZAQADs7Ozg6uqKSZMmIS8vryIPhahUlsZeLPIhIqLyZdSws3//foSGhuLw4cOIjY1Fbm4uunTpgnv37kltxo0bhy1btmDjxo3Yv38/bt68ib59+0rL8/PzERAQgJycHBw6dAhr1qxBdHQ
0pk2bZoxDIiIiIhOjEEIIYxdR6NatW3B1dcX+/fvRrl07ZGZmwsXFBevWrUO/fv0AAH/++Sc8PT0RHx+PVq1aYceOHejRowdu3rwJtVoNAFi1ahU++ugj3Lp1C1ZWVk/dr06ng0qlQmZmJpRKZbkeoyli74JxcRwPUSXDd2M9XcfwCtlNaf9+m9SYnczMTACAk5MTAOD48ePIzc2Fn5+f1KZBgwaoVasW4uPjAQDx8fHw9vaWgg4A+Pv7Q6fT4dy5cxVYPREREZkik7kbq6CgAGPHjoWvry8aNWoEAEhNTYWVlRUcHR312qrVaqSmpkptHg06hcsLlxUnOzsb2dnZ0rROpzPUYRAREZGJMZmwExoairNnz+L3338v931FRERg5syZ5b4fIiKiF9Ljl/oq6LJWSUziMlZYWBi2bt2KvXv3okaNGtJ8jUaDnJwcZGRk6LVPS0uDRqOR2jx+d1bhdGGbx4WHhyMzM1P6XL9+3YBHQ0RERKbEqD07QgiMGjUKmzdvxr59++Dh4aG3vFmzZrC0tERcXBwCAwMBAImJiUhOToZWqwUAaLVazJ07F+np6XB1dQUAxMbGQqlUwsvLq9j9Wltbw9rauhyPjKj0+OBBIqLyZdSwExoainXr1uHnn39GlSpVpDE2KpUKtra2UKlUGDZsGMaPHw8nJycolUqMGjUKWq0WrVq1AgB06dIFXl5eeOedd7Bw4UKkpqZiypQpCA0NZaAhIiIi44adlStXAgA6dOigNz8qKgohISEAgKVLl8LMzAyBgYHIzs6Gv78/VqxYIbU1NzfH1q1bMWLECGi1Wtjb2yM4OBizZs2qqMMgIiIiE2b0y1hPY2Njgy+++AJffPFFiW1q166N7du3G7I0IiIikgmTGKBMREREVF4YdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWTOZ1EUT0UHFvoeeDBomIyo49O0RERCRrDDtEREQkaww7REREJGsMO0RERCRrDDtEREQka7wb6wVU3N0+RERUjL0Rxq6ADIA9O0RERCRrDDtEREQkaww7REREJGsMO0RERCRrDDtEREQkaww7REREJGsMO0RERCRrDDtEREQka3yoIFElUNyDIMe9Xs8IlRARVT4MOzLHpyUTEdGLjmGHqJJ6PMiyp4eIqHgMO0QvEF4OI6IXEQcoExERkayxZ4dIJthrQ0RUPPbsEBERkawx7BAREZGs8TIWkYzx0QNEROzZISIiIplj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWTNq2Dlw4AB69uwJNzc3KBQK/PTTT3rLhRCYNm0aqlevDltbW/j5+eGvv/7Sa3P79m0EBQVBqVTC0dERw4YNQ1ZWVgUeBREREZkyo4ade/fu4dVXX8UXX3xR7PKFCxdi2bJlWLVqFRISEmBvbw9/f388ePBAahMUFIRz584hNjYWW7duxYEDBzB8+PCKOgSiSm9p7EW9DxGR3Bj1oYLdunVDt27dil0mhMCnn36KKVOmoFevXgCAb775Bmq1Gj/99BMGDhyICxcuYOfOnTh69Ch8fHwAAMuXL0f37t3xySefwM3NrcKOhYiIiEyTyY7ZSUpKQmpqKvz8/KR5KpUKLVu2RHx8PAAgPj4ejo6OUtABAD8/P5iZmSEhIaHEbWdnZ0On0+l9iIiISJ5MNuykpqYCANRqtd58tVotLUtNTYWrq6vecgsLCzg5OUltihMREQGVSiV9atasaeDqiYiIyFSYbNgpT+Hh4cjMzJQ+169fN3ZJREREVE5MNuxoNBoAQFpamt78tLQ0aZlGo0F6erre8ry8PNy+fVtqUxxra2solUq9DxEREcmTyYYdDw8PaDQaxMXFSfN0Oh0SEhKg1WoBAFqtFhkZGTh+/LjUZs+ePSgoKEDLli0rvGYiIiIyPUa9GysrKwuXLl2SppOSknDy5Ek4OTmhVq1aGDt2LObMmYO6devCw8MDU6dOhZubG3r37g0
A8PT0RNeuXfHee+9h1apVyM3NRVhYGAYOHMg7sYiIiAiAkcPOsWPH0LFjR2l6/PjxAIDg4GBER0fjww8/xL179zB8+HBkZGSgTZs22LlzJ2xsbKR1YmJiEBYWhs6dO8PMzAyBgYFYtmxZhR8LERERmSaFEEIYuwhj0+l0UKlUyMzMrPTjd/hQOCoP416vZ+wSiIxjb4SxK5CHjuHlstnS/v022TE7RERERIZg1MtYRFQ5PN5jyJ4eIqpM2LNDREREssaeHSIiokIcoyNLDDtE9MyKGwjPS1tEZKoYdiox3nlFpoyBiIhMBcfsEBERkawx7BAREZGsMewQERGRrHHMDhEZBMeQEZGpYs8OERERyRp7doiowvAOLSIyBvbsEBERkayxZ4eIjIrv3SKi8saeHSIiIpI19uwQkUkpzV1dpen94fggIirEsFNJ8LZeIiID40s/XxgMO0QkC2X5DwF7f4heDAw7RFTpsKeTiJ4FBygTERGRrLFnh4joEby0JVMcn/NCY9ghohdGWS9/8VlARJUbL2MRERGRrLFnh4joGfFSF1Hlwp4dIiIikjX27BARmTD2IhE9P4YdIqJyYKjXXhDR82PYISIyAD7Bmch0MewQEZkQPh2ayPAYdoiIZIi9RkT/w7Bjovi/OyIqCX8/lAKfmEyP4K3nREREJGvs2SEiekGY2msvDHapjb049BQMOyaAXdJEVJmV9neYscMVvbhkE3a++OILLFq0CKmpqXj11VexfPlytGjRwthlERFVKsb+z5ehep/ir/yjN6192blc1yPTJouw891332H8+PFYtWoVWrZsiU8//RT+/v5ITEyEq6urscsjIqIyKi58tUr+0giVPBuGJtOiEEIIYxfxvFq2bInmzZvj888/BwAUFBSgZs2aGDVqFCZPnvzU9XU6HVQqFTIzM6FUKsu7XKP/z4mIqDKryLBT1pBSkWHn8X0Vx+hhq2N4uWy2tH+/K33PTk5ODo4fP47w8P+dSDMzM/j5+SE+Pt6IlT3EYENEVLziQsvhWsONUEnJjB0kSrN/Q23HkMdRJOx1NNimy6TSh52///4b+fn5UKvVevPVajX+/PPPYtfJzs5Gdna2NJ2ZmQngYUI0tAf3sgy+TSIiQ2h+I6rIvKM1hlTY/u4V08Y7cflTt1Pcesa0+9xNg7QxttLU2MLdqci8I1dvP3W98vj7+uh2n3aRqtKHnbKIiIjAzJkzi8yvWbOmEaohIjIlnxu7AJKjUeX7c3X37l2oVKoSl1f6sFOtWjWYm5sjLS1Nb35aWho0Gk2x64SHh2P8+PHSdEFBAW7fvg1nZ2coFIpyrfdROp0ONWvWxPXr1ytkrBD9D8+9cfC8Gw/PvXHwvJcvIQTu3r0LNze3J7ar9GHHysoKzZo1Q1xcHHr37g3gYXiJi4tDWFhYsetYW1vD2tpab56jo2M5V1oypVLJfwRGwnNvHDzvxsNzbxw87+XnST06hSp92AGA8ePHIzg4GD4+PmjRogU+/fRT3Lt3D0OGlN+1ZyIiIqocZBF2BgwYgFu3bmHatGlITU1FkyZNsHPnziKDlomIiOjFI4uwAwBhYWElXrYyVdbW1pg+fXqRS2pU/njujYPn3Xh47o2D5900yOKhgkREREQlMTN2AURERETliWGHiIiIZI1hh4iIiGSNYYeIiIhkjWHHwCIiItC8eXNUqVIFrq6u6N27NxITE/XaPHjwAKGhoXB2doaDgwMCAwOLPAE6OTkZAQEBsLOzg6urKyZNmoS8vLyKPJRKbf78+VAoFBg7dqw0j+e9/Pz3v//F22+/DWdnZ9ja2sLb2xvHjh2TlgshMG3aNFSvXh22trbw8/PDX3/9pbeN27dvIygoCEqlEo6Ojhg2bBiysvhuuSfJz8/H1KlT4eHhAVtbW9SpUwezZ8/We08Qz/3zO3DgAHr27Ak3NzcoFAr89NNPessNdY5Pnz6Ntm3bwsbGBjVr1sTChQvL+9BeHIIMyt/fX0R
FRYmzZ8+KkydPiu7du4tatWqJrKwsqc0HH3wgatasKeLi4sSxY8dEq1atROvWraXleXl5olGjRsLPz0+cOHFCbN++XVSrVk2Eh4cb45AqnSNHjgh3d3fRuHFjMWbMGGk+z3v5uH37tqhdu7YICQkRCQkJ4sqVK+LXX38Vly5dktrMnz9fqFQq8dNPP4lTp06JN954Q3h4eIh///1XatO1a1fx6quvisOHD4vffvtNvPLKK2LQoEHGOKRKY+7cucLZ2Vls3bpVJCUliY0bNwoHBwfx2WefSW147p/f9u3bxccffyw2bdokAIjNmzfrLTfEOc7MzBRqtVoEBQWJs2fPivXr1wtbW1uxevXqijpMWWPYKWfp6ekCgNi/f78QQoiMjAxhaWkpNm7cKLW5cOGCACDi4+OFEA//YZmZmYnU1FSpzcqVK4VSqRTZ2dkVewCVzN27d0XdunVFbGysaN++vRR2eN7Lz0cffSTatGlT4vKCggKh0WjEokWLpHkZGRnC2tparF+/XgghxPnz5wUAcfToUanNjh07hEKhEP/973/Lr/hKLiAgQAwdOlRvXt++fUVQUJAQgue+PDwedgx1jlesWCGqVq2q97vmo48+EvXr1y/nI3ox8DJWOcvMzAQAODk5AQCOHz+O3Nxc+Pn5SW0aNGiAWrVqIT4+HgAQHx8Pb29vvSdA+/v7Q6fT4dy5cxVYfeUTGhqKgIAAvfML8LyXp19++QU+Pj5488034erqiqZNm+Krr76SliclJSE1NVXv3KtUKrRs2VLv3Ds6OsLHx0dq4+fnBzMzMyQkJFTcwVQyrVu3RlxcHC5evAgAOHXqFH7//Xd069YNAM99RTDUOY6Pj0e7du1gZWUltfH390diYiLu3LlTQUcjX7J5grIpKigowNixY+Hr64tGjRoBAFJTU2FlZVXkxaNqtRqpqalSm8dfdVE4XdiGitqwYQP++OMPHD16tMgynvfyc+XKFaxcuRLjx4/H//3f/+Ho0aMYPXo0rKysEBwcLJ274s7to+fe1dVVb7mFhQWcnJx47p9g8uTJ0Ol0aNCgAczNzZGfn4+5c+ciKCgIAHjuK4ChznFqaio8PDyKbKNwWdWqVcul/hcFw045Cg0NxdmzZ/H7778buxTZu379OsaMGYPY2FjY2NgYu5wXSkFBAXx8fDBv3jwAQNOmTXH27FmsWrUKwcHBRq5O3r7//nvExMRg3bp1aNiwIU6ePImxY8fCzc2N557oEbyMVU7CwsKwdetW7N27FzVq1JDmazQa5OTkICMjQ699WloaNBqN1Obxu4QKpwvbkL7jx48jPT0dr732GiwsLGBhYYH9+/dj2bJlsLCwgFqt5nkvJ9WrV4eXl5fePE9PTyQnJwP437kr7tw+eu7T09P1lufl5eH27ds8908wadIkTJ48GQMHDoS3tzfeeecdjBs3DhEREQB47iuCoc4xf/+UL4YdAxNCICwsDJs3b8aePXuKdEs2a9YMlpaWiIuLk+YlJiYiOTkZWq0WAKDVanHmzBm9fxyxsbFQKpVF/qjQQ507d8aZM2dw8uRJ6ePj44OgoCDpa5738uHr61vk8QoXL15E7dq1AQAeHh7QaDR6516n0yEhIUHv3GdkZOD48eNSmz179qCgoAAtW7asgKOonO7fvw8zM/1f4+bm5igoKADAc18RDHWOtVotDhw4gNzcXKlNbGws6tevz0tYhmDsEdJyM2LECKFSqcS+fftESkqK9Ll//77U5oMPPhC1atUSe/bsEceOHRNarVZotVppeeEt0F26dBEnT54UO3fuFC4uLrwF+hk9ejeWEDzv5eXIkSPCwsJCzJ07V/z1118iJiZG2NnZibVr10pt5s+fLxwdHcXPP/8sTp8+LXr16lXsrblNmzYVCQkJ4vfffxd169bl7c9PERwcLF566SXp1vNNmzaJatWqiQ8//FBqw3P//O7evStOnDghTpw4IQCIJUuWiBMnTohr164JIQxzjjM
yMoRarRbvvPOOOHv2rNiwYYOws7PjrecGwrBjYACK/URFRUlt/v33XzFy5EhRtWpVYWdnJ/r06SNSUlL0tnP16lXRrVs3YWtrK6pVqyYmTJggcnNzK/hoKrfHww7Pe/nZsmWLaNSokbC2thYNGjQQX375pd7ygoICMXXqVKFWq4W1tbXo3LmzSExM1Gvzzz//iEGDBgkHBwehVCrFkCFDxN27dyvyMCodnU4nxowZI2rVqiVsbGzEyy+/LD7++GO925d57p/f3r17i/29HhwcLIQw3Dk+deqUaNOmjbC2thYvvfSSmD9/fkUdouwphHjkUZtEREREMsMxO0RERCRrDDtEREQkaww7REREJGsMO0RERCRrDDtEREQkaww7REREJGsMO0RERCRrDDtEJFvu7u749NNPjV0GERkZww4RPVF8fDzMzc0REBBg7FKIiMqEYYeInigyMhKjRo3CgQMHcPPmTWOXI2uPvgSSiAyHYYeISpSVlYXvvvsOI0aMQEBAAKKjo/WW79u3DwqFAnFxcfDx8YGdnR1at25d5C3oK1euRJ06dWBlZYX69evj22+/1VuuUCiwevVq9OjRA3Z2dvD09ER8fDwuXbqEDh06wN7eHq1bt8bly5eldS5fvoxevXpBrVbDwcEBzZs3x+7du0s8lqFDh6JHjx5683Jzc+Hq6orIyMhi17l27Rp69uyJqlWrwt7eHg0bNsT27dul5efOnUOPHj2gVCpRpUoVtG3bVqqxoKAAs2bNQo0aNWBtbY0mTZpg586d0rpXr16FQqHAd999h/bt28PGxgYxMTEAgP/85z/w9PSEjY0NGjRogBUrVpR4XERUCsZ+ORcRma7IyEjh4+MjhHj4ss86deqIgoICaXnhCxJbtmwp9u3bJ86dOyfatm0rWrduLbXZtGmTsLS0FF988YVITEwUixcvFubm5mLPnj1SGwDipZdeEt99951ITEwUvXv3Fu7u7qJTp05i586d4vz586JVq1aia9eu0jonT54Uq1atEmfOnBEXL14UU6ZMETY2NtKbqIUQonbt2mLp0qVCCCEOHjwozM3Nxc2bN/Vqs7e3L/GllwEBAeL1118Xp0+fFpcvXxZbtmwR+/fvF0IIcePGDeHk5CT69u0rjh49KhITE8XXX38t/vzzTyGEEEuWLBFKpVKsX79e/Pnnn+LDDz8UlpaW4uLFi0IIIZKSkgQA4e7uLn788Udx5coVcfPmTbF27VpRvXp1ad6PP/4onJycRHR0dJm+h0TEt54T0RO0bt1afPrpp0IIIXJzc0W1atXE3r17peWFYWf37t3SvG3btgkA4t9//5W28d577+lt98033xTdu3eXpgGIKVOmSNPx8fECgIiMjJTmrV+/XtjY2Dyx3oYNG4rly5dL04+GHSGE8PLyEgsWLJCme/bsKUJCQkrcnre3t5gxY0axy8LDw4WHh4fIyckpdrmbm5uYO3eu3rzmzZuLkSNHCiH+F3YKz2+hOnXqiHXr1unNmz17ttBqtSXWSURPxstYRFSsxMREHDlyBIMGDQIAWFhYYMCAAcVe8mncuLH0dfXq1QEA6enpAIALFy7A19dXr72vry8uXLhQ4jbUajUAwNvbW2/egwcPoNPpADy8xDZx4kR4enrC0dERDg4OuHDhApKTk0s8pnfffRdRUVEAgLS0NOzYsQNDhw4tsf3o0aMxZ84c+Pr6Yvr06Th9+rS07OTJk2jbti0sLS2LrKfT6XDz5s1SHbePj4/09b1793D58mUMGzYMDg4O0mfOnDl6l/CI6NlYGLsAIjJNkZGRyMvLg5ubmzRPCAFra2t8/vnnUKlU0vxH/+ArFAoAD8esPIvitvGk7U6cOBGxsbH45JNP8Morr8DW1hb9+vVDTk5OifsYPHgwJk+ejPj4eBw6dAgeHh5o27Ztie3fffdd+Pv7Y9u2bdi1axciIiKwePFijBo1Cra2ts90fCWxt7eXvs7KygIAfPXVV2jZsqVeO3N
zc4Psj+hFxJ4dIioiLy8P33zzDRYvXoyTJ09Kn1OnTsHNzQ3r168v9bY8PT1x8OBBvXkHDx6El5fXc9V48OBBhISEoE+fPvD29oZGo8HVq1efuI6zszN69+6NqKgoREdHY8iQIU/dT82aNfHBBx9g06ZNmDBhAr766isAD3uifvvtt2LvoFIqlXBzc3vm41ar1XBzc8OVK1fwyiuv6H08PDyeWisRFY89O0RUxNatW3Hnzh0MGzZMrwcHAAIDAxEZGYkPPvigVNuaNGkS+vfvj6ZNm8LPzw9btmzBpk2bnnjnVGnUrVsXmzZtQs+ePaFQKDB16tRS9Sa9++676NGjB/Lz8xEcHPzEtmPHjkW3bt1Qr1493LlzB3v37oWnpycAICwsDMuXL8fAgQMRHh4OlUqFw4cPo0WLFqhfvz4mTZqE6dOno06dOmjSpAmioqJw8uRJ6Y6rksycOROjR4+GSqVC165dkZ2djWPHjuHOnTsYP3586U8QEUkYdoioiMjISPj5+RUJOsDDsLNw4UK98StP0rt3b3z22Wf45JNPMGbMGHh4eCAqKgodOnR4rhqXLFmCoUOHonXr1qhWrRo++ugjaTzPk/j5+aF69epo2LCh3iW64uTn5yM0NBQ3btyAUqlE165dsXTpUgAPe4n27NmDSZMmoX379jA3N0eTJk2kcTqjR49GZmYmJkyYgPT0dHh5eeGXX35B3bp1n7jPd999F3Z2dli0aBEmTZoEe3t7eHt7Y+zYsaU7MURUhEIIIYxdBBFRRcnKysJLL72EqKgo9O3b19jlEFEFYM8OEb0QCgoK8Pfff2Px4sVwdHTEG2+8YeySiKiCMOwQ0QshOTkZHh4eqFGjBqKjo2FhwV9/RC8KXsYiIiIiWeOt50RERCRrDDtEREQkaww7REREJGsMO0RERCRrDDtEREQkaww7REREJGsMO0RERCRrDDtEREQkaww7REREJGv/D9BMPgpo05gcAAAAAElFTkSuQmCC", "text/plain": [ "
" ] @@ -461,24 +439,24 @@ ], "source": [ "scripts.eval_detector(\n", - " scripts.EvalDetectorConfig(\n", - " detector=detector,\n", - " task=tasks.Task.from_separate_data(\n", - " model=model,\n", - " # TODO: this won't actually be used, plausibly Tasks should be split better\n", - " # into their training and test data.\n", - " trusted_data=train_data,\n", - " # Our anomalous data is the backdoor data from above, except we use the\n", - " # MNIST test split.\n", - " anomalous_test_data=data.BackdoorDataset(\n", - " original=val_data,\n", - " backdoor=data.CornerPixelBackdoor(),\n", - " ),\n", - " # Our normal data is MNIST with added noise, this makes the images OOD\n", - " # but they shouldn't be mechanistically anomalous.\n", - " clean_test_data=data.TransformDataset(val_data, data.GaussianNoise(0.3)),\n", + " detector=detector,\n", + " # We save to a different directory to avoid overwriting the existing default eval:\n", + " save_path=detector_path / \"ood_eval\",\n", + " task=tasks.Task.from_separate_data(\n", + " model=model,\n", + " # TODO: this won't actually be used, plausibly Tasks should be split better\n", + " # into their training and test data.\n", + " trusted_data=train_data,\n", + " # Our anomalous data is the backdoor data from above, except we use the\n", + " # MNIST test split.\n", + " anomalous_test_data=data.BackdoorDataset(\n", + " original=val_data,\n", + " backdoor=data.CornerPixelBackdoor(),\n", " ),\n", - " )\n", + " # Our normal data is MNIST with added noise, this makes the images OOD\n", + " # but they shouldn't be mechanistically anomalous.\n", + " clean_test_data=data.TransformDataset(val_data, data.GaussianNoise(0.3)),\n", + " ),\n", ")" ] }, diff --git a/src/cupbearer/utils/__init__.py b/src/cupbearer/utils/__init__.py index 61fcceaa..793052c8 100644 --- a/src/cupbearer/utils/__init__.py +++ b/src/cupbearer/utils/__init__.py @@ -1,6 +1,7 @@ import codecs import importlib import pickle +from datetime import datetime from pathlib 
import Path from typing import Union @@ -117,3 +118,11 @@ def inputs_from_batch(batch): return batch[0] else: return batch + + +def log_path(base="logs", time=True): + if time: + timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + else: + timestamp = datetime.now().strftime("%Y-%m-%d") + return Path(base) / timestamp From 975289ef83a928a2884a8a3a9a7d59462cc7dff6 Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Sat, 2 Mar 2024 18:12:42 -0800 Subject: [PATCH 19/25] Add WaNet warning --- src/cupbearer/data/backdoors.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/cupbearer/data/backdoors.py b/src/cupbearer/data/backdoors.py index c4f1298b..13bf9baa 100644 --- a/src/cupbearer/data/backdoors.py +++ b/src/cupbearer/data/backdoors.py @@ -95,7 +95,16 @@ def inject_backdoor(self, img: torch.Tensor): @dataclass(kw_only=True) class WanetBackdoor(Backdoor): """Implements trigger transform from "Wanet - Imperceptible Warping-based - Backdoor Attack" by Anh Tuan Nguyen and Anh Tuan Tran, ICLR, 2021.""" + Backdoor Attack" by Anh Tuan Nguyen and Anh Tuan Tran, ICLR, 2021. + + WARNING: The backdoor trigger is a specific (randomly generated) warping pattern. + Networks are trained to only respond to this specific pattern, so evaluating + a network on a freshly initialized WanetBackdoor with a new trigger won't work. + Within a single process, just make sure you only initialize WanetBackdoor once + and then use that everywhere. + Between different processes, you need to store the trigger using the `store()` + method, and then later pass it in as the `path` argument to the new WanetBackdoor. + """ # Path to load control grid from, or None to generate a new one. # Deliberartely non-optional to avoid accidentally generating a new grid! From 1b82635c2ade7f204f7dbc8e89b596f2d1aa95fb Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Sat, 2 Mar 2024 18:13:32 -0800 Subject: [PATCH 20/25] Update gitignore We also want to ignore log dirs in e.g. 
the notebook folder --- .gitignore | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/.gitignore b/.gitignore index 6304768c..6b4b6cd2 100644 --- a/.gitignore +++ b/.gitignore @@ -4,10 +4,9 @@ *.egg-info .python-version __pycache__ -/data -/results -/slurm -/logs +data/ +logs/ +slurm/ .venv -/wandb -/dist +wandb/ +dist/ From 35220aabc9c65952ef20999d193f00947d1a6819 Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Sat, 2 Mar 2024 18:21:36 -0800 Subject: [PATCH 21/25] Update documentation somewhat --- README.md | 4 +- docs/adding_a_script.md | 139 ----------------------------------- docs/adding_a_task.md | 87 ---------------------- docs/configuration.md | 43 ----------- docs/high_level_structure.md | 70 ++++-------------- 5 files changed, 15 insertions(+), 328 deletions(-) delete mode 100644 docs/adding_a_script.md delete mode 100644 docs/configuration.md diff --git a/README.md b/README.md index eed2d3d3..ee4754e7 100644 --- a/README.md +++ b/README.md @@ -31,13 +31,13 @@ installing `cupbearer`, in particular if you want to control CUDA version etc. ## Running experiments We provide scripts in `cupbearer.scripts` for more easily running experiments. -See [demo.ipynb](demo.ipynb) for a quick example of how to use them---this is likely +See [the demo notebook](notebooks/simple_demo.ipynb) for a quick example of how to use them---this is likely also the best way to get an overview of how the components of `cupbearer` fit together. These "scripts" are Python functions and designed to be used from within Python, e.g. in a Jupyter notebook or via [submitit](https://github.com/facebookincubator/submitit/tree/main) if on Slurm. But of course you could also write a simple Python wrapper and then use -them from the CLI. Their configuration interface is designed to be very general, +them from the CLI. 
The scripts are designed to be pretty general, which sometimes comes at the cost of being a bit verbose---we recommend writing helper functions for your specific use case on top of the general script interface. Of course you can also use the components of `cupbearer` directly without going through diff --git a/docs/adding_a_script.md b/docs/adding_a_script.md deleted file mode 100644 index 4c8b6723..00000000 --- a/docs/adding_a_script.md +++ /dev/null @@ -1,139 +0,0 @@ -# Creating new scripts -You don't need to implement any scripts your new task or detector needs using the -interface described in this document. However, it's designed to work well with the -rest of `cupbearer` and probably makes sense for most cases. - -As an overview, here's how to create a new script: -1. Put a python file in `scripts` with some function `my_function`. -2. The only argument to `my_function` should be an object of a dataclass `MyConfig` - that inherits from `utils.scripts.ScriptConfig`. -3. The definition of `MyConfig` needs to be placed in its own file. -4. Use `utils.scripts.run(my_function, MyConfig)` in the python file to run the script. -5. Now you'll be able to run the script from the command line using `python -m cupbearer.scripts.my_file`. - -The rest of this doc goes into some background to understand how scripts work -in `cupbearer`. 
- -## Example walkthrough -Let's look at `eval_detector.py`: -```python -from cupbearer.scripts.conf.eval_detector_conf import Config -from cupbearer.utils.scripts import run -from torch.utils.data import Subset - - -def main(cfg: Config): - reference_data = cfg.task.build_reference_data() - anomalous_data = cfg.task.build_anomalous_data() - if cfg.max_size: - reference_data = Subset(reference_data, range(cfg.max_size)) - anomalous_data = Subset(anomalous_data, range(cfg.max_size)) - model = cfg.task.build_model() - params = cfg.task.build_params() - detector = cfg.detector.build(model=model, params=params, save_dir=cfg.dir.path) - - detector.eval( - normal_dataset=reference_data, - anomalous_datasets={"anomalous": anomalous_data}, - ) - - -if __name__ == "__main__": - run(main, Config, save_config=False) -``` -There are two key things to note here: -- We have a function `main` that takes a single argument of type `Config`. (The name of `main` doesn't matter.) -- If the script is run as the main file, we call `run(main, Config)`. - -Actually, in this case, we also have `save_config=False` in the call to `run`. By default, -`run` will save the config as a yaml file, which this flag disables. - -Here is the definition of `Config`, in `conf/eval_detector_conf.py`: -```python -@dataclass(kw_only=True) -class Config(ScriptConfig): - task: TaskConfigBase = config_group(TaskConfigBase) - detector: DetectorConfig = config_group(DetectorConfig) - max_size: Optional[int] = None - - def _set_debug(self): - super()._set_debug() - self.max_size = 2 -``` -A few things to note: -- `Config` inherits from `ScriptConfig`. All script configurations should do this. -- `Config` is a dataclass, as all configs should be. -- For the fields that are themselves dataclasses, we use `config_group` as a default. - This lets users set these fields from the command line (where you otherwise couldn't - pass dataclasses as values). 
A config group is basically a dictionary mapping from - names (that users use on the CLI) to subclasses of some base class. For example, - `config_group(DetectorConfig)` means that users can choose any of the registered - detectors. If this detector has config options, these can also be set. - Config groups are discussed in more detail in [configuration.md](configuration.md). -- There's a `_set_debug` method. This is a special method that's called when the - `--debug` flag is passed to the script. It should set all values where this makes - sense to values that lead to a fast run. (For example, this flag is always used - in unit tests.) The `super()._set_debug()` call is important, since it ensures - that `_set_debug` is called recusively on all fields that support it. - Again see [configuration.md](configuration.md) for more details. - -## The `Config` definition needs to be in its own file -There is currently a technical limitation: the definition of the `Config` class -mustn't be in the same file as the script that users will call from the CLI. That's -why all the configs are in the `conf` folder. - -The reason for this is that serializing a configuration dataclass to yaml stores -the full path of the dataclass (in order to reliably deserialize it later). If the -dataclass is defined in the main script, that path will be `__main__.Config`, which -can then not be restored from a different script. - -## `ScriptConfig` -As mentioned above, all configs for scripts should inherit from `ScriptConfig`. -Let's look at `ScriptConfig` to understand the effects of that: -```python -@dataclass(kw_only=True) -class ScriptConfig(BaseConfig): - seed: int = 0 - dir: DirConfig = mutable_field(DirConfig) - debug: bool = field(action="store_true") - debug_with_logging: bool = field(action="store_true") - - ... -``` -(See [configuration.md](configuration.md) for more details on `BaseConfig`.) 
- -This is where the `debug` flag mentioned above is defined (the `field` here -is from `simple_parsing` and extends `dataclasses.field`). Apart from that, there's -a `seed` field, since basically every script will need that. - -Perhaps most interesting is the `dir` field. This is a `DirConfig`, which has three -fields: -```python -@dataclass(kw_only=True) -class DirConfig(BaseConfig): - base: Optional[str] = None - run: str = field( - default_factory=lambda: datetime.now().strftime("%Y-%m-%d_%H-%M-%S") - ) - full: Optional[str] = None -``` -By default, `base` and `full` are `None`. This means that nothing will be logged to disk. -If `full` is set, then the path will always be `full`, no matter what `base` and `run` -are. Otherwise, if `base` is set, the path will be `base/run`. This can be useful if you -want to automatically generate new directories for each run without naming them all. -For example, -```bash -python -m cupbearer.scripts.train_detector --dir.base logs/train_detector ... -``` -would create a new directory `logs/train_detector//` for each run. - -While typically, `ScriptConfig.dir` is meant to be a newly created logging directory, -it can also sometimes take on the role of an input directory. For example, -```bash -python -m cupbearer.scripts.eval_detector --dir.full logs/train_detector/... --detector from_run ... -``` -would load a detector from the directory `logs/train_detector/...` and evaluate it -(since the `from_run` option for the detector config group uses the `dir` argument). - -How directories are handled is one of the places that seems most likely to change, -so try not to rely too much on the current version. 
diff --git a/docs/adding_a_task.md b/docs/adding_a_task.md index c7995771..b1e18ac2 100644 --- a/docs/adding_a_task.md +++ b/docs/adding_a_task.md @@ -1,88 +1 @@ # Adding a new task - -The only component that a task absolutely needs is an implementation of the -`TaskConfigBase` abstract class: -```python -class TaskConfigBase(BaseConfig, ABC): - @abstractmethod - def build_reference_data(self) -> Dataset: - pass - - @abstractmethod - def build_model(self) -> Model: - pass - - def build_params(self): - return None - - @abstractmethod - def build_anomalous_data(self) -> Dataset: - pass -``` -If your config has any parameters, you should use a dataclass to set them. E.g. -```python -@dataclass -class MyTaskConfig(TaskConfigBase): - my_required_param: str - my_optional_param: int = 42 - - ... -``` -This will automagically let you override these parameters from the command line -(and any parameters without default values will be required). - -`build_reference_data` and `build_anomalous_data` both need to return `pytorch` `Dataset`s. -`build_model` needs to return a `models.Model`, which is a special type of `flax.linen.Module`. -`build_params` can return a parameter dict for the returned `Model` (if `None`, the model -will be randomly initialized, which is usually not what you want). - -In practice, the datasets and the model will have to come from somewhere, so you'll -often implement a few things in addition to the task config class. There are predefined -interfaces for datasets and models, and if possible I suggest using those (either -using their existing implementations, or adding your own). 
For example, consider -the adversarial example task: -```python -@dataclass -class AdversarialExampleTask(TaskConfigBase): - run_path: Path - - def __post_init__(self): - self._reference_data = TrainDataFromRun(path=self.run_path) - self._anomalous_data = AdversarialExampleConfig(run_path=self.run_path) - self._model = StoredModel(path=self.run_path) - - def build_anomalous_data(self) -> Dataset: - return self._anomalous_data.build_dataset() - - def build_model(self) -> Model: - return self._model.build_model() - - def build_params(self) -> Model: - return self._model.build_params() - - def build_reference_data(self) -> Dataset: - return self._reference_data.build_dataset() -``` -This task only has one parameter, the path to the training run of a base model. -It then uses the training data of that run as reference data, and an adversarial -version of it as anomalous data. The model is just the trained base model, loaded -from disk. - -You can also add new scripts in the `scripts` directory, to generate the datasets -and/or train the model. For example, the adversarial examples task has an -associated script `make_adversarial_examples.py`. (To get the model, we can simply -use the existing `train_classifier.py` script.) - -There's no formal connection between scripts and the rest of the library---you can -leave it up to users to run the necessary preparatory scripts before using your new -task. But if feasible, you may want to automate this. For example, the `AdversarialExampleDataset` -automatically runs `make_adversarial_examples.py` if the necessary files are not found. - -Finally, you need to register your task to make it accessible from the command line -in the existing scripts. Simply add the task config class to the `TASKS` dict in `tasks/__init__.py` -(with an arbitrary name as the key). 
- -Then you should be able to run commands like -```bash -python -m cupbearer.scripts.train_detector --task my_task --detector my_detector --task.my_required_param foo -``` diff --git a/docs/configuration.md b/docs/configuration.md deleted file mode 100644 index 4d6db583..00000000 --- a/docs/configuration.md +++ /dev/null @@ -1,43 +0,0 @@ -# Configuration dataclasses -As briefly discussed in [high_level_structure.md](high_level_structure.md), `cupbearer` -makes heavy use of dataclasses for configuration. For the most part, these are just -normal dataclasses, but there are a few special things to keep in mind. - -## Inherit from `BaseConfig` -All configuration dataclasses should inherit from `cupbearer.utils.utils.BaseConfig`. -That ensures that storing configs to disk and loading them again will work correctly, -as well as a debug feature described below. - -Most dataclasses will not inherit from `BaseConfig` directly, but instead from a more -specialized class like `ScriptConfig` or `DatasetConfig`. - -## `kw_only=True` -Sometimes a parent dataclass will have some optional fields, and then a child class will -add required fields. This would usually lead to problems because required fields can't -come after optional ones. To deal with that, many dataclasses in `cupbearer` use -`@dataclass(kw_only=True)`, which makes all fields keyword-only arguments to `__init__`. - -## `_set_debug()` -It can be convenient to run a script with the fastest possible settings for debugging -error messages or for automated testing (e.g. just train for a single batch with -a single sample, use a small model, ...). In `cupbearer`, every configuration dataclass -should "know" how to set itself to such a debug mode: it should have a `_set_debug()` -method that sets all its fields to the debug values that lead to fast runs. Of course -if a config has no such values, it doesn't need to implement `_set_debug()`. - -Importantly, `_set_debug()` should also call `super()._set_debug()`. 
This ensures that -fields from the parent class are set to their debug values. It also recursively calls -`_set_debug()` on all fields that are themselves configuration dataclasses, so there's -no need to do that manually. - -## Special CLI fields -You can use `simple_parsing.helpers.field` instead of the builtin `dataclasses.field` -to get some additional functionality, most notably specifying how options can be changed -from the CLI. This will mostly be unnecessary, but can be nice for boolean flags. - -For example, the debug option described above is implemented using -```python -debug: bool = field(action="store_true") -``` -in `ScriptConfig`, which means you can call scripts using simply `--debug` instead of -`--debug True`. diff --git a/docs/high_level_structure.md b/docs/high_level_structure.md index 253d25ac..371cb4d4 100644 --- a/docs/high_level_structure.md +++ b/docs/high_level_structure.md @@ -3,32 +3,13 @@ In this document, we'll go over all the subpackages of `cupbearer` to see what r they play and how to extend them. For more details of extending `cupbearer`, see the other documentation files on specific subpackages. -## Configuration -Different parts of `cupbearer` interface with each other through many configuration -dataclasses. Each dataset, model, task, detector, script, etc. should expose all its -hyperparameters and configuration options through such a dataclass. That way, -all options will automatically be configurable from the command line. - -Many of the configuration dataclass ABCs have one or several `build()` methods that -create the actual object of interest based on the configuration. For example, -the `DetectorConfig` ABC has an abstract `build()` method that must return an -`AnomalyDetector` instance. - -See [configuration.md](configuration.md) for more details on the configuration -dataclasses and what to keep in mind when writing your own. 
- ## Helper subpackages ### `cupbearer.data` The `data` package contains implementations of basic datasets, transforms, and specialized datasets (e.g. datasets consisting only of adversarial examples). -The key interface is the `DatasetConfig` class. It has a `build()` method that -needs to return a pytorch `Dataset` instance. -In principle, you don't need to use the `DatasetConfig` interface (or anything -from the `data` package) to implement new tasks or detectors. Tasks and detectors -just pass `Dataset` instances between each other. But unless you have a good reason -to avoid the `DatasetConfig` interface, it's best to use it since it already works -with the scripts and you get some features such as configuring transforms for free. +Using this subpackage is optional, you can define tasks directly using standard +pytorch `Dataset`s. ### `cupbearer.models` Unlike the `data` package, you have to use the `models` package at the moment. @@ -37,53 +18,32 @@ to the model's activations. Using the implementations from the `models` package ensures a consistent way to get activations from models. As long as you don't want to add new model architectures, most of the details of this package won't matter. -For now, only linear computational graphs are supported, i.e. each model needs to -be a fixed sequence of computational steps performed one after the other -(like a `Sequential` module in many deep learning frameworks). A `Computation` -is just a type alias for such as sequence of steps. The `Model` class takes such a -`Computation` and is itself a `flax.linen.Module` that implements the computation. -The main thing it does on top of `flax.linen.Sequential` is that it can also return -all the activations of the model. It also has a function for plotting the architecture -of the model. - -Similar to the `DataConfig` interface, there's a `ModelConfig` with a `build()` -method that returns a `Model` instance. 
+In the future, we'll likely deprecate the `HookedModel` interface and just support
+standard `torch.nn.Module`s via pytorch hooks.

 ### `cupbearer.utils`
-The `utils` package contains many miscallaneous helper functions. You probably won't
-interact with these too much, but here are a few that it may be good to know about:
-- `utils.trainer` contains a `Trainer` class that's a very simple version of pytorch
-  lightning for flax. You certainly don't need to use this in any scripts you add,
-  but it may save you some boilerplate. NOTE: we might deprecate this in the future
-  and replace it with something like `elegy`.
-- `utils.utils.save` and `utils.utils.load` can save and store pytrees. They use the
-  `orbax` checkpointer under the hood, but add some hacky support for saving/loading
-  types.
-
-We'll cover a few more functions from the `utils` package when we talk about scripts.
+The `utils` package contains some miscellaneous helper functions. Most of these are
+mainly for internal usage, but see the example notebooks for helpful ones.

 ## Tasks
-The `tasks` package contains the `TaskConfigBase` ABC, which is the interface any
-task needs to implement, as well as all the existing tasks. To add a new task:
-1. Create a new module or subpackage in `tasks`, where you implement a new class
-   that inherits `TaskConfigBase`.
-2. Add your new class to the `TASKS` dictionary in `tasks/__init__.py`.
+The `tasks` package contains the `Task` class, which is the interface any
+task needs to implement, as well as all the existing tasks. To add a new task,
+you can either inherit `Task` or simply write a function that returns a `Task` instance.

-Often, you'll also need to implement a new type of dataset or model.
+Often, you'll also need to implement a new type of dataset or model for your task.
 That code probably belongs in the `data` and `model` packages, though sometimes
 it's a judgement call. See [adding_a_task.md](adding_a_task.md) for more details.
## Detectors -The `detectors` package is similar to `tasks`, but for anomaly detectors. In addition -to the `DetectorConfig` interface, it also contains an `AnomalyDetector` ABC, which -any detection method needs to subclass for its actual implementation. +The `detectors` package is similar to `tasks`, but for anomaly detectors. The key +interface is `AnomalyDetector`. See [adding_a_detector.md](adding_a_detector.md) for more details. ## Scripts -The `scripts` package contains command line scripts and their configurations. +The `scripts` package contains Python functions for running common workflows. Two scripts are meant to be used by all detectors/tasks: - `train_detector` trains a detector on a task and saves the trained detector to disk. - `eval_detector` evaluates a stored (or otherwise specified) detector and evaluates @@ -92,7 +52,3 @@ Two scripts are meant to be used by all detectors/tasks: All other scripts are helper scripts for specific tasks or detectors. For example, most tasks will need a script to train the model to be analyzed, and perhaps to prepare the dataset. - -There's a lot more to be said about scripts, see the [README](../README.md) for a brief -overview of *running* scripts, and [adding_a_script.md](adding_a_script.md) for details -on writing new scripts. 
From f9ab02b47e3d09b10c3dccc998f948550cc31d6e Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Sat, 2 Mar 2024 18:29:52 -0800 Subject: [PATCH 22/25] Remove simple_parsing dependency --- pyproject.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 58026bf3..77edeb37 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,7 +21,6 @@ dependencies = [ "torch~=2.0.0", "torchvision~=0.15.1", "torchattacks~=3.5.1", - "simple_parsing~=0.1.3", "lightning~=2.1.0", "torchmetrics~=1.2.0", "tensorboard", From d61c6762a15848c548cdf5a003ae26e5ed5c3218 Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Sun, 3 Mar 2024 18:09:18 -0800 Subject: [PATCH 23/25] Adjust tampering/LM code to no-config style --- src/cupbearer/data/__init__.py | 4 +- src/cupbearer/data/huggingface.py | 22 +++-------- src/cupbearer/data/tampering.py | 61 +++++++++---------------------- src/cupbearer/models/__init__.py | 3 +- tests/test_tampering.py | 55 +++++++++++++++++++--------- 5 files changed, 62 insertions(+), 83 deletions(-) diff --git a/src/cupbearer/data/__init__.py b/src/cupbearer/data/__init__.py index fca090c3..08879ef3 100644 --- a/src/cupbearer/data/__init__.py +++ b/src/cupbearer/data/__init__.py @@ -8,9 +8,9 @@ NoiseBackdoor, WanetBackdoor, ) -from .huggingface import IMDBDatasetConfig +from .huggingface import IMDBDataset from .pytorch import CIFAR10, GTSRB, MNIST, PytorchDataset -from .tampering import TamperingDataConfig +from .tampering import TamperingDataset from .toy_ambiguous_features import ToyDataset from .transforms import ( GaussianNoise, diff --git a/src/cupbearer/data/huggingface.py b/src/cupbearer/data/huggingface.py index 25b06646..18499f12 100644 --- a/src/cupbearer/data/huggingface.py +++ b/src/cupbearer/data/huggingface.py @@ -1,27 +1,15 @@ -from dataclasses import dataclass - import datasets import torch -from . 
import DatasetConfig - class IMDBDataset(torch.utils.data.Dataset): - def __init__(self): - self.dataset = datasets.load_dataset("imdb", split="train") + def __init__(self, train: bool = True): + split = "train" if train else "validation" + self.dataset = datasets.load_dataset("imdb", split=split) def __len__(self): return len(self.dataset) def __getitem__(self, idx): - return self.dataset[idx]["text"], self.dataset[idx]["label"] - - -@dataclass -class IMDBDatasetConfig(DatasetConfig): - @property - def num_classes(self): - return 2 - - def _build(self): - return IMDBDataset() + sample = self.dataset[idx] + return sample["text"], sample["label"] diff --git a/src/cupbearer/data/tampering.py b/src/cupbearer/data/tampering.py index e51a6447..a9d3a9c2 100644 --- a/src/cupbearer/data/tampering.py +++ b/src/cupbearer/data/tampering.py @@ -1,16 +1,27 @@ -from dataclasses import dataclass -from typing import ClassVar - import torch from datasets import load_dataset -from . import DatasetConfig +TAMPERING_DATSETS = { + "diamonds": "redwoodresearch/diamonds-seed0", + "text_props": "redwoodresearch/text_properties", + "gen_stories": "redwoodresearch/generated_stories", +} class TamperingDataset(torch.utils.data.Dataset): - def __init__(self, dataset): + def __init__(self, name: str, train: bool = True): + # TODO: allow for local loading / saving super().__init__() - self.dataset = dataset + self.train = train + self.name = name + + hf_name = ( + TAMPERING_DATSETS[self.name] + if self.name in TAMPERING_DATSETS + else self.name + ) + split = "train" if self.train else "validation" + self.dataset = load_dataset(hf_name, split=split) def __getitem__(self, idx): sample = self.dataset[idx] @@ -22,41 +33,3 @@ def __getitem__(self, idx): def __len__(self): return len(self.dataset) - - -TAMPERING_DATSETS = { - "diamonds": "redwoodresearch/diamonds-seed0", - "text_props": "redwoodresearch/text_properties", - "gen_stories": "redwoodresearch/generated_stories", -} - - -@dataclass -class 
TamperingDataConfig(DatasetConfig): - n_sensors: ClassVar[int] = 3 # not configurable - train: bool = True # TODO: how does cupbearer use this? - name: str = None - - def __post_init__(self): - assert self.name, "must pass name argument" - return super().__post_init__() - - @property - def num_classes(self): - # only used for multi-class classification - return None - - @property - def num_labels(self): - # n sensors + all(sensors) - return self.n_sensors + 1 - - def _build(self) -> TamperingDataset: # TODO: allow for local loading / saving - name = ( - TAMPERING_DATSETS[self.name] - if self.name in TAMPERING_DATSETS - else self.name - ) - split = "train" if self.train else "validation" - dataset = load_dataset(name, split=split) - return TamperingDataset(dataset) diff --git a/src/cupbearer/models/__init__.py b/src/cupbearer/models/__init__.py index 7dd0f470..185e256a 100644 --- a/src/cupbearer/models/__init__.py +++ b/src/cupbearer/models/__init__.py @@ -2,11 +2,10 @@ from pathlib import Path import torch -from transformers.modeling_utils import PreTrainedModel -from transformers.tokenization_utils_base import PreTrainedTokenizerBase from .hooked_model import HookedModel from .models import CNN, MLP, PreActResNet +from .transformers_hf import TamperingPredictionTransformer def load(model: HookedModel, path: Path | str): diff --git a/tests/test_tampering.py b/tests/test_tampering.py index d3db1c14..f54fdbf9 100644 --- a/tests/test_tampering.py +++ b/tests/test_tampering.py @@ -1,41 +1,60 @@ import pytest +import torch from cupbearer import data, models from cupbearer.scripts import ( eval_classifier, train_classifier, ) -from cupbearer.scripts.conf import ( - eval_classifier_conf, - train_classifier_conf, -) @pytest.fixture(scope="module") -def measurement_predictor_path(module_tmp_path): - cfg = train_classifier_conf.DebugConfig( - model=models.TamperTransformerConfig(name="pythia-14m"), - train_data=data.TamperingDataConfig(name="redwoodresearch/diamonds-seed0"), 
+def pythia(): + transformer, tokenizer, emb_dim, max_len = models.transformers_hf.load_transformer( + "pythia-14m" + ) + return models.TamperingPredictionTransformer( + model=transformer, + tokenizer=tokenizer, + embed_dim=emb_dim, + max_length=max_len, + n_sensors=3, + ) + + +@pytest.fixture(scope="module") +def diamond(): + return torch.utils.data.Subset(data.TamperingDataset("diamonds"), range(10)) + + +@pytest.fixture(scope="module") +def measurement_predictor_path(pythia, diamond, module_tmp_path): + train_loader = torch.utils.data.DataLoader(diamond, batch_size=2) + + train_classifier( + train_loader=train_loader, + model=pythia, + num_labels=4, task="multilabel", path=module_tmp_path, + max_steps=1, + logger=False, ) - train_classifier(cfg) - assert (module_tmp_path / "config.yaml").is_file() assert (module_tmp_path / "checkpoints" / "last.ckpt").is_file() - assert (module_tmp_path / "tensorboard").is_dir() return module_tmp_path @pytest.mark.slow -def test_eval_classifier(measurement_predictor_path): - cfg = eval_classifier_conf.DebugConfig( +def test_eval_classifier(pythia, diamond, measurement_predictor_path): + models.load(pythia, measurement_predictor_path) + + eval_classifier( + data=diamond, + model=pythia, path=measurement_predictor_path, - data=data.TamperingDataConfig( - name="redwoodresearch/diamonds-seed0", train=False - ), + max_batches=1, + batch_size=2, ) - eval_classifier(cfg) - assert (measurement_predictor_path / "eval.json").is_file() From 565f45623debb07caa1a7a629b3feb981de37b6d Mon Sep 17 00:00:00 2001 From: Viktor Rehnberg Date: Mon, 4 Mar 2024 11:56:26 +0100 Subject: [PATCH 24/25] Add convenience method to clone WanetBackdoor instance --- src/cupbearer/data/backdoors.py | 45 ++++++++++++++++++++++++++++++++- tests/test_data.py | 28 ++++++++++++-------- 2 files changed, 61 insertions(+), 12 deletions(-) diff --git a/src/cupbearer/data/backdoors.py b/src/cupbearer/data/backdoors.py index 13bf9baa..2e34d9d3 100644 --- 
a/src/cupbearer/data/backdoors.py +++ b/src/cupbearer/data/backdoors.py @@ -1,8 +1,10 @@ +from __future__ import annotations + import os from abc import ABC from dataclasses import dataclass from pathlib import Path -from typing import Tuple +from typing import Optional, Tuple import torch import torch.nn.functional as F @@ -165,6 +167,47 @@ def control_grid(self, control_grid: torch.Tensor): self._control_grid = control_grid + def clone( + self, + *, + target_class: Optional[int] = None, + path: Optional[Path | str] = None, + p_backdoor: Optional[float] = None, + p_noise: Optional[float] = None, + warping_strength: Optional[float] = None, + grid_rescale: Optional[float] = None, + ) -> WanetBackdoor: + """Create a new instance but with the same control_grid as current instance.""" + other = type(self)( + path=(path if path is not None else self.path), + p_backdoor=(p_backdoor if p_backdoor is not None else self.p_backdoor), + p_noise=(p_noise if p_noise is not None else self.p_noise), + target_class=( + target_class if target_class is not None else self.target_class + ), + control_grid_width=self.control_grid_width, + warping_strength=( + warping_strength + if warping_strength is not None + else self.warping_strength + ), + grid_rescale=( + grid_rescale if grid_rescale is not None else self.grid_rescale + ), + ) + logger.debug("Setting control grid of clone from instance.") + assert self._warping_field is None + other.control_grid = ( + self.control_grid * other.warping_strength / self.warping_strength + ) + return other + + path: Path | str | None + p_noise: float = 0.0 # Probability of non-backdoor warping + control_grid_width: int = 4 # Side length of unscaled warping field + warping_strength: float = 0.5 # Strength of warping effect + grid_rescale: float = 1.0 # Factor to rescale grid from warping effect + @property def warping_field(self) -> torch.Tensor: if self._warping_field is None: diff --git a/tests/test_data.py b/tests/test_data.py index 
b323b89d..9e66ba04 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -1,4 +1,5 @@ import functools +import itertools from dataclasses import dataclass import numpy as np @@ -154,29 +155,26 @@ def test_wanet_backdoor(clean_image_dataset): # Pick a target class outside the actual range so we can later tell whether it # was set correctly. target_class = 10_000 + backdoor = data.backdoors.WanetBackdoor( + path=None, + p_backdoor=0.0, + target_class=target_class, + ) clean_data = data.BackdoorDataset( original=clean_image_dataset, - backdoor=data.backdoors.WanetBackdoor( - path=None, - p_backdoor=0.0, - target_class=target_class, - ), + backdoor=backdoor, ) anomalous_data = data.BackdoorDataset( original=clean_image_dataset, - backdoor=data.backdoors.WanetBackdoor( - path=None, + backdoor=backdoor.clone( p_backdoor=1.0, - target_class=target_class, ), ) noise_data = data.BackdoorDataset( original=clean_image_dataset, - backdoor=data.backdoors.WanetBackdoor( - path=None, + backdoor=backdoor.clone( p_backdoor=0.0, p_noise=1.0, - target_class=target_class, ), ) for ( @@ -202,6 +200,14 @@ def test_wanet_backdoor(clean_image_dataset): assert torch.max(clean_img) <= 1 assert torch.max(anoma_img) <= 1 assert torch.max(noise_img) <= 1 + for ds1, ds2 in itertools.combinations( + [clean_data, anomalous_data, noise_data], + r=2, + ): + assert torch.allclose( + ds1.backdoor.warping_field, + ds2.backdoor.warping_field, + ) def test_wanet_backdoor_on_multiple_workers( From 2c1b38cbfdb7e0f7c2e8b22edc9b7eed4f11ebb4 Mon Sep 17 00:00:00 2001 From: Erik Jenner Date: Mon, 4 Mar 2024 12:18:29 -0800 Subject: [PATCH 25/25] Minor changes to WaNet cloning --- src/cupbearer/data/backdoors.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/src/cupbearer/data/backdoors.py b/src/cupbearer/data/backdoors.py index 2e34d9d3..9f3b9b16 100644 --- a/src/cupbearer/data/backdoors.py +++ b/src/cupbearer/data/backdoors.py @@ -171,7 +171,6 @@ def clone( self, *, 
target_class: Optional[int] = None, - path: Optional[Path | str] = None, p_backdoor: Optional[float] = None, p_noise: Optional[float] = None, warping_strength: Optional[float] = None, @@ -179,7 +178,7 @@ def clone( ) -> WanetBackdoor: """Create a new instance but with the same control_grid as current instance.""" other = type(self)( - path=(path if path is not None else self.path), + path=self.path, p_backdoor=(p_backdoor if p_backdoor is not None else self.p_backdoor), p_noise=(p_noise if p_noise is not None else self.p_noise), target_class=( @@ -202,12 +201,6 @@ def clone( ) return other - path: Path | str | None - p_noise: float = 0.0 # Probability of non-backdoor warping - control_grid_width: int = 4 # Side length of unscaled warping field - warping_strength: float = 0.5 # Strength of warping effect - grid_rescale: float = 1.0 # Factor to rescale grid from warping effect - @property def warping_field(self) -> torch.Tensor: if self._warping_field is None: