diff --git a/art/experimental/attacks/evasion/fast_gradient.py b/art/experimental/attacks/evasion/fast_gradient.py
index ef12f9d3cb..496e240513 100644
--- a/art/experimental/attacks/evasion/fast_gradient.py
+++ b/art/experimental/attacks/evasion/fast_gradient.py
@@ -27,7 +27,7 @@
 from art.attacks.evasion.fast_gradient import FastGradientMethod
 from art.attacks.attack import EvasionAttack
 from art.estimators.estimator import BaseEstimator, LossGradientsMixin
-from art.experimental.estimators.huggingface_multimodal import HuggingFaceMultiModalInput
+from art.experimental.estimators.hugging_face_multimodal import HuggingFaceMultiModalInput
 from art.summary_writer import SummaryWriter
 from art.config import ART_NUMPY_DTYPE
diff --git a/art/experimental/estimators/__init__.py b/art/experimental/estimators/__init__.py
index 2cd257e783..693e6c884d 100644
--- a/art/experimental/estimators/__init__.py
+++ b/art/experimental/estimators/__init__.py
@@ -1,5 +1,5 @@
 """
 Experimental Estimator API
 """
-from art.experimental.estimators import huggingface_multimodal
+from art.experimental.estimators import hugging_face_multimodal
 from art.experimental.estimators.jax import JaxEstimator
diff --git a/art/experimental/estimators/hugging_face_multimodal/__init__.py b/art/experimental/estimators/hugging_face_multimodal/__init__.py
new file mode 100644
index 0000000000..474a7c826b
--- /dev/null
+++ b/art/experimental/estimators/hugging_face_multimodal/__init__.py
@@ -0,0 +1,5 @@
+"""
+Module containing estimators for CLIP.
+"""
+from art.experimental.estimators.hugging_face_multimodal.hugging_face_mm import HuggingFaceMultiModalPyTorch
+from art.experimental.estimators.hugging_face_multimodal.hugging_face_mm_inputs import HuggingFaceMultiModalInput
diff --git a/art/experimental/estimators/huggingface_multimodal/hugging_face_mm.py b/art/experimental/estimators/hugging_face_multimodal/hugging_face_mm.py
similarity index 97%
rename from art/experimental/estimators/huggingface_multimodal/hugging_face_mm.py
rename to art/experimental/estimators/hugging_face_multimodal/hugging_face_mm.py
index 8c6d295fcf..aeca1d0c97 100644
--- a/art/experimental/estimators/huggingface_multimodal/hugging_face_mm.py
+++ b/art/experimental/estimators/hugging_face_multimodal/hugging_face_mm.py
@@ -36,7 +36,7 @@
     from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE
     from art.defences.preprocessor.preprocessor import Preprocessor
     from art.defences.postprocessor.postprocessor import Postprocessor
-    from art.experimental.estimators.huggingface_multimodal.huggingface_mm_inputs import HuggingFaceMultiModalInput
+    from art.experimental.estimators.hugging_face_multimodal import HuggingFaceMultiModalInput

 logger = logging.getLogger(__name__)

@@ -245,7 +245,9 @@ def predict(
         and providing it takes no effect.
         :return: Predictions over the supplied data.
         """
-        from art.experimental.estimators.huggingface_multimodal.hugging_face_mm_inputs import HuggingFaceMultiModalInput
+        from art.experimental.estimators.hugging_face_multimodal.hugging_face_mm_inputs import (
+            HuggingFaceMultiModalInput,
+        )

         # Set model to evaluation mode
         self._model.eval()
@@ -289,7 +291,9 @@ def fit(  # pylint: disable=W0221
         and providing it takes no effect.
         """
         import torch
-        from art.experimental.estimators.huggingface_multimodal.hugging_face_mm_inputs import HuggingFaceMultiModalInput
+        from art.experimental.estimators.hugging_face_multimodal.hugging_face_mm_inputs import (
+            HuggingFaceMultiModalInput,
+        )

         self._model.train()
         if self._optimizer is None:
diff --git a/art/experimental/estimators/huggingface_multimodal/hugging_face_mm_inputs.py b/art/experimental/estimators/hugging_face_multimodal/hugging_face_mm_inputs.py
similarity index 100%
rename from art/experimental/estimators/huggingface_multimodal/hugging_face_mm_inputs.py
rename to art/experimental/estimators/hugging_face_multimodal/hugging_face_mm_inputs.py
diff --git a/art/experimental/estimators/huggingface_multimodal/__init__.py b/art/experimental/estimators/huggingface_multimodal/__init__.py
deleted file mode 100644
index 0cbbdaf570..0000000000
--- a/art/experimental/estimators/huggingface_multimodal/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-"""
-Module containing estimators for CLIP.
-"""
-from art.experimental.estimators.huggingface_multimodal.hugging_face_mm import HuggingFaceMultiModalPyTorch
-from art.experimental.estimators.huggingface_multimodal.hugging_face_mm_inputs import HuggingFaceMultiModalInput
diff --git a/notebooks/clip_attack.ipynb b/notebooks/clip_attack.ipynb
index 4e50653e81..8d5ea7459a 100644
--- a/notebooks/clip_attack.ipynb
+++ b/notebooks/clip_attack.ipynb
@@ -22,7 +22,7 @@
     "import numpy as np\n",
     "import torch\n",
     "\n",
-    "from art.experimental.estimators.huggingface_multimodal import HuggingFaceMultiModalPyTorch, HuggingFaceMultiModalInput\n",
+    "from art.experimental.estimators.hugging_face_multimodal import HuggingFaceMultiModalPyTorch, HuggingFaceMultiModalInput\n",
     "from art.experimental.attacks.evasion import CLIPProjectedGradientDescentNumpy\n",
     "\n",
     "# Image normalization numbers\n",
@@ -150,7 +150,7 @@
     "name": "stderr",
     "output_type": "stream",
     "text": [
-     "2023-11-30 09:48:11.240678: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
+     "2023-11-30 14:58:29.132186: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
     "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
    ]
   },
@@ -168,7 +168,7 @@
     "name": "stderr",
     "output_type": "stream",
     "text": [
-     "100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 1.70it/s]"
+     "100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 1.60it/s]"
    ]
   },
   {
@@ -188,7 +188,7 @@
   {
    "data": {
     "application/vnd.jupyter.widget-view+json": {
-     "model_id": "d733d074704040ec8eb12855556f4922",
+     "model_id": "04de95b8f8e244eea99333f888cdd862",
      "version_major": 2,
      "version_minor": 0
     },
@@ -203,7 +203,7 @@
     "name": "stderr",
     "output_type": "stream",
     "text": [
-     "100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 1.95it/s]"
+     "100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 2.15it/s]\n"
    ]
   },
   {
@@ -212,13 +212,6 @@
     "text": [
      "The adversarial accuracy is 0.0\n"
    ]
-   },
-   {
-    "name": "stderr",
-    "output_type": "stream",
-    "text": [
-     "\n"
-    ]
   }
  ],
  "source": [
diff --git a/tests/attacks/evasion/test_multimodal_attack.py b/tests/attacks/evasion/test_multimodal_attack.py
index bff239fcaf..a74d4eee75 100644
--- a/tests/attacks/evasion/test_multimodal_attack.py
+++ b/tests/attacks/evasion/test_multimodal_attack.py
@@ -46,7 +46,7 @@ def test_grad_equivalence(max_iter, art_warning):
         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
         from transformers import CLIPModel

-        from art.experimental.estimators.huggingface_multimodal import (
+        from art.experimental.estimators.hugging_face_multimodal import (
             HuggingFaceMultiModalPyTorch,
             HuggingFaceMultiModalInput,
         )
@@ -103,7 +103,7 @@ def test_perturbation_equivalence(to_batch, art_warning):

         from transformers import CLIPModel

-        from art.experimental.estimators.huggingface_multimodal import (
+        from art.experimental.estimators.hugging_face_multimodal import (
             HuggingFaceMultiModalPyTorch,
             HuggingFaceMultiModalInput,
         )
@@ -183,7 +183,7 @@ def test_attack_functionality(art_warning, to_one_hot):

         from transformers import CLIPProcessor, CLIPModel

-        from art.experimental.estimators.huggingface_multimodal import (
+        from art.experimental.estimators.hugging_face_multimodal import (
             HuggingFaceMultiModalPyTorch,
             HuggingFaceMultiModalInput,
         )
diff --git a/tests/estimators/classification/test_multimodal.py b/tests/estimators/classification/test_multimodal.py
index 1b442c1475..f786029e5f 100644
--- a/tests/estimators/classification/test_multimodal.py
+++ b/tests/estimators/classification/test_multimodal.py
@@ -30,7 +30,7 @@ def test_predict(art_warning):
     try:
         import torch
         from transformers import CLIPModel, CLIPProcessor
-        from art.experimental.estimators.huggingface_multimodal import (
+        from art.experimental.estimators.hugging_face_multimodal import (
             HuggingFaceMultiModalPyTorch,
             HuggingFaceMultiModalInput,
         )
@@ -69,7 +69,7 @@ def test_predict(art_warning):
         )
         inputs = HuggingFaceMultiModalInput(**inputs)
         predictions = art_classifier.predict(inputs)
-        assert ((np.sum(np.argmax(predictions, axis=1) == labels) / len(labels)) == 1.0)
+        assert (np.sum(np.argmax(predictions, axis=1) == labels) / len(labels)) == 1.0

     except ARTTestException as e:
         art_warning(e)
@@ -82,7 +82,7 @@ def test_fit(art_warning, fix_get_cifar10_data):
     try:
         import torch
         from transformers import CLIPProcessor, CLIPModel
-        from art.experimental.estimators.huggingface_multimodal import (
+        from art.experimental.estimators.hugging_face_multimodal import (
             HuggingFaceMultiModalPyTorch,
             HuggingFaceMultiModalInput,
         )