diff --git a/docs/user-guide/advanced/replicate_evaluations.md b/docs/user-guide/advanced/replicate_evaluations.md
index ccee2bbb3..24d17c620 100644
--- a/docs/user-guide/advanced/replicate_evaluations.md
+++ b/docs/user-guide/advanced/replicate_evaluations.md
@@ -15,7 +15,7 @@ Keep in mind:
 
 ### DINO ViT-S16 (random weights)
 
-Evaluating the backbone with randomly initialized weights serves as a baseline to compare the pretrained FMs to an FM that produces embeddings without any prior learning on image tasks. To evaluate, run:
+Evaluating the backbone with randomly initialized weights serves as a baseline to compare the pretrained FMs to a FM that produces embeddings without any prior learning on image tasks. To evaluate, run:
 
 ```
 MODEL_NAME="universal/vit_small_patch16_224_random" \
@@ -62,7 +62,7 @@ eva predict_fit --config configs/vision/pathology/offline/<task>.yaml
 
 ### Phikon (Owkin) - iBOT ViT-B16 (TCGA) [[2]](#references)
 
-[Owkin](https://www.owkin.com/) released the weights for "Phikon", an FM trained with iBOT on TCGA data, via
+[Owkin](https://www.owkin.com/) released the weights for "Phikon", a FM trained with iBOT on TCGA data, via
 [HuggingFace](https://huggingface.co/owkin/phikon). To evaluate, run:
 
 ```
@@ -73,6 +73,20 @@ IN_FEATURES=768 \
 eva predict_fit --config configs/vision/pathology/offline/<task>.yaml
 ```
 
+### Phikon-v2 (Owkin) - DINOv2 ViT-L16 (PANCAN-XL) [[9]](#references)
+
+[Owkin](https://www.owkin.com/) released the weights for "Phikon-v2", a FM trained with DINOv2
+on the PANCAN-XL dataset (450M 20x magnification histology images sampled from 60K WSIs), via
+[HuggingFace](https://huggingface.co/owkin/phikon-v2). To evaluate, run:
+
+```
+MODEL_NAME=pathology/owkin_phikon_v2 \
+NORMALIZE_MEAN="[0.485,0.456,0.406]" \
+NORMALIZE_STD="[0.229,0.224,0.225]" \
+IN_FEATURES=1024 \
+eva predict_fit --config configs/vision/pathology/offline/<task>.yaml
+```
+
 ### UNI (MahmoodLab) - DINOv2 ViT-L16 (Mass-100k) [[3]](#references)
 
 The UNI FM by MahmoodLab is available on [HuggingFace](https://huggingface.co/MahmoodLab/UNI). Note that access needs to
@@ -234,4 +248,6 @@ eva predict_fit --config configs/vision/pathology/offline/<task>.yaml
 
 [7]: Nechaev, Dmitry, Alexey Pchelnikov, and Ekaterina Ivanova. "Hibou: A Family of Foundational Vision Transformers for Pathology." arXiv preprint arXiv:2406.05074 (2024).
 
- [8]: Zimmermann, Eric, et al. "Virchow 2: Scaling Self-Supervised Mixed Magnification Models in Pathology." arXiv preprint arXiv:2408.00738 (2024).
\ No newline at end of file
+ [8]: Zimmermann, Eric, et al. "Virchow 2: Scaling Self-Supervised Mixed Magnification Models in Pathology." arXiv preprint arXiv:2408.00738 (2024).
+
+ [9]: Filiot, Alexandre, et al. "Phikon-v2, A large and public feature extractor for biomarker prediction." arXiv preprint arXiv:2409.09173 (2024).
\ No newline at end of file
diff --git a/src/eva/vision/models/networks/backbones/pathology/__init__.py b/src/eva/vision/models/networks/backbones/pathology/__init__.py
index 48222ab8c..a8e81c032 100644
--- a/src/eva/vision/models/networks/backbones/pathology/__init__.py
+++ b/src/eva/vision/models/networks/backbones/pathology/__init__.py
@@ -12,7 +12,7 @@
 )
 from eva.vision.models.networks.backbones.pathology.lunit import lunit_vits8, lunit_vits16
 from eva.vision.models.networks.backbones.pathology.mahmood import mahmood_uni
-from eva.vision.models.networks.backbones.pathology.owkin import owkin_phikon
+from eva.vision.models.networks.backbones.pathology.owkin import owkin_phikon, owkin_phikon_v2
 from eva.vision.models.networks.backbones.pathology.paige import paige_virchow2
 
 __all__ = [
@@ -22,6 +22,7 @@
     "kaiko_vits16",
     "kaiko_vits8",
     "owkin_phikon",
+    "owkin_phikon_v2",
     "lunit_vits16",
     "lunit_vits8",
     "mahmood_uni",
diff --git a/src/eva/vision/models/networks/backbones/pathology/owkin.py b/src/eva/vision/models/networks/backbones/pathology/owkin.py
index 175406ede..3670fe781 100644
--- a/src/eva/vision/models/networks/backbones/pathology/owkin.py
+++ b/src/eva/vision/models/networks/backbones/pathology/owkin.py
@@ -20,3 +20,17 @@ def owkin_phikon(out_indices: int | Tuple[int, ...] | None = None) -> nn.Module:
         The model instance.
     """
     return _utils.load_hugingface_model(model_name="owkin/phikon", out_indices=out_indices)
+
+
+@register_model("pathology/owkin_phikon_v2")
+def owkin_phikon_v2(out_indices: int | Tuple[int, ...] | None = None) -> nn.Module:
+    """Initializes the phikon-v2 pathology FM by owkin (https://huggingface.co/owkin/phikon-v2).
+
+    Args:
+        out_indices: Whether and which multi-level patch embeddings to return.
+            Currently only out_indices=1 is supported.
+
+    Returns:
+        The model instance.
+    """
+    return _utils.load_hugingface_model(model_name="owkin/phikon-v2", out_indices=out_indices)
diff --git a/src/eva/vision/models/networks/backbones/pathology/paige.py b/src/eva/vision/models/networks/backbones/pathology/paige.py
index dfb41aa65..10929392b 100644
--- a/src/eva/vision/models/networks/backbones/pathology/paige.py
+++ b/src/eva/vision/models/networks/backbones/pathology/paige.py
@@ -14,7 +14,7 @@
 from eva.vision.models.networks.backbones.registry import register_model
 
 
-@register_model("paige/virchow2")
+@register_model("pathology/paige_virchow2")
 def paige_virchow2(
     dynamic_img_size: bool = True,
     out_indices: int | Tuple[int, ...] | None = None,
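For a quick sanity check of the new `pathology/owkin_phikon_v2` registry entry outside the `eva predict_fit` workflow, the sketch below instantiates the backbone directly through the Python API. It is a minimal example and not part of the change set: the 224x224 input size, the ImageNet-style normalization, and the 1024-dimensional output (matching `IN_FEATURES=1024` in the documentation hunk above) are assumptions about the wrapped `owkin/phikon-v2` model rather than facts established by this diff, and running it downloads the weights from HuggingFace.

```python
# Minimal sanity-check sketch (not part of this diff). It assumes the module
# returned by `owkin_phikon_v2()` accepts 224x224 RGB batches and, with
# out_indices=None, yields one embedding vector per image whose size should
# match the IN_FEATURES=1024 value from the docs; verify both against eva's
# `load_hugingface_model` wrapper before relying on the shapes.
import torch

from eva.vision.models.networks.backbones.pathology import owkin_phikon_v2

model = owkin_phikon_v2()  # registered under "pathology/owkin_phikon_v2"
model.eval()

# Dummy batch of two (nominally ImageNet-normalized) 224x224 RGB tiles.
images = torch.randn(2, 3, 224, 224)

with torch.no_grad():
    output = model(images)

# Print the embedding shape(s), whether the wrapper returns a single tensor
# or a sequence of multi-level feature maps.
if hasattr(output, "shape"):
    print(output.shape)
else:
    print([feature.shape for feature in output])
```

Note that the final hunk also renames the Virchow2 registry entry from `paige/virchow2` to `pathology/paige_virchow2`, so any config or `MODEL_NAME` value that still references the old identifier needs to be updated accordingly.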